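#!/bin/bash
# Usage: bash benchmark.sh <server_model_dir> <client_conf_dir>
#   $1 - server-side model directory (also names the output log, profile_log_$1)
#   $2 - directory containing serving_client_conf.prototxt for the client
#   Example (placeholder paths): bash benchmark.sh bert_model bert_client
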
# Remove logs from previous runs; the sweep below appends to profile_log_$1.
rm -f profile_log profile_log_$1

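# Use four GPUs; the FLAGS_profile_* variables switch on Paddle Serving's
# built-in profiling on both the server and the client side.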
export CUDA_VISIBLE_DEVICES=0,1,2,3
export FLAGS_profile_server=1
export FLAGS_profile_client=1
export FLAGS_serving_latency=1
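# Launch the GPU serving server in the background on port 9292 with 4 threads
# across GPUs 0-3; stderr goes to elog, stdout to stdlog.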
python -m paddle_serving_server_gpu.serve --model $1 --port 9292 --thread 4 --gpu_ids 0,1,2,3 2> elog > stdlog &

# Give the server time to load the model before sending requests.
sleep 5

# Warm up: one run so the sweep below is not skewed by first-request
# initialization overhead.
$PYTHONROOT/bin/python benchmark.py --thread 8 --batch_size 1 --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1

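# Sweep thread counts and batch sizes; the results for each combination are
# appended to profile_log_$1.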
for thread_num in 4 8 16
do
for batch_size in 1 4 16 64 256
do
    $PYTHONROOT/bin/python benchmark.py --thread $thread_num --batch_size $batch_size --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "model name :" $1
    echo "thread num :" $thread_num
    echo "batch size :" $batch_size
    echo "=================Done===================="
    echo "model name :$1" >> profile_log_$1
    echo "batch size :$batch_size" >> profile_log_$1
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log_$1
    tail -n 8 profile >> profile_log_$1
    echo "" >> profile_log_$1
done
done

# Stop the background serving processes started by this script; awk '{print $2}'
# extracts the PID column reliably instead of cutting fixed character positions.
ps -ef | grep 'serving' | grep -v grep | awk '{print $2}' | xargs kill -9