#!/bin/bash
# benchmark_batch.sh: benchmark a Paddle Serving BERT GPU service across thread counts and batch sizes.
# Start with a fresh profile_log for this run (-f: no error if it does not exist yet).
rm -f profile_log
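# Launch the Paddle Serving GPU server for the BERT model on four GPUs,
# redirecting its stderr/stdout to elog/stdlog.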
export CUDA_VISIBLE_DEVICES=0,1,2,3
python -m paddle_serving_server_gpu.serve --model bert_seq20_model/ --port 9295 --thread 4 --gpu_ids 0,1,2,3 2> elog > stdlog &

# Give the server a few seconds to start up before the benchmark clients connect.
sleep 5

for thread_num in 1 2 4 8 16
do
for batch_size in 1 2 4 8 16 32 64 128 256 512
do
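    # Run the batch benchmark client over RPC; its output (including timing) is captured in ./profile.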
    $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model serving_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "thread num: ", $thread_num
    echo "batch size: ", $batch_size
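    # Append this run's results to profile_log: the batch size, the aggregated
    # profile from show_profile.py, and the last line of the client output.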
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
done
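
# Note: the serving process launched at the top is still running in the background;
# stop it manually once the sweep completes.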