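#!/bin/bash
# benchmark.sh: pipeline benchmark for the ocr example. For each thread count
# and batch size, it drives the service over HTTP and then over RPC, samples
# CPU/GPU usage in the background, and appends the results to
# profile_log_$modelname.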
export FLAGS_profile_pipeline=1
alias python3="python3.7"
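# Aliases are not expanded in non-interactive bash shells by default; enable
# expansion so the python3 alias above takes effect in this script.
shopt -s expand_aliases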
modelname="ocr"

# HTTP
#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9 
sleep 3
# Create the config yaml. If you already have config.yaml, skip this step.
#python3 benchmark.py yaml local_predictor 1 gpu 
rm -rf profile_log_$modelname

echo "Starting HTTP Clients..."
# Start a client in each thread, testing the multi-threaded case.
for thread_num in 1 2 4 6 8 12 16
do
  for batch_size in 1
  do
    echo "----$modelname thread num: $thread_num batch size: $batch_size mode:http ----" >>profile_log_$modelname
    # Start one web service. If you have already started the service yourself, skip this step.
    #python3 web_service.py >web.log 2>&1 &
    #sleep 3

    # --id is the index of the GPU card; it must match the GPU id used by the server.
    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
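    # Write a small helper that samples overall CPU utilization over a
    # 1-second window (psutil.cpu_percent(1, False)).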
    echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
    # Start http client
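    # benchmark.py "run" arguments: <mode> <thread_num> <batch_size>; mode is
    # http in this loop and rpc in the second pass below.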
    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1

    # Collect CPU metrics, filter out samples that are momentarily zero, and record the peak GPU memory and the average GPU utilization.
    python3 cpu_utilization.py >> profile_log_$modelname
    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
    awk 'BEGIN {max = 0} {if (NR > 1 && $1 > max) max = $1} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
    awk -F' ' 'NR > 1 {sum += $1; n++} END {if (n > 0) print "GPU_UTILIZATION:", sum / n, sum, n}' gpu_utilization.log.tmp >> profile_log_$modelname

    # Show profiles
    python3 ../../util/show_profile.py profile $thread_num >> profile_log_$modelname
    tail -n 8 profile >> profile_log_$modelname
    echo '' >> profile_log_$modelname
  done
done

# Kill all nvidia-smi background tasks.
pkill nvidia-smi

echo "Starting RPC Clients..."

# RPC
#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3

# Create the config yaml. If you already have config.yaml, skip this step.
#python3 benchmark.py yaml local_predictor 1 gpu
#rm -rf profile_log_$modelname

# Start a client in each thread, testing the multi-threaded case.
for thread_num in 1 2 4 6 8 12 16
do
  for batch_size in 1
  do
    echo "----$modelname thread num: $thread_num batch size: $batch_size mode:rpc ----" >> profile_log_$modelname
    # Start one web service. If you have already started the service yourself, skip this step.
    #python3 web_service.py >web.log 2>&1 &
    #sleep 3

    # --id is the index of the GPU card; it must match the GPU id used by the server.
    nvidia-smi --id=3 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
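    # Write a small helper that samples overall CPU utilization over a
    # 1-second window (psutil.cpu_percent(1, False)).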
    echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py

    # Start rpc client
    python3 benchmark.py run rpc $thread_num $batch_size > profile 2>&1

    # Collect CPU metrics, filter out samples that are momentarily zero, and record the peak GPU memory and the average GPU utilization.
    python3 cpu_utilization.py >> profile_log_$modelname
    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
    awk 'BEGIN {max = 0} {if (NR > 1 && $1 > max) max = $1} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
    awk -F" " '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname

    # Show profiles
    python3 ../../util/show_profile.py profile $thread_num >> profile_log_$modelname
    tail -n 8 profile >> profile_log_$modelname
    echo "" >> profile_log_$modelname
  done
done

# Kill all nvidia-smi background tasks.
pkill nvidia-smi