diff --git a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.sh b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-DarkNet53"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
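For reference, the arithmetic that the new multithread_http performs on the collected results can be reproduced standalone. The sketch below is illustrative only: it assumes MultiThreadRunner hands back each thread's [[elapsed_seconds], latency_ms_list, [request_count]] triple from run_http, and the percentile printout is a stand-in for the repo's show_latency helper, not its actual implementation.

    # A minimal sketch of the client-side accounting, under the assumptions above.
    import numpy as np

    def aggregate(per_thread, batch_size, total_cost):
        thread_times = [r[0][0] for r in per_thread]      # wall time per thread (s)
        latencies = sum((r[1] for r in per_thread), [])   # all request latencies (ms)
        total_num = sum(r[2][0] for r in per_thread)      # total requests sent
        print("Each thread cost: {}s".format(sum(thread_times) / len(thread_times)))
        print("AVG QPS: {} samples/s".format(batch_size * total_num / total_cost))
        for p in (50, 90, 99):                            # stand-in for show_latency
            print("P{} latency: {:.2f} ms".format(p, np.percentile(latencies, p)))

    # e.g. one thread that ran for 20.1s and served 3 requests:
    aggregate([[[20.1], [35.0, 41.2, 38.7], [3]]], batch_size=1, total_cost=20.2)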
diff --git a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.sh b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-HRNet_W18_C"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.sh b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-MobileNetV1"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.sh b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.sh
index 5ae26eb1a6d9eaff8a0c783fb8e674cde57b03d2..53ede7915316ef23c1208c02b624ad4a327733e6 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-MobileNetV2"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
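The MAX_GPU_MEMORY one-liner in these scripts scans gpu_use.log, skips the CSV header (awk's NR>1 check), and tracks the numeric maximum of the first field; referencing an unset awk variable instead of $1 would fall back to $0 and compare whole lines as strings. A rough Python equivalent of that pass, assuming the log comes from `nvidia-smi --query-gpu=memory.used --format=csv` (a header line followed by samples such as "1543 MiB"):

    # Sketch: max of the first field of every non-header line, like the awk pass.
    def max_gpu_memory(path="gpu_use.log"):
        max_mib = 0
        with open(path) as f:
            next(f, None)                      # skip "memory.used [MiB]" header
            for line in f:
                fields = line.split()
                if fields and fields[0].isdigit():
                    max_mib = max(max_mib, int(fields[0]))
        return max_mib

    print("MAX_GPU_MEMORY:", max_gpu_memory())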
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.sh b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.sh
index 4c008b4f6b627e934b176adcc07fcb3e8421476b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.sh
@@ -1,41 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-MobileNetV3_large_x1_0"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.sh b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-ResNeXt101_vd_64x4d"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.sh b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-ResNet50_vd"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
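GPU_UTILIZATION is computed in two steps in these scripts: `grep -av '^0 %'` drops idle samples from the utilization log, then awk averages the first field of what remains. Note the awk average divides by every surviving line, including the CSV header, so it can read slightly low. A rough Python equivalent under the same assumption about the nvidia-smi CSV format:

    # Sketch: average the non-zero utilization samples, mirroring grep + awk
    # (non-numeric lines such as the CSV header are skipped here).
    def avg_gpu_utilization(path="gpu_utilization.log"):
        samples = []
        with open(path) as f:
            for line in f:
                fields = line.split()
                if fields and fields[0].isdigit() and fields[0] != "0":
                    samples.append(int(fields[0]))
        return sum(samples) / len(samples) if samples else 0.0

    print("GPU_UTILIZATION:", avg_gpu_utilization())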
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.sh b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-ResNet50_vd_FPGM"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.sh b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-ResNet50_vd_KL"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.sh b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-ResNet50_vd_PACT"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
diff --git a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
index 10078994047abab5e9790a1b32d9692c89400ec6..2433b0132728dc96627254f9231949a74a551c28 100644
--- a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
@@ -53,18 +53,39 @@ def run_http(idx, batch_size):
         keys.append("image_{}".format(i))
         values.append(image)
     data = {"key": keys, "value": values}
+    latency_list = []
     start_time = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
+        l_end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
+        total_num += 1
         if time.time() - start_time > 20:
             break
     end = time.time()
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]
 
 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http , thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number /
+                                          total_cost))
+    show_latency(result[1])
 
 def run_rpc(thread, batch_size):
     client = PipelineClient()
diff --git a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.sh b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.sh
index 7c5a57f5430c49cf3b18f155c331b29939de060b..ee20eccf64c1e08dff16c7427ae5107525281dee 100644
--- a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.sh
+++ b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.sh
@@ -1,42 +1,44 @@
 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="imagenet"
-use_gpu=0
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="clas-ShuffleNetV2_x1_0"
 
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-if [ $use_gpu -eq 1 ]; then
-    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
-else
-    python3 benchmark.py yaml local_predictor 1 cpu
-fi
+# Create the yaml. If you already have config.yaml, ignore this step.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+
+echo "Starting HTTP Clients..."
+# Start a client in each thread to test the multi-threaded case.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
-    rm -rf PipelineServingLogs
-    rm -rf cpu_utilization.py
-    python3 resnet50_web_service.py >web.log 2>&1 &
-    sleep 3
-    nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-    nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+    echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >>profile_log_$modelname
+    # Start one web service. If you have already started the service yourself, skip this step.
+    #python3 web_service.py >web.log 2>&1 &
+    #sleep 3
+
+    # --id is the index of the GPU card; it must match the gpu id used by the server.
+    nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+    nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
     echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-    python3 benchmark.py run http $thread_num $batch_size
-    python3 cpu_utilization.py >>profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
-    ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
-    mv benchmark.tmp benchmark.log
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-    awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-    cat benchmark.log >> profile_log_$modelname
-    python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-    #rm -rf gpu_use.log gpu_utilization.log
+    # Start http client
+    python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+
+    # Collect CPU metrics, filter out momentary zero readings, and record the maximum GPU memory usage and the average GPU utilization.
+    python3 cpu_utilization.py >> profile_log_$modelname
+    grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+    awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+    awk -F' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+
+    # Show profiles
+    python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+    tail -n 8 profile >> profile_log_$modelname
+    echo '' >> profile_log_$modelname
 done
 done
+
+# Kill all nvidia-smi background tasks.
+pkill nvidia-smi
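One portability note on the loop body shared by all of these scripts: the inline echo that generates cpu_utilization.py relies on the shell's echo expanding \n escapes (dash's builtin does by default; bash's builtin needs echo -e). The file it is expected to produce is simply:

    # Expected content of the generated cpu_utilization.py: sample system-wide
    # CPU usage over a 1-second window with psutil.
    import psutil
    cpu_utilization = psutil.cpu_percent(1, False)
    print('CPU_UTILIZATION:', cpu_utilization)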