diff --git a/python/examples/pipeline/imagenet/benchmark.py b/python/examples/pipeline/imagenet/benchmark.py
index 22c1ce873f2db2d866a96ff9a68ffce3bd5b98f5..10078994047abab5e9790a1b32d9692c89400ec6 100644
--- a/python/examples/pipeline/imagenet/benchmark.py
+++ b/python/examples/pipeline/imagenet/benchmark.py
@@ -32,6 +32,8 @@ def gen_yml(device, gpu_id):
     if device == "gpu":
         config["op"]["imagenet"]["local_service_conf"]["device_type"] = 1
         config["op"]["imagenet"]["local_service_conf"]["devices"] = gpu_id
+    else:
+        config["op"]["imagenet"]["local_service_conf"]["device_type"] = 0
     with open("config2.yml", "w") as fout:
         yaml.dump(config, fout, default_flow_style=False)
 
@@ -91,7 +93,10 @@ if __name__ == "__main__":
         mode = sys.argv[2]  # brpc/ local predictor
         thread = int(sys.argv[3])
         device = sys.argv[4]
-        gpu_id = sys.argv[5]
+        if device == "gpu":
+            gpu_id = sys.argv[5]
+        else:
+            gpu_id = None
         gen_yml(device, gpu_id)
     elif sys.argv[1] == "run":
         mode = sys.argv[2]  # http/ rpc
diff --git a/python/examples/pipeline/imagenet/benchmark.sh b/python/examples/pipeline/imagenet/benchmark.sh
index 572c029ee624189ee8cc34f559354129e56dd2f6..4c008b4f6b627e934b176adcc07fcb3e8421476b 100644
--- a/python/examples/pipeline/imagenet/benchmark.sh
+++ b/python/examples/pipeline/imagenet/benchmark.sh
@@ -1,19 +1,24 @@
 export FLAGS_profile_pipeline=1
 alias python3="python3.7"
 modelname="imagenet"
+use_gpu=0
 gpu_id="0"
 benchmark_config_filename="benchmark_config.yaml"
 
 # HTTP
 ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
+if [ $use_gpu -eq 1 ]; then
+    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
+else
+    python3 benchmark.py yaml local_predictor 1 cpu
+fi
 rm -rf profile_log_$modelname
 for thread_num in 1
 do
 for batch_size in 1
 do
-    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http ----" >>profile_log_$modelname
+    echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
     rm -rf PipelineServingLogs
     rm -rf cpu_utilization.py
     python3 resnet50_web_service.py >web.log 2>&1 &
@@ -25,7 +30,7 @@ do
     python3 cpu_utilization.py >>profile_log_$modelname
     python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
     ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-    python3 benchmark.py dump benchmark.log benchmark.tmp
+    python3 benchmark.py dump benchmark.log benchmark.tmp 
     mv benchmark.tmp benchmark.log
     awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
     awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname