Unverified commit 05a28109, authored by Jiawei Wang, committed by GitHub

Merge pull request #1255 from bjjwwang/develop

rm some redundant files in PaddleClas
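# ---- DarkNet53: benchmark config (GPU) ----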
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "DarkNet53"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
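# ---- imagenet pipeline: benchmark config (CPU) ----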
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
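# ---- DarkNet53: benchmark shell script (HTTP pipeline) ----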
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
# Report the peak sampled value from the nvidia-smi logs ($1 is the numeric first CSV field).
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
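# ---- HRNet_W18_C: benchmark config (GPU) ----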
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "HRNet_W18_C"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W18_C_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
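# ---- imagenet pipeline: benchmark config (CPU) ----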
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
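# ---- HRNet_W18_C: benchmark shell script (HTTP pipeline) ----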
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
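# ---- MobileNetV1: benchmark config (GPU) ----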
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "MobileNetV1"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
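# ---- imagenet pipeline: benchmark config (CPU) ----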
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
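# ---- MobileNetV1: benchmark shell script (HTTP pipeline) ----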
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
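# ---- MobileNetV2: benchmark config (GPU) ----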
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "MobileNetV2"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
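# ---- imagenet pipeline: benchmark config (CPU) ----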
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
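# ---- MobileNetV2: benchmark shell script (HTTP pipeline) ----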
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
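# ---- MobileNetV3_large_x1_0: benchmark config (GPU) ----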
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "MobileNetV3_large_x1_0"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
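# ---- imagenet pipeline: benchmark config (CPU) ----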
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
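# ---- MobileNetV3_large_x1_0: benchmark shell script (HTTP pipeline) ----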
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
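# ---- ResNeXt101_vd_64x4d: benchmark config (GPU) ----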
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "ResNeXt101_vd_64x4d"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
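# ---- imagenet pipeline: benchmark config (CPU) ----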
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
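# ---- ResNeXt101_vd_64x4d: benchmark shell script (HTTP pipeline) ----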
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
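# ---- ResNet50_vd: benchmark config (GPU) ----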
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "ResNet50_vd"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
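# ---- imagenet pipeline: benchmark config (CPU) ----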
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
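# ---- ResNet50_vd: benchmark shell script (HTTP pipeline) ----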
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
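# ---- ResNet50_vd_FPGM: benchmark config (GPU) ----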
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "ResNet50_vd_FPGM"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_FPGM_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
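# ---- imagenet pipeline: benchmark config (CPU) ----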
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
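# ---- ResNet50_vd_FPGM: benchmark shell script (HTTP pipeline) ----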
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
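# ---- ResNet50_vd_KL: benchmark config (GPU) ----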
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "ResNet50_vd_KL"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_KL_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
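# ---- imagenet pipeline: benchmark config (CPU) ----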
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
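# ---- ResNet50_vd_KL: benchmark shell script (HTTP pipeline) ----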
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
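# ---- ResNet50_vd_PACT: benchmark config (GPU) ----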
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "ResNet50_vd_PACT"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_PACT_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
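# ---- imagenet pipeline: benchmark config (CPU) ----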
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
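# ---- ResNet50_vd_PACT: benchmark shell script (HTTP pipeline) ----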
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done
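# ---- ShuffleNetV2_x1_0: benchmark config (GPU) ----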
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "ShuffleNetV2_x1_0"
model_type: "static"
model_source: "PaddleClas"
model_url: "https://paddle-imagenet-models-name.bj.bcebos.com/ShuffleNetV2_x1_0_pretrained.tar"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "gpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
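# ---- imagenet pipeline: benchmark config (CPU) ----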
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "cuicheng01"
model_name: "imagenet"
model_type: "static"
model_source: "PaddleClas"
model_url: "model_url_path"
batch_size: 1
num_of_samples: 1000
input_shape: "3,224,224"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
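# ---- ShuffleNetV2_x1_0: benchmark shell script (HTTP pipeline) ----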
export FLAGS_profile_pipeline=1
alias python3="python3.7"
modelname="imagenet"
use_gpu=1
gpu_id="0"
benchmark_config_filename="benchmark_config.yaml"
# HTTP
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
sleep 3
if [ $use_gpu -eq 1 ]; then
python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
else
python3 benchmark.py yaml local_predictor 1 cpu
fi
rm -rf profile_log_$modelname
for thread_num in 1
do
for batch_size in 1
do
echo "#----imagenet thread num: $thread_num batch size: $batch_size mode:http use_gpu:$use_gpu----" >>profile_log_$modelname
rm -rf PipelineServingLogs
rm -rf cpu_utilization.py
python3 resnet50_web_service.py >web.log 2>&1 &
sleep 3
nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
python3 benchmark.py run http $thread_num $batch_size
python3 cpu_utilization.py >>profile_log_$modelname
python3 -m paddle_serving_server_gpu.profiler >>profile_log_$modelname
ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
python3 benchmark.py dump benchmark.log benchmark.tmp
mv benchmark.tmp benchmark.log
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
cat benchmark.log >> profile_log_$modelname
python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
#rm -rf gpu_use.log gpu_utilization.log
done
done