From 3d1e2320bf5b6ae30eb5c0fd296e83b8bd88afb4 Mon Sep 17 00:00:00 2001
From: wangguanzhong
Date: Sun, 1 May 2022 15:17:21 +0800
Subject: [PATCH] [test_tipc] add serving tipc (#5865)

---
 ...al_normal_serving_python_linux_gpu_cpu.txt | 20 ++++++
 test_tipc/prepare.sh                          | 10 +++
 test_tipc/test_serving.sh                     | 63 +++++++++++++++++++
 3 files changed, 93 insertions(+)
 create mode 100644 test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt

diff --git a/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 000000000..0a07de524
--- /dev/null
+++ b/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================serving_params===========================
+model_name:yolov3_darknet53_270e_coco
+python:python
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
+norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
+quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o
+fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+--export_serving_model:True
+##
+start_serving:-m paddle_serving_server.serve --model serving_server
+--port:9393
+--gpu_ids:0
+##
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index c706e7069..754d5c24c 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -71,6 +71,16 @@ elif [ ${MODE} = "paddle2onnx_infer" ];then
     # set paddle2onnx_infer enve
     ${python} -m pip install install paddle2onnx
     ${python} -m pip install onnxruntime==1.10.0
+elif [ ${MODE} = "serving_infer" ];then
+    git clone https://github.com/PaddlePaddle/Serving
+    bash Serving/tools/paddle_env_install.sh
+    cd Serving
+    pip install -r python/requirements.txt
+    cd ..
+    pip install paddle-serving-client==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    pip install paddle-serving-app==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    pip install paddle-serving-server-gpu==0.8.3.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    python -m pip install paddlepaddle-gpu==2.2.2.post101 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html
 else
     # download coco lite data
     wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar
diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh
index e69de29bb..8c122d96d 100644
--- a/test_tipc/test_serving.sh
+++ b/test_tipc/test_serving.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+source test_tipc/utils_func.sh
+
+FILENAME=$1
+
+# parse model_name
+dataline=$(cat ${FILENAME})
+IFS=$'\n'
+lines=(${dataline})
+model_name=$(func_parser_value "${lines[1]}")
+echo "ppdet serving: ${model_name}"
+python=$(func_parser_value "${lines[2]}")
+filename_key=$(func_parser_key "${lines[3]}")
+filename_value=$(func_parser_value "${lines[3]}")
+
+# parse export params
+save_export_key=$(func_parser_key "${lines[5]}")
+save_export_value=$(func_parser_value "${lines[5]}")
+export_weight_key=$(func_parser_key "${lines[6]}")
+export_weight_value=$(func_parser_value "${lines[6]}")
+norm_export=$(func_parser_value "${lines[7]}")
+pact_export=$(func_parser_value "${lines[8]}")
+fpgm_export=$(func_parser_value "${lines[9]}")
+distill_export=$(func_parser_value "${lines[10]}")
+export_key1=$(func_parser_key "${lines[11]}")
+export_value1=$(func_parser_value "${lines[11]}")
+export_key2=$(func_parser_key "${lines[12]}")
+export_value2=$(func_parser_value "${lines[12]}")
+kl_quant_export=$(func_parser_value "${lines[13]}")
+export_serving_model_key=$(func_parser_key "${lines[14]}")
+export_serving_model_value=$(func_parser_value "${lines[14]}")
+# parse serving params
+start_serving=$(func_parser_value "${lines[16]}")
+port_key=$(func_parser_key "${lines[17]}")
+port_value=$(func_parser_value "${lines[17]}")
+gpu_id_key=$(func_parser_key "${lines[18]}")
+gpu_id_value=$(func_parser_value "${lines[18]}")
+
+LOG_PATH="./test_tipc/output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_serving.log"
+
+function func_serving(){
+    IFS='|'
+    if [ ${gpu_id_key} = "null" ]; then
+        start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} > serving.log 2>&1 &"
+    else
+        start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} ${gpu_id_key} ${gpu_id_value} > serving.log 2>&1 &"
+    fi
+    echo $start_serving_command
+    eval $start_serving_command
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${start_serving_command}" "${status_log}"
+}
+cd output_inference/${model_name}
+echo $PWD
+func_serving
+test_command="${python} ../../deploy/serving/test_client.py ../../deploy/serving/label_list.txt ../../demo/000000014439.jpg"
+echo $test_command
+eval $test_command
+last_status=${PIPESTATUS[0]}
+status_check $last_status "${test_command}" "${status_log}"
+
--
GitLab
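
A minimal usage sketch for the new serving chain (not part of the patch; assumptions: commands run from the PaddleDetection repo root, and the serving model has already been exported under output_inference/yolov3_darknet53_270e_coco, since test_serving.sh parses the export parameters above but never invokes the export step itself):

    # hypothetical invocation; config path and MODE name are taken from this patch
    CONFIG=test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
    # install Paddle Serving and its dependencies (prepare.sh, MODE=serving_infer)
    bash test_tipc/prepare.sh ${CONFIG} serving_infer
    # start the server from output_inference/<model_name> and run the client test
    bash test_tipc/test_serving.sh ${CONFIG}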
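
Under the hood, with the config values above, func_serving and the client call expand to commands equivalent to the following sketch (the GPU branch is taken because --gpu_ids is not null; nohup keeps the server alive in the background while the client sends a single prediction request):

    # equivalent to what test_serving.sh composes for this yolov3 config
    cd output_inference/yolov3_darknet53_270e_coco
    nohup python -m paddle_serving_server.serve --model serving_server --port 9393 --gpu_ids 0 > serving.log 2>&1 &
    # label list and demo image are resolved relative to the model directory
    python ../../deploy/serving/test_client.py ../../deploy/serving/label_list.txt ../../demo/000000014439.jpg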