diff --git a/deploy/paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt b/deploy/paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..c781eb6f449fe06afbba7f96e01798c974bccf54
--- /dev/null
+++ b/deploy/paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt
@@ -0,0 +1,32 @@
+feed_var {
+  name: "x"
+  alias_name: "x"
+  is_lod_tensor: false
+  feed_type: 1
+  shape: 3
+  shape: 224
+  shape: 224
+}
+feed_var {
+  name: "boxes"
+  alias_name: "boxes"
+  is_lod_tensor: false
+  feed_type: 1
+  shape: 6
+}
+fetch_var {
+  name: "save_infer_model/scale_0.tmp_1"
+  alias_name: "features"
+  is_lod_tensor: false
+  fetch_type: 1
+  shape: 512
+}
+fetch_var {
+  name: "boxes"
+  alias_name: "boxes"
+  is_lod_tensor: false
+  fetch_type: 1
+  shape: 6
+}
+
+
diff --git a/deploy/paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt b/deploy/paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..04812f42ed90fbbd47c73b9ec706d57c04b4c571
--- /dev/null
+++ b/deploy/paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
@@ -0,0 +1,30 @@
+feed_var {
+  name: "x"
+  alias_name: "x"
+  is_lod_tensor: false
+  feed_type: 1
+  shape: 3
+  shape: 224
+  shape: 224
+}
+feed_var {
+  name: "boxes"
+  alias_name: "boxes"
+  is_lod_tensor: false
+  feed_type: 1
+  shape: 6
+}
+fetch_var {
+  name: "save_infer_model/scale_0.tmp_1"
+  alias_name: "features"
+  is_lod_tensor: false
+  fetch_type: 1
+  shape: 512
+}
+fetch_var {
+  name: "boxes"
+  alias_name: "boxes"
+  is_lod_tensor: false
+  fetch_type: 1
+  shape: 6
+}
diff --git a/deploy/paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt b/deploy/paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..d9ab81a8b3c275f638f314489a84deef46011d73
--- /dev/null
+++ b/deploy/paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt
@@ -0,0 +1,29 @@
+feed_var {
+  name: "im_shape"
+  alias_name: "im_shape"
+  is_lod_tensor: false
+  feed_type: 1
+  shape: 2
+}
+feed_var {
+  name: "image"
+  alias_name: "image"
+  is_lod_tensor: false
+  feed_type: 7
+  shape: -1
+  shape: -1
+  shape: 3
+}
+fetch_var {
+  name: "save_infer_model/scale_0.tmp_1"
+  alias_name: "save_infer_model/scale_0.tmp_1"
+  is_lod_tensor: true
+  fetch_type: 1
+  shape: -1
+}
+fetch_var {
+  name: "save_infer_model/scale_1.tmp_1"
+  alias_name: "save_infer_model/scale_1.tmp_1"
+  is_lod_tensor: false
+  fetch_type: 2
+}
diff --git a/deploy/paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/serving_server_conf.prototxt b/deploy/paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/serving_server_conf.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..d9ab81a8b3c275f638f314489a84deef46011d73
--- /dev/null
+++ b/deploy/paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/serving_server_conf.prototxt
@@ -0,0 +1,29 @@
+feed_var {
+  name: "im_shape"
+  alias_name: "im_shape"
+  is_lod_tensor: false
+  feed_type: 1
+  shape: 2
+}
+feed_var {
+  name: "image"
+  alias_name: "image"
+  is_lod_tensor: false
+  feed_type: 7
+  shape: -1
+  shape: -1
+  shape: 3
+}
+fetch_var {
+  name: "save_infer_model/scale_0.tmp_1"
+  alias_name: "save_infer_model/scale_0.tmp_1"
+  is_lod_tensor: true
+  fetch_type: 1
+  shape: -1
+}
+fetch_var {
+  name: "save_infer_model/scale_1.tmp_1"
+  alias_name: "save_infer_model/scale_1.tmp_1"
+  is_lod_tensor: false
+  fetch_type: 2
+}
diff --git a/deploy/paddleserving/preprocess/serving_client_conf.prototxt b/deploy/paddleserving/preprocess/serving_client_conf.prototxt
deleted file mode 100644
index f922abdbfb53de1b2fcc7de988da6b0315d2ad7d..0000000000000000000000000000000000000000
--- a/deploy/paddleserving/preprocess/serving_client_conf.prototxt
+++ /dev/null
@@ -1,14 +0,0 @@
-feed_var {
-  name: "inputs"
-  alias_name: "inputs"
-  is_lod_tensor: false
-  feed_type: 20
-  shape: 1
-}
-fetch_var {
-  name: "save_infer_model/scale_0.tmp_1"
-  alias_name: "prediction"
-  is_lod_tensor: false
-  fetch_type: 1
-  shape: 1000
-}
\ No newline at end of file
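For orientation, these four checked-in directories hold feed/fetch configs meant to overwrite the `serving_server_conf.prototxt`/`serving_client_conf.prototxt` files generated by `paddle_serving_client.convert`, which is exactly what the `cp` commands added to `test_tipc/test_serving_infer.sh` further down do. A minimal sketch of that step, assuming it runs from `./deploy` and that the `*_value` variables hold the converted model directories as in the test script:

```bash
# run from ./deploy; the *_serving_*_value variables are the serving_server /
# serving_client output dirs of paddle_serving_client.convert
# (see the cp commands in test_tipc/test_serving_infer.sh below)
cp ./paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}
cp ./paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}
cp ./paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}
cp ./paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}
```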
diff --git a/test_tipc/README.md b/test_tipc/README.md
index e7765a861d7bccd80ff84d83302437e52a68b11b..154400d741c0c735a83b7d1dc8ba6c6b667b118c 100644
--- a/test_tipc/README.md
+++ b/test_tipc/README.md
@@ -112,4 +112,5 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/MobileNetV3/Mo
 - [test_lite_arm_cpu_cpp 使用](docs/test_lite_arm_cpu_cpp.md): 测试基于Paddle-Lite的ARM CPU端c++预测部署功能.
 - [test_paddle2onnx 使用](docs/test_paddle2onnx.md):测试Paddle2ONNX的模型转化功能,并验证正确性。
 - [test_serving_infer_python 使用](docs/test_serving_infer_python.md):测试python serving功能。
+- [test_serving_infer_cpp 使用](docs/test_serving_infer_cpp.md):测试c++ serving功能。
 - [test_train_fleet_inference_python 使用](./docs/test_train_fleet_inference_python.md):测试基于Python的多机多卡训练与推理等基本功能。
diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md
new file mode 100644
index 0000000000000000000000000000000000000000..64b9bb04edbb1dedf0af8b416f7e8e81b3dbdabc
--- /dev/null
+++ b/test_tipc/docs/test_serving_infer_cpp.md
@@ -0,0 +1,87 @@
+# Linux GPU/CPU C++ 服务化部署测试
+
+Linux GPU/CPU C++ 服务化部署测试的主程序为`test_serving_infer.sh`,可以测试基于C++的模型服务化部署功能。
+
+
+## 1. 测试结论汇总
+
+- 推理相关:
+
+| 算法名称 | 模型名称 | device_CPU | device_GPU |
+| :----: | :----: | :----: | :----: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
+| ResNet | ResNet50 | 支持 | 支持 |
+| ResNet | ResNet50_vd | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+
+
+## 2. 测试流程
+
+### 2.1 准备数据
+
+分类模型默认使用`./deploy/paddleserving/daisy.jpg`作为测试输入图片,无需下载。
+识别模型默认使用`drink_dataset_v1.0/test_images/001.jpeg`作为测试输入图片,在**2.2 准备环境**中会下载好。
+
+### 2.2 准备环境
+
+
+- 安装PaddlePaddle:如果您已经安装了2.2或者以上版本的paddlepaddle,那么无需运行下面的命令安装paddlepaddle。
+  ```shell
+  # 需要安装2.2及以上版本的Paddle
+  # 安装GPU版本的Paddle
+  python3.7 -m pip install paddlepaddle-gpu==2.2.0
+  # 安装CPU版本的Paddle
+  python3.7 -m pip install paddlepaddle==2.2.0
+  ```
+
+- 安装依赖
+  ```shell
+  python3.7 -m pip install -r requirements.txt
+  ```
+- 安装 PaddleServing 相关组件,包括serving_client、serving-app,自动编译带自定义OP的serving_server包(测试PP-ShiTu时),以及自动下载并解压推理模型
+  ```bash
+  bash test_tipc/prepare.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
+  ```
+
+### 2.3 功能测试
+
+测试方法如下所示,希望测试不同的模型文件,只需更换为自己的参数配置文件,即可完成对应模型的测试。
+
+```bash
+bash test_tipc/test_serving_infer.sh ${your_params_file}
+```
+
+以`ResNet50`的`Linux GPU/CPU C++ 服务化部署测试`为例,命令如下所示。
+
+
+```bash
+bash test_tipc/test_serving_infer.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+```
+
+输出结果如下,表示命令运行成功。
+
+```
+Run successfully with command - python3.7 test_cpp_serving_client.py > ../../test_tipc/output/ResNet50/server_infer_cpp_gpu_batchsize_1.log 2>&1!
+Run successfully with command - python3.7 test_cpp_serving_client.py > ../../test_tipc/output/ResNet50/server_infer_cpp_cpu_batchsize_1.log 2>&1 !
+```
+
+预测结果会自动保存在 `./test_tipc/output/ResNet50/server_infer_cpp_gpu_batchsize_1.log` ,可以看到 PaddleServing 的运行结果:
+
+```
+{'err_no': 0, 'err_msg': '', 'key': ['label', 'prob'], 'value': ["['daisy']", '[0.998314619064331]']}
+```
+
+
+如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。
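Beyond eyeballing the success lines, the aggregated pass/fail record lands in `results_serving.log` under the model's output directory (the `status_log` path set in `test_tipc/test_serving_infer.sh`), which is convenient for scripted checks. A small sketch; the "Run failed" wording is an assumption based on the usual TIPC status format:

```bash
# count pass/fail entries written by the status checks in test_serving_infer.sh;
# "Run failed" is assumed to be the failure wording used by TIPC's status check
log=test_tipc/output/ResNet50/results_serving.log
echo "passed: $(grep -c 'Run successfully' ${log})"
echo "failed: $(grep -c 'Run failed' ${log})"
```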
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
index 4d48ccadeef0702fdda923dcc7502afb3904c49c..ddc4ebfb14096db2ee72eb741106924c51d76471 100644
--- a/test_tipc/docs/test_serving_infer_python.md
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -60,14 +60,14 @@ Linux GPU/CPU PYTHON 服务化部署测试的主程序为`test_serving_infer.sh
 测试方法如下所示,希望测试不同的模型文件,只需更换为自己的参数配置文件,即可完成对应模型的测试。
 
 ```bash
-bash test_tipc/test_serving_infer_python.sh ${your_params_file} lite_train_lite_infer
+bash test_tipc/test_serving_infer.sh ${your_params_file}
 ```
 
 以`ResNet50`的`Linux GPU/CPU PYTHON 服务化部署测试`为例,命令如下所示。
 
 
 ```bash
-bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
+bash test_tipc/test_serving_infer.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 ```
 
 输出结果如下,表示命令运行成功。
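The success lines in this document come from `pipeline_http_client.py`; the same pipeline web service can also be probed by hand. A hypothetical sketch, assuming the service is already up and that the port (`18080`) and route (`imagenet/prediction`) match the `config.yml` shipped under `deploy/paddleserving` — check that file before relying on either value:

```bash
# manual probe of the pipeline HTTP service; port and route are assumptions
# taken from the default deploy/paddleserving/config.yml
cd deploy/paddleserving
img64=$(base64 -w 0 daisy.jpg)
curl -s -X POST http://127.0.0.1:18080/imagenet/prediction \
     -H "Content-Type: application/json" \
     -d "{\"key\": [\"image\"], \"value\": [\"${img64}\"]}"
```

A healthy response mirrors the expected log output: a JSON body with `err_no` 0 and the predicted label/probability.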
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 5e93e17fa76f74724f75dbbdc5a1cf34d7c00299..9f98f60b8231f3669fa9d1d567d2f0b27cfd0620 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -203,19 +203,19 @@ if [[ ${MODE} = "serving_infer" ]]; then
     ${python_name} -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
     ${python_name} -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
     python_name=$(func_parser_value "${lines[2]}")
-    if [[ ${FILENAME} =~ "cpp" ]]; then
-        pushd ./deploy/paddleserving
-        # bash build_server.sh ${python_name}
-        popd
+    if [[ ${FILENAME} =~ "cpp" ]] && [[ ${model_name} =~ "ShiTu" ]]; then
+        bash build_server.sh ${python_name}
     else
         ${python_name} -m pip install install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
     fi
     if [[ ${model_name} =~ "ShiTu" ]]; then
+        ${python_name} -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
         cls_inference_model_url=$(func_parser_value "${lines[3]}")
         cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}")
         det_inference_model_url=$(func_parser_value "${lines[4]}")
         det_tar_name=$(func_get_url_file_name "${det_inference_model_url}")
         cd ./deploy
+        wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar --no-check-certificate && tar -xf drink_dataset_v1.0.tar
         mkdir models
         cd models
         wget -nc ${cls_inference_model_url} && tar xf ${cls_tar_name}
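A side note on the compound test above: `=~` is only valid inside `[[ ... ]]`, and each `[[` must be closed by `]]` before `&&` joins the two tests; mixing `[[ ... ]` with `[ ... ]]` is a bash syntax error. A standalone illustration with hypothetical values:

```bash
#!/bin/bash
# hypothetical values, for illustration only
FILENAME="test_tipc/configs/PP-ShiTu/PPShiTu_serving_cpp_linux_gpu_cpu.txt"
model_name="PPShiTu_general_rec"

# each regex match sits in its own [[ ... ]]; && combines their exit statuses
if [[ ${FILENAME} =~ "cpp" ]] && [[ ${model_name} =~ "ShiTu" ]]; then
    echo "C++ serving test for PP-ShiTu: build the custom-OP server"
fi
```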
diff --git a/test_tipc/test_serving_infer.sh b/test_tipc/test_serving_infer.sh
index 0d5e1f4f853d5507edb6d84e42f3636f796c08eb..bccdd0394df51996ee6a71eb1102e6c918a35a0c 100644
--- a/test_tipc/test_serving_infer.sh
+++ b/test_tipc/test_serving_infer.sh
@@ -206,8 +206,9 @@ function func_serving_cls(){
 
 
 function func_serving_rec(){
-    LOG_PATH="../../../test_tipc/output/${model_name}"
+    LOG_PATH="test_tipc/output/${model_name}"
     mkdir -p ${LOG_PATH}
+    LOG_PATH="../../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_serving.log"
     trans_model_py=$(func_parser_value "${lines[5]}")
     cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
@@ -244,6 +245,7 @@ function func_serving_rec(){
     done
 
     # pdserving
+    export SERVING_BIN=$PWD/Serving/server-build-gpu-opencv/core/general-server/serving
     cd ./deploy
     set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
@@ -261,11 +263,14 @@ function func_serving_rec(){
     det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
     eval $det_trans_model_cmd
 
-    # modify the alias_name of fetch_var to "outputs"
-    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
-    eval ${server_fetch_var_line_cmd}
-    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_client_value/serving_client_conf.prototxt"
-    eval ${client_fetch_var_line_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}"
+    eval ${cp_prototxt_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}"
+    eval ${cp_prototxt_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}"
+    eval ${cp_prototxt_cmd}
+    cp_prototxt_cmd="cp ./paddleserving/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}"
+    eval ${cp_prototxt_cmd}
 
     prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${cls_serving_server_value}/serving_server_conf.prototxt)
     IFS=$'\n'
@@ -278,27 +283,11 @@ function func_serving_rec(){
     unset http_proxy
 
     if [[ ${FILENAME} =~ "cpp" ]]; then
-        det_serving_client_dir_name=$(func_get_url_file_name "$det_serving_client_value")
-        set_det_client_config_line_cmd="sed -i '/MainbodyDetect/,/serving_client_conf.prototxt/s/models\/.*\/serving_client_conf.prototxt/models\/${det_serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
-        eval ${set_det_client_config_line_cmd}
-
-        cls_serving_client_dir_name=$(func_get_url_file_name "$cls_serving_client_value")
-        set_cls_client_config_line_cmd="sed -i '/ObjectRecognition/,/serving_client_conf.prototxt/s/models\/.*\/serving_client_conf.prototxt/models\/${cls_serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
-        eval ${set_cls_client_config_line_cmd}
-
-        set_pipeline_py_feed_var_cmd="sed -i '/ObjectRecognition/,/feed={\"x\": batch_imgs}/s/{.*: batch_imgs}/{${feed_var_name}: batch_imgs}/' ${pipeline_py}"
-        eval ${set_pipeline_py_feed_var_cmd}
-
         for use_gpu in ${web_use_gpu_list[*]}; do
             if [ ${use_gpu} = "null" ]; then
-                det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
-                web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../models/${det_serving_server_dir_name} --port 9293 >>log_mainbody_detection.txt &"
-
-                cls_serving_server_dir_name=$(func_get_url_file_name "$cls_serving_server_value")
-                web_service_cpp_cmd2="${python_interp} -m paddle_serving_server.serve --model ../../models/${cls_serving_server_dir_name} --port 9294 >>log_feature_extraction.txt &"
+                web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
                 eval $web_service_cpp_cmd
-                eval $web_service_cpp_cmd2
                 sleep 5s
                 _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
                 pipeline_cmd="${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
@@ -310,12 +299,8 @@ function func_serving_rec(){
                 sleep 5s
             else
                 det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
-                web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../models/${det_serving_server_dir_name} --port 9293 --gpu_id=${use_gpu} >>log_mainbody_detection.txt &"
-
-                cls_serving_server_dir_name=$(func_get_url_file_name "$cls_serving_server_value")
-                web_service_cpp_cmd2="${python_interp} -m paddle_serving_server.serve --model ../../models/${cls_serving_server_dir_name} --port 9294 --gpu_id=${use_gpu} >>log_feature_extraction.txt &"
+                web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
                 eval $web_service_cpp_cmd
-                eval $web_service_cpp_cmd2
                 sleep 5s
                 _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
                 pipeline_cmd="${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
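For reference, the combined detection + feature-extraction C++ server assembled by `web_service_cpp_cmd` can also be started by hand. A sketch assuming the custom-OP server built by `build_server.sh`, converted model directories named as in this change, and a working directory of `deploy/paddleserving/recognition` (all assumptions; adjust paths to your layout):

```bash
# one server process hosts both ops; the directory names assume the converted
# models live under deploy/models as set up by test_tipc/prepare.sh
python3.7 -m paddle_serving_server.serve \
    --model ../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving \
            ../../models/general_PPLCNet_x2_5_lite_v1.0_serving \
    --op GeneralPicodetOp GeneralFeatureExtractOp \
    --port 9400 &
sleep 5s
# the C++ serving client script used by the test harness
python3.7 test_cpp_serving_client.py
```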