From 725c0da267e6748558ba2d0cc045cc8886f2a84d Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Thu, 16 Jun 2022 12:21:54 +0800
Subject: [PATCH] add kl_quant chain and polish prepare.sh

---
 ..._normal_normal_infer_cpp_linux_gpu_cpu.txt |  2 +-
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 +++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 +++++++
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 +++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 +++++++
 test_tipc/docs/test_inference_cpp.md          | 40 ++++++++++---------
 test_tipc/docs/test_serving_infer_cpp.md      |  2 +
 test_tipc/docs/test_serving_infer_python.md   |  2 +
 test_tipc/prepare.sh                          |  8 +++-
 9 files changed, 88 insertions(+), 22 deletions(-)
 create mode 100644 test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt

diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
index f9abf36f..09ab9896 100644
--- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -1,5 +1,5 @@
 ===========================cpp_infer_params===========================
-model_name:MobileNetV3_large_x1_0_kl
+model_name:MobileNetV3_large_x1_0_kl_quant
 cpp_infer_type:cls
 cls_inference_model_dir:./MobileNetV3_large_x1_0_kl_quant_infer/
 det_inference_model_dir:
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..6fb915c8
--- /dev/null
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:MobileNetV3_large_x1_0_kl_quant
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_serving/
+--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..5ccc1c86
--- /dev/null
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:MobileNetV3_large_x1_0_kl_quant
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_serving/
+--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..98cf404d
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_kl_quant
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_kl_quant_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..fcff881e
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_kl_quant
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_kl_quant_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md
index ce2936f4..0fa854c3 100644
--- a/test_tipc/docs/test_inference_cpp.md
+++ b/test_tipc/docs/test_inference_cpp.md
@@ -6,25 +6,27 @@ Linux GPU/CPU C++ 推理功能测试的主程序为`test_inference_cpp.sh`,可
 
 - 推理相关:
 
-| 算法名称 | 模型名称 | device_CPU | device_GPU |
-| :----: | :----: | :----: | :----: |
-| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
-| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
-| PP-ShiTu | PPShiTu_mainbody_det | 支持 | 支持 |
-| PPHGNet | PPHGNet_small | 支持 | 支持 |
-| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
-| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
-| ResNet | ResNet50 | 支持 | 支持 |
-| ResNet | ResNet50_vd | 支持 | 支持 |
-| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| 算法名称 | 模型名称 | device_CPU | device_GPU |
+| :-------------: | :---------------------------------------: | :--------: | :--------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0-KL | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_mainbody_det | 支持 | 支持 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
+| ResNet | ResNet50 | 支持 | 支持 |
+| ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd-KL | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
 
 
 ## 2. 测试流程(以**ResNet50**为例)
diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md
index 736e18ac..2c8e6ac6 100644
--- a/test_tipc/docs/test_serving_infer_cpp.md
+++ b/test_tipc/docs/test_serving_infer_cpp.md
@@ -10,6 +10,7 @@ Linux GPU/CPU C++ 服务化部署测试的主程序为`test_serving_infer_cpp.sh
 | 算法名称 | 模型名称 | device_CPU | device_GPU |
 | :-------------: | :---------------------------------------: | :--------: | :--------: |
 | MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0-KL | 支持 | 支持 |
 | PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
 | PPHGNet | PPHGNet_small | 支持 | 支持 |
 | PPHGNet | PPHGNet_tiny | 支持 | 支持 |
@@ -24,6 +25,7 @@ Linux GPU/CPU C++ 服务化部署测试的主程序为`test_serving_infer_cpp.sh
 | PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
 | ResNet | ResNet50 | 支持 | 支持 |
 | ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd-KL | 支持 | 支持 |
 | SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
 
 
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
index c336e3ec..7b65e444 100644
--- a/test_tipc/docs/test_serving_infer_python.md
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -10,6 +10,7 @@ Linux GPU/CPU PYTHON 服务化部署测试的主程序为`test_serving_infer_pyt
 | 算法名称 | 模型名称 | device_CPU | device_GPU |
 | :-------------: | :---------------------------------------: | :--------: | :--------: |
 | MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0-KL | 支持 | 支持 |
 | PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
 | PPHGNet | PPHGNet_small | 支持 | 支持 |
 | PPHGNet | PPHGNet_tiny | 支持 | 支持 |
@@ -24,6 +25,7 @@ Linux GPU/CPU PYTHON 服务化部署测试的主程序为`test_serving_infer_pyt
 | PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
 | ResNet | ResNet50 | 支持 | 支持 |
 | ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd-KL | 支持 | 支持 |
 | SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
 
 
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 8ed4122f..60ea5ab9 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -50,8 +50,6 @@ if [[ ${MODE} = "cpp_infer" ]]; then
         echo "################### build opencv ###################"
         rm -rf ./deploy/cpp/opencv-3.4.7.tar.gz ./deploy/cpp/opencv-3.4.7/
         pushd ./deploy/cpp/
-        wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
-        tar xf paddle_inference.tgz
         wget -nc https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
         tar -xf opencv-3.4.7.tar.gz
 
@@ -85,6 +83,12 @@ if [[ ${MODE} = "cpp_infer" ]]; then
         popd
         echo "################### build opencv finished ###################"
     fi
+    if [[ ! -d "./deploy/cpp/paddle_inference/" ]]; then
+        pushd ./deploy/cpp/
+        wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
+        tar xf paddle_inference.tgz
+        popd
+    fi
     if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]]; then
         cpp_type=$(func_parser_value "${lines[2]}")
         cls_inference_model_dir=$(func_parser_value "${lines[3]}")
--
GitLab
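
Reviewer note (not part of the patch): each new *_serving_*.txt config above is a key:value list that the TIPC serving test scripts read and splice into real commands; the exact command lines are assembled by those scripts, so the shell below is only an illustrative sketch built from the values in the ResNet50_vd-KL python serving config (interpreter, paths and flags are taken verbatim from that file).

    # Sketch: convert the downloaded KL-quant inference model into Paddle Serving
    # server/client formats, using the flag values listed in the config file.
    python3.7 -m paddle_serving_client.convert \
        --dirname ./deploy/paddleserving/ResNet50_vd_kl_quant_infer/ \
        --model_filename inference.pdmodel \
        --params_filename inference.pdiparams \
        --serving_server ./deploy/paddleserving/ResNet50_vd_kl_quant_serving/ \
        --serving_client ./deploy/paddleserving/ResNet50_vd_kl_quant_client/

    # Sketch: the python serving chain then launches the web service from
    # serving_dir and sends a test request, per the web_service/pipline entries.
    cd ./deploy/paddleserving
    python3.7 classification_web_service.py &
    python3.7 pipeline_http_client.py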