diff --git a/test_tipc/README.md b/test_tipc/README.md
index fd62226d1db5bba8bc61cd2a7e5c0697c954f3a1..d9a0d577e4c4b33064cdc6f0997edc87a781e3e8 100644
--- a/test_tipc/README.md
+++ b/test_tipc/README.md
@@ -107,6 +107,7 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/yolov3/yolov3_
 - [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation, and inference, including pruning, quantization, and distillation.
 - [test_train_fleet_inference_python usage](./docs/test_train_fleet_inference_python.md): tests basic Python-based multi-machine, multi-GPU training and inference.
 - [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
-- [test_serving usage](docs/test_serving.md): tests Paddle Serving-based service deployment.
-- [test_lite_arm_cpu_cpp usage](./): tests Paddle-Lite-based C++ inference deployment on ARM CPU.
+- [test_serving usage](docs/test_serving.md): tests Paddle Serving-based service deployment, covering both Python and C++.
+- test_lite_arm_cpu_cpp usage (under development): tests Paddle-Lite-based C++ inference deployment on ARM CPU.
 - [test_paddle2onnx usage](docs/test_paddle2onnx.md): tests Paddle2ONNX model conversion and verifies its correctness.
+- [test_ptq_inference_python usage](docs/test_ptq_inference_python.md): tests Python-based post-training (offline) quantization.
diff --git a/test_tipc/configs/keypoint/tinypose_128x96_KL_train_infer_python.txt b/test_tipc/configs/keypoint/tinypose_128x96_KL_train_infer_python.txt
index d0c13368d12188e94e417babe871637881f13f36..650d0e3bcfe398722fc9e97b946617a0af0bdc56 100644
--- a/test_tipc/configs/keypoint/tinypose_128x96_KL_train_infer_python.txt
+++ b/test_tipc/configs/keypoint/tinypose_128x96_KL_train_infer_python.txt
@@ -37,7 +37,7 @@ kl_quant_export:tools/post_quant.py -c configs/keypoint/tiny_pose/tinypose_128x9
 ##
 infer_mode:kl_quant
 infer_quant:True
-inference:./deploy/python/infer.py
+inference:./deploy/python/keypoint_infer.py
 --device:gpu|cpu
 --enable_mkldnn:False
 --cpu_threads:4
diff --git a/test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
index 98111895219632c6390bdbfeb79d772dfc8c0ebc..b910577037cff28ad324bd588bdd2f8039f24856 100644
--- a/test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -37,7 +37,7 @@ kl_quant_export:tools/post_quant.py -c configs/keypoint/tiny_pose/tinypose_128x9
 ##
 infer_mode:norm
 infer_quant:False
-inference:./deploy/python/infer.py
+inference:./deploy/python/keypoint_infer.py
 --device:gpu|cpu
 --enable_mkldnn:False
 --cpu_threads:4
diff --git a/test_tipc/configs/keypoint/tinypose_128x96_train_ptq_infer_python.txt b/test_tipc/configs/keypoint/tinypose_128x96_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d44a30c638671326dd8b2126505e78ab4c45f796
--- /dev/null
+++ b/test_tipc/configs/keypoint/tinypose_128x96_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:tinypose_128x96
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams
+kl_quant_export:tools/post_quant.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/keypoint_infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_ptq_infer_python.txt b/test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c58b7a254a2e934b264ea4a4757125760f651b9e
--- /dev/null
+++ b/test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:mask_rcnn_r50_fpn_1x_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_l_640_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_l_640_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..58365209730f696a695d57dc88ac76582ba1cc8c
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_l_640_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_l_640_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_l_640_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91198f1298c01050dce664f5dd411bfdc74aa886
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_lcnet_1_5x_416_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_m_416_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_m_416_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fbb0d34be8ecb5b9a95c25bdcfd1225b03341d01
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_m_416_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_m_416_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_mobilenetv3_large_1x_416_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_mobilenetv3_large_1x_416_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..99d6aedb37584dc683e9fb20c030a1bc0c357daf
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_mobilenetv3_large_1x_416_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_mobilenetv3_large_1x_416_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_mobilenetv3_large_1x_416_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_r18_640_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_r18_640_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0c19b56399f036b5a37a1dcaad5755f7b47a9689
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_r18_640_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_r18_640_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_r18_640_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_s_320_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_s_320_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..24c9c209d27427b8dbb4502cd221411df19b85dd
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_s_320_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_s_320_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_shufflenetv2_1x_416_coco_train_ptq_infer_python.txt b/test_tipc/configs/picodet/picodet_shufflenetv2_1x_416_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a323084ddc5da2fb2097d2819122238662184dd1
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_shufflenetv2_1x_416_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:picodet_shufflenetv2_1x_416_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_shufflenetv2_1x_416_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6601ba2a494157e20537e4f9fbacf7d9ad7fb4d1
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolo_mbv3_large_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_mbv3_small_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolo_mbv3_small_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..777b7296a623740a31a007340af3ec1525a42a7a
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_mbv3_small_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolo_mbv3_small_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_small_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_r18vd_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolo_r18vd_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..27441e2692fcec9217e6e9559ae47dced4cf99b6
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_r18vd_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolo_r18vd_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_r18vd_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d15b9617ffd459afd19b5e619dfbd2c36d0abc6f
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolo_r50vd_dcn_1x_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d01c05fbbd536647ca8b6ff087779dd86771dbc9
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolo_tiny_650e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_tiny_650e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0a95be847ee87ccb704ea6d3ee7fe020e2e7ab3d
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolov2_r101vd_dcn_365e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r101vd_dcn_365e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5a369cef7334903c1d7ea19668a12833272266e8
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyolov2_r50vd_dcn_365e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_l_300e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_l_300e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1b7e28852be2a66168d2fe309bc1ea243e22903e
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_l_300e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyoloe_crn_l_300e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_m_300e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_m_300e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c994a6da90e207c3f0c0c02c041431d635e877e6
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_m_300e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyoloe_crn_m_300e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_m_300e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77ef6a1cf1fd6f4600946fddd7507e603e5d0f94
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyoloe_crn_s_300e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_x_300e_coco_train_ptq_infer_python.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_x_300e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..53e5455c9da4553ce5742456759da34f0b147905
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_x_300e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:ppyoloe_crn_x_300e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_x_300e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_ptq_infer_python.txt b/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..49383b726fd1e8fa118cce5fb068b5feabe63f1d
--- /dev/null
+++ b/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_ptq_infer_python.txt
@@ -0,0 +1,20 @@
+===========================ptq_params===========================
+model_name:yolov3_darknet53_270e_coco
+python:python3.7
+filename:
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
+kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+export_param1:null
+##
+inference:./deploy/python/infer.py
+--device:gpu|cpu
+--enable_mkldnn:False
+--cpu_threads:4
+--batch_size:1|2
+--run_mode:paddle
+--model_dir:
+--image_dir:./dataset/coco/test2017/
+--run_benchmark:False
+null:null
\ No newline at end of file
diff --git a/test_tipc/docs/test_ptq_inference_python.md b/test_tipc/docs/test_ptq_inference_python.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b1c04c5b01b5d67ba285e88b1b8c9e3361c2b82
--- /dev/null
+++ b/test_tipc/docs/test_ptq_inference_python.md
@@ -0,0 +1,55 @@
+# Linux GPU/CPU post-training (offline) quantization test
+
+The main driver of the Linux GPU/CPU offline quantization test is `test_ptq_inference_python.sh`, which exercises the Python-based post-training quantization workflow.
+
+## 1. Test coverage summary
+
+| Model type | Device | Batch size | TensorRT | MKL-DNN | CPU multithreading |
+| ---- | ---- |-----------| :----: | :----: | :----: |
+| Quantized model | GPU | 1/2 | int8 | - | - |
+| Quantized model | CPU | 1/2 | - | int8 | supported |
+
+## 2. Test procedure
+### 2.1 Functional testing
+First run `prepare.sh` to prepare the data and model, then run `test_ptq_inference_python.sh` to execute the test; log files with the `python_infer_*.log` suffix are generated under the `test_tipc/output` directory (per model and mode, e.g. `test_tipc/output/yolov3_darknet53_270e_coco/whole_infer/`).
+
+```shell
+bash test_tipc/prepare.sh ./test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_ptq_infer_python.txt "whole_infer"
+
+# Usage:
+bash test_tipc/test_ptq_inference_python.sh ./test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_ptq_infer_python.txt
+```
+
+#### Results
+
+The outcome of each test run is appended to `results_ptq_python.log` under the output directory.
+On success it prints:
+
+```
+Run successfully with command - yolov3_darknet53_270e_coco - python3.7 tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o weights=https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams filename=yolov3_darknet53_270e_coco --output_dir=./output_inference !
+Run successfully with command - yolov3_darknet53_270e_coco - python3.7 ./deploy/python/infer.py --device=gpu --run_mode=paddle --model_dir=./output_inference/yolov3_darknet53_270e_coco --batch_size=2 --image_dir=./dataset/coco/test2017/ --run_benchmark=False > ./test_tipc/output/yolov3_darknet53_270e_coco/whole_infer/python_infer_gpu_mode_paddle_batchsize_2.log 2>&1 !
+...
+```
+
+On failure it prints:
+
+```
+Run failed with command - yolov3_darknet53_270e_coco - python3.7 tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o weights=https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams filename=yolov3_darknet53_270e_coco --output_dir=./output_inference!
+...
+```
+
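+For reference, the quantization step that this harness wraps is a single `tools/post_quant.py` call. A minimal sketch of the equivalent manual invocation, mirroring the YOLOv3 command from the success log above:
+
+```shell
+# Post-training quantization of a pretrained model, exported for deployment:
+python3.7 tools/post_quant.py \
+    -c configs/yolov3/yolov3_darknet53_270e_coco.yml \
+    --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml \
+    -o weights=https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams \
+       filename=yolov3_darknet53_270e_coco \
+    --output_dir=./output_inference
+```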
+
+## 3. Further reading
+
+This document covers functional testing only; for a detailed guide to offline quantization, see the [official Paddle post-training quantization tutorial](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/api_cn/static/quant/quantization_api.rst#quant_post_static).
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index d6ed5ac5df12220c1be7773cf100631fa5bf7123..6da92064d7b1958452f8c577d427493756c9e6bf 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -2,6 +2,7 @@
 source test_tipc/utils_func.sh
 
 FILENAME=$1
+MODE="cpp_infer"
 
 # parser model_name
 dataline=$(cat ${FILENAME})
@@ -54,7 +55,7 @@ cpp_benchmark_value=$(func_parser_value "${lines[27]}")
 cpp_infer_key1=$(func_parser_key "${lines[28]}")
 cpp_infer_value1=$(func_parser_value "${lines[28]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_cpp.log"
 
@@ -74,7 +75,7 @@ function func_cpp_inference(){
             fi
             for threads in ${cpp_cpu_threads_list[*]}; do
                 for batch_size in ${cpp_batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_fluid_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
                     set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
                     set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
@@ -91,7 +92,7 @@ function func_cpp_inference(){
         done
     elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
        for precision in ${cpp_precision_list[*]}; do
-            if [[ ${precision} != "fluid" ]]; then
+            if [[ ${precision} != "paddle" ]]; then
                 if [[ ${_flag_quant} = "False" ]] && [[ ${precision} = "trt_int8" ]]; then
                     continue
                 fi
@@ -100,7 +101,7 @@ function func_cpp_inference(){
                 fi
             fi
             for batch_size in ${cpp_batch_size_list[*]}; do
-                _save_log_path="${_log_path}/cpp_infer_gpu_precision_${precision}_batchsize_${batch_size}.log"
+                _save_log_path="${_log_path}/cpp_infer_gpu_mode_${precision}_batchsize_${batch_size}.log"
                 set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
                 set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
                 set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
@@ -183,6 +184,7 @@ else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
 eval $env
+
 # run cpp infer
 Count=0
 IFS="|"
@@ -201,9 +203,10 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
         set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
         set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
         set_filename=$(func_set_params "${filename_key}" "${model_name}")
+        export_log_path="${LOG_PATH}/export.log"
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
         echo $export_cmd
-        eval $export_cmd
+        eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
     fi
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 50db0f510633b5edc956f9c7e966e6de6e129501..b2a161a9408ecfae354e93deacc82156cd9c8a22 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -2,6 +2,7 @@
 source test_tipc/utils_func.sh
 
 FILENAME=$1
+MODE="paddle2onnx_infer"
 
 # parser model_name
 dataline=$(cat ${FILENAME})
@@ -56,7 +57,7 @@ infer_image_value=$(func_parser_value "${lines[28]}")
 infer_param1_key=$(func_parser_key "${lines[29]}")
 infer_param1_value=$(func_parser_value "${lines[29]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_paddle2onnx.log"
 
@@ -68,7 +69,6 @@ function func_paddle2onnx_inference(){
 
     # paddle2onnx
     echo "################### run paddle2onnx ###################"
-    _save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
     set_dirname=$(func_set_params "${model_dir_key}" "${_export_model_dir}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
@@ -76,8 +76,9 @@ function func_paddle2onnx_inference(){
     set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
     set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
     set_paddle2onnx_params1=$(func_set_params "${paddle2onnx_params1_key}" "${paddle2onnx_params1_value}")
+    trans_log_path="${_log_path}/trans_model.log"
     trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} ${set_paddle2onnx_params1}"
-    eval $trans_model_cmd
+    eval "${trans_model_cmd} > ${trans_log_path} 2>&1"
     last_status=${PIPESTATUS[0]}
     status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
 
@@ -87,8 +88,9 @@ function func_paddle2onnx_inference(){
     set_onnx_file=$(func_set_params "${onnx_file_key}" "${_export_model_dir}/${save_file_value}")
     set_infer_image_file=$(func_set_params "${infer_image_key}" "${infer_image_value}")
     set_infer_param1=$(func_set_params "${infer_param1_key}" "${infer_param1_value}")
-    infer_model_cmd="${python} ${inference_py} ${set_infer_cfg} ${set_onnx_file} ${set_infer_image_file} ${set_infer_param1} > ${_save_log_path} 2>&1 "
-    eval $infer_model_cmd
+    _save_log_path="${_log_path}/paddle2onnx_infer_cpu.log"
+    infer_model_cmd="${python} ${inference_py} ${set_infer_cfg} ${set_onnx_file} ${set_infer_image_file} ${set_infer_param1}"
+    eval "${infer_model_cmd} > ${_save_log_path} 2>&1"
     last_status=${PIPESTATUS[0]}
     status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
 }
@@ -110,9 +112,10 @@ for infer_mode in ${infer_mode_list[*]}; do
         set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
         set_filename=$(func_set_params "${filename_key}" "${model_name}")
         set_export_param=$(func_set_params "${export_param_key}" "${export_param_value}")
+        export_log_path="${LOG_PATH}/export.log"
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_export_param} ${set_save_export_dir} "
         echo $export_cmd
-        eval $export_cmd
+        eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
     fi
diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ed44ec5aff06eff31f6bdf5d1b3056e31c3bfef2
--- /dev/null
+++ b/test_tipc/test_ptq_inference_python.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+source test_tipc/utils_func.sh
+
+FILENAME=$1
+MODE="whole_infer"
+
+# parser model_name
+dataline=$(cat ${FILENAME})
+IFS=$'\n'
+lines=(${dataline})
+model_name=$(func_parser_value "${lines[1]}")
+echo "ppdet ptq: ${model_name}"
+python=$(func_parser_value "${lines[2]}")
+filename_key=$(func_parser_key "${lines[3]}")
+
+# parser export params
+save_export_key=$(func_parser_key "${lines[5]}")
+save_export_value=$(func_parser_value "${lines[5]}")
+export_weight_key=$(func_parser_key "${lines[6]}")
+export_weight_value=$(func_parser_value "${lines[6]}")
+kl_quant_export=$(func_parser_value "${lines[7]}")
+export_param1_key=$(func_parser_key "${lines[8]}")
+export_param1_value=$(func_parser_value "${lines[8]}")
+
+# parser infer params
+inference_py=$(func_parser_value "${lines[10]}")
+device_key=$(func_parser_key "${lines[11]}")
+device_list=$(func_parser_value "${lines[11]}")
+use_mkldnn_key=$(func_parser_key "${lines[12]}")
+use_mkldnn_list=$(func_parser_value "${lines[12]}")
+cpu_threads_key=$(func_parser_key "${lines[13]}")
+cpu_threads_list=$(func_parser_value "${lines[13]}")
+batch_size_key=$(func_parser_key "${lines[14]}")
+batch_size_list=$(func_parser_value "${lines[14]}")
+run_mode_key=$(func_parser_key "${lines[15]}")
+run_mode_list=$(func_parser_value "${lines[15]}")
+model_dir_key=$(func_parser_key "${lines[16]}")
+image_dir_key=$(func_parser_key "${lines[17]}")
+image_dir_value=$(func_parser_value "${lines[17]}")
+run_benchmark_key=$(func_parser_key "${lines[18]}")
+run_benchmark_value=$(func_parser_value "${lines[18]}")
+infer_param1_key=$(func_parser_key "${lines[19]}")
+infer_param1_value=$(func_parser_value "${lines[19]}")
+
+
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_ptq_python.log"
+
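+# func_ptq_inference runs the exported quantized model through the Python
+# deploy script: on CPU it sweeps use_mkldnn x cpu_threads x batch_size, and
+# on GPU it sweeps run_mode (paddle / trt_int8) x batch_size, logging each run.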
+function func_ptq_inference(){
+    IFS='|'
+    _python=$1
+    _log_path=$2
+    _script=$3
+    _set_model_dir=$4
+
+    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
+    set_run_benchmark=$(func_set_params "${run_benchmark_key}" "${run_benchmark_value}")
+    set_infer_param1=$(func_set_params "${infer_param1_key}" "${infer_param1_value}")
+    # inference
+    for device in ${device_list[*]}; do
+        set_device=$(func_set_params "${device_key}" "${device}")
+        if [ ${device} = "cpu" ]; then
+            for use_mkldnn in ${use_mkldnn_list[*]}; do
+                set_use_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
+                for threads in ${cpu_threads_list[*]}; do
+                    set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
+                    for batch_size in ${batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
+                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                        command="${_python} ${_script} ${set_device} ${set_use_mkldnn} ${set_cpu_threads} ${_set_model_dir} ${set_batchsize} ${set_image_dir} ${set_run_benchmark} ${set_infer_param1} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    done
+                done
+            done
+        elif [ ${device} = "gpu" ]; then
+            for run_mode in ${run_mode_list[*]}; do
+                if [[ ${run_mode} = "paddle" ]] || [[ ${run_mode} = "trt_int8" ]]; then
+                    for batch_size in ${batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/python_infer_gpu_mode_${run_mode}_batchsize_${batch_size}.log"
+                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                        set_run_mode=$(func_set_params "${run_mode_key}" "${run_mode}")
+                        command="${_python} ${_script} ${set_device} ${set_run_mode} ${_set_model_dir} ${set_batchsize} ${set_image_dir} ${set_run_benchmark} ${set_infer_param1} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    done
+                fi
+            done
+        else
+            echo "Currently, hardware other than CPU and GPU is not supported!"
+        fi
+    done
+}
+
+IFS="|"
+# run ptq
+set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
+set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
+set_filename=$(func_set_params "${filename_key}" "${model_name}")
+export_log_path="${LOG_PATH}/export.log"
+ptq_cmd="${python} ${kl_quant_export} ${set_export_weight} ${set_filename} ${set_save_export_dir}"
+echo $ptq_cmd
+eval "${ptq_cmd} > ${export_log_path} 2>&1"
+status_export=$?
+status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}"
+
+#run inference
+set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
+func_ptq_inference "${python}" "${LOG_PATH}" "${inference_py}" "${set_export_model_dir}"
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index 17c3a50ec6c7ba951fd19686a72bc1ad093b833f..d8371a8383467b951895418fab209ec36bea94a8 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -2,13 +2,14 @@
 source test_tipc/utils_func.sh
 
 FILENAME=$1
+MODE="serving_infer"
 
 # parser model_name
 dataline=$(cat ${FILENAME})
 IFS=$'\n'
 lines=(${dataline})
 model_name=$(func_parser_value "${lines[1]}")
-echo "ppdet serving_infer: ${model_name}"
+echo "ppdet serving_cpp_infer: ${model_name}"
 python=$(func_parser_value "${lines[2]}")
 filename_key=$(func_parser_key "${lines[3]}")
 filename_value=$(func_parser_value "${lines[3]}")
@@ -48,7 +49,7 @@ infer_image_value=$(func_parser_value "${lines[24]}")
 http_client_key1=$(func_parser_key "${lines[25]}")
 http_client_value1=$(func_parser_value "${lines[25]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_serving_cpp.log"
 
@@ -67,23 +68,25 @@ function func_serving_inference(){
     # inference
     for gpu_ids in ${gpu_ids_value[*]}; do
         if [ ${gpu_ids} = "null" ];then
-            _save_log_path="${_log_path}/serving_infer_cpp_cpu_batchsize_1.log"
+            server_log_path="${_log_path}/cpp_server_cpu.log"
+            client_log_path="${_log_path}/cpp_client_cpu.log"
         else
-            _save_log_path="${_log_path}/serving_infer_cpp_gpu_batchsize_1.log"
+            server_log_path="${_log_path}/cpp_server_gpu.log"
+            client_log_path="${_log_path}/cpp_client_gpu.log"
         fi
         set_gpu_ids=$(func_set_params "${gpu_ids_key}" "${gpu_ids}")
         # run web service
-        web_service_cmd="${_python} -m paddle_serving_server.serve ${_set_server_model_dir} ${set_op} ${set_port} ${set_gpu_ids} ${set_web_service_params1} &"
+        web_service_cmd="${_python} -m paddle_serving_server.serve ${_set_server_model_dir} ${set_op} ${set_port} ${set_gpu_ids} ${set_web_service_params1} > ${server_log_path} 2>&1 &"
        eval $web_service_cmd
         last_status=${PIPESTATUS[0]}
         status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         # run http client
-        http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${_save_log_path} 2>&1 "
+        http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
         eval $http_client_cmd
         last_status=${PIPESTATUS[0]}
         status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
-        eval "cat ${_save_log_path}"
+        eval "cat ${client_log_path}"
         ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
         sleep 2s
     done
@@ -107,9 +110,10 @@ for infer_mode in ${infer_mode_list[*]}; do
         set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
         set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
         set_filename=$(func_set_params "${filename_key}" "${model_name}")
+        export_log_path="${LOG_PATH}/export.log"
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
         echo $export_cmd
-        eval $export_cmd
+        eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
     fi
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 4b16cb24a5af63b93e51d62181621a2a413b64fe..e76f2edb48138817c8cf08e8f0b8602a5fbe199c 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -2,13 +2,14 @@
 source test_tipc/utils_func.sh
 
 FILENAME=$1
+MODE="serving_infer"
 
 # parser model_name
 dataline=$(cat ${FILENAME})
 IFS=$'\n'
 lines=(${dataline})
 model_name=$(func_parser_value "${lines[1]}")
-echo "ppdet serving_infer: ${model_name}"
+echo "ppdet serving_python_infer: ${model_name}"
 python=$(func_parser_value "${lines[2]}")
 filename_key=$(func_parser_key "${lines[3]}")
 filename_value=$(func_parser_value "${lines[3]}")
@@ -44,7 +45,7 @@ infer_image_value=$(func_parser_value "${lines[22]}")
 http_client_key1=$(func_parser_key "${lines[23]}")
 http_client_value1=$(func_parser_value "${lines[23]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_serving_python.log"
 
@@ -61,21 +62,22 @@ function func_serving_inference(){
     # inference
     for opt in ${opt_use_gpu_list[*]}; do
         device_type=$(func_parser_key "${opt}")
-        _save_log_path="${_log_path}/serving_infer_python_${device_type}_batchsize_1.log"
+        server_log_path="${_log_path}/python_server_${device_type}.log"
+        client_log_path="${_log_path}/python_client_${device_type}.log"
         opt_value=$(func_parser_value "${opt}")
         _set_opt=$(func_set_params "${opt_key}" "${opt_value}")
         # run web service
-        web_service_cmd="${_python} ${_service_script} ${_set_model_dir} ${_set_opt} ${set_web_service_params1} &"
+        web_service_cmd="${_python} ${_service_script} ${_set_model_dir} ${_set_opt} ${set_web_service_params1} > ${server_log_path} 2>&1 &"
         eval $web_service_cmd
         last_status=${PIPESTATUS[0]}
         status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         # run http client
-        http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${_save_log_path} 2>&1 "
+        http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
         eval $http_client_cmd
        last_status=${PIPESTATUS[0]}
         status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
-        eval "cat ${_save_log_path}"
+        eval "cat ${client_log_path}"
         ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
         sleep 2s
     done
@@ -108,9 +110,10 @@ for infer_mode in ${infer_mode_list[*]}; do
         set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
         set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
         set_filename=$(func_set_params "${filename_key}" "${model_name}")
+        export_log_path="${LOG_PATH}/export.log"
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
         echo $export_cmd
-        eval $export_cmd
+        eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
     fi
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 117c7279517b9b14a1c12afc2b47a8c2d013f652..e36644462771dded1c05649fe75fb0cec3b5b2bf 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -92,7 +92,7 @@ benchmark_value=$(func_parser_value "${lines[49]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")
 
-LOG_PATH="./test_tipc/output/${model_name}"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
 
@@ -114,7 +114,7 @@ function func_inference(){
             fi
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_fluid_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                     set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                     set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -131,7 +131,7 @@ function func_inference(){
         done
     elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
         for precision in ${precision_list[*]}; do
-            if [[ ${precision} != "fluid" ]]; then
+            if [[ ${precision} != "paddle" ]]; then
                 if [[ ${_flag_quant} = "False" ]] && [[ ${precision} = "trt_int8" ]]; then
                     continue
                 fi
@@ -140,7 +140,7 @@ function func_inference(){
                 fi
             fi
             for batch_size in ${batch_size_list[*]}; do
-                _save_log_path="${_log_path}/python_infer_gpu_precision_${precision}_batchsize_${batch_size}.log"
+                _save_log_path="${_log_path}/python_infer_gpu_mode_${precision}_batchsize_${batch_size}.log"
                 set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                 set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -276,6 +276,7 @@ else
 
         save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
         set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
+        nodes="1"
         if [ ${#gpu} -le 2 ];then  # train with cpu or single gpu
             cmd="${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}"
         elif [ ${#ips} -le 15 ];then  # train with multi-gpu
@@ -290,15 +291,17 @@ else
            cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}"
         fi
         # run train
-        eval $cmd
+        train_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}.log"
+        eval "${cmd} > ${train_log_path} 2>&1"
         status_check $? "${cmd}" "${status_log}" "${model_name}"
 
         set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
         # run eval
         if [ ${eval_py} != "null" ]; then
             set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
+            eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
             eval_cmd="${python} ${eval_py} ${set_eval_trained_weight} ${set_use_gpu} ${set_eval_params1}"
-            eval $eval_cmd
+            eval "${eval_cmd} > ${eval_log_path} 2>&1"
             status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
         fi
         # run export model
@@ -315,8 +318,9 @@ else
             eval "cp ${save_export_model_dir}/* ${save_log}/"
         fi
         # run export model
+        export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
-        eval $export_cmd
+        eval "${export_cmd} > ${export_log_path} 2>&1"
         status_check $? "${export_cmd}" "${status_log}" "${model_name}"
 
         #run inference