diff --git a/deploy/serving/python/pipeline_http_client.py b/deploy/serving/python/pipeline_http_client.py
index 9f5dbb8b7def41eddda3482579a6b7d5b266630d..fa9b30c0d79bf5a7e0d5da7a2538580e7452f8bb 100644
--- a/deploy/serving/python/pipeline_http_client.py
+++ b/deploy/serving/python/pipeline_http_client.py
@@ -22,6 +22,8 @@ import argparse
 parser = argparse.ArgumentParser(description="args for paddleserving")
 parser.add_argument("--image_dir", type=str)
 parser.add_argument("--image_file", type=str)
+parser.add_argument("--http_port", type=int, default=18093)
+parser.add_argument("--service_name", type=str, default="ppdet")
 args = parser.parse_args()
 
 
@@ -57,7 +59,7 @@ def get_test_images(infer_dir, infer_img):
 
 
 if __name__ == "__main__":
-    url = "http://127.0.0.1:18093/ppdet/prediction"
+    url = f"http://127.0.0.1:{args.http_port}/{args.service_name}/prediction"
     logid = 10000
 
     img_list = get_test_images(args.image_dir, args.image_file)
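The two new flags make the endpoint configurable instead of hard-coded. A quick smoke test of the change might look like this (a sketch; it assumes the service is started with the default `config.yml`, which exposes port 18093 and the `ppdet` service name, so the values shown are simply the new argparse defaults):

```shell
# Start the pipeline web service in the background, then query it
# with the new client flags.
python3.7 deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml &
sleep 5s
python3.7 deploy/serving/python/pipeline_http_client.py \
    --image_file=./demo/000000014439.jpg \
    --http_port=18093 \
    --service_name=ppdet
```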
diff --git a/test_tipc/README.md b/test_tipc/README.md
index 8df5e22307bba12a580eeea4082f1e44e035db2e..244813e748cff6516299f70f59ef12b25e3898f5 100644
--- a/test_tipc/README.md
+++ b/test_tipc/README.md
@@ -107,6 +107,6 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/yolov3/yolov3_
 - [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation, and inference, including pruning, quantization, and distillation.
 - [test_train_fleet_inference_python usage](./docs/test_train_fleet_inference_python.md): tests basic multi-machine, multi-GPU training and inference with Python.
 - [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
-- [test_serving usage](./): tests service-oriented deployment based on Paddle Serving.
+- [test_serving usage](docs/test_serving.md): tests service-oriented deployment based on Paddle Serving.
 - [test_lite_arm_cpu_cpp usage](./): tests C++ inference deployment on ARM CPU with Paddle-Lite.
 - [test_paddle2onnx usage](./): tests Paddle2ONNX model conversion and verifies its correctness.
diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh
index 47415bbd7e8da3961b368e264d273e8132724136..794f2302db663297b82260e5d891719cd03a931e 100644
--- a/test_tipc/benchmark_train.sh
+++ b/test_tipc/benchmark_train.sh
@@ -206,7 +206,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         else
             IFS=";"
             unset_env=`unset CUDA_VISIBLE_DEVICES`
@@ -242,7 +242,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         fi
     done
done
diff --git a/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
index 59ed9b673a9d61690bea0c526fa388f070c2a253..b609cb26dc2040d92039228e13899ec75619e100 100644
--- a/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -23,7 +23,7 @@ inference:./deploy/cpp/build/main
 --batch_size:1|2
 --use_tensorrt:null
 --run_mode:paddle
---model_dir:
+--model_dir_keypoint:
 --image_dir:./dataset/coco/test2017/
 --run_benchmark:False
-null:null
\ No newline at end of file
+--model_dir:./output_inference/picodet_s_320_pedestrian
\ No newline at end of file
diff --git a/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c1acbdf003c5eeca77a98e06fd1578e9c246eb64
--- /dev/null
+++ b/test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:tinypose_128x96
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams
+norm_export:tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config configs/slim/post_quant/tinypose_128x96_ptq.yml --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/hrnet_demo.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59106a4c4797271bd922c00f0f592a1896950ce6
--- /dev/null
+++ b/test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:mask_rcnn_r50_fpn_1x_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams
+norm_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/post_quant/mask_rcnn_r50_fpn_1x_coco_ptq.yml --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
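The serving configs added here and below all follow the same 24-line `key:value` layout; the test driver addresses entries by 0-based line index (for example, `lines[17]` is the `web_service:` entry) and splits each line on the first colon. A minimal illustration of that split, using plain parameter expansion rather than the real `func_parser_key`/`func_parser_value` helpers from `test_tipc/utils_func.sh`:

```shell
# Illustration only; the real parsing helpers live in test_tipc/utils_func.sh.
line="--image_file:./demo/000000014439.jpg"
key=${line%%:*}     # -> --image_file
value=${line#*:}    # -> ./demo/000000014439.jpg (later colons are preserved)
echo "${key} = ${value}"
```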
diff --git a/test_tipc/configs/picodet/picodet_l_640_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_l_640_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..472b6c139bc606ea31cf86b3a745ba95a2e6bda6
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_l_640_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_l_640_coco_lcnet
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams
+norm_export:tools/export_model.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_l_640_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_l_640_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..75c5ba6615642f0ac61eafcf2601ac6e9179ae4c
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_l_640_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_l_640_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_l_640_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c99ec0bf710e543d4baaa1e198edafea8c3c9e2b
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_lcnet_1_5x_416_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_m_416_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_m_416_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1e7c5ed7da4b98336b37cdc08a8fc370aa2a91a0
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_m_416_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_m_416_coco_lcnet
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams
+norm_export:tools/export_model.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_m_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_m_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5f012cefd80f673e7d3a3ccb747e5d82860f09d4
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_m_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_m_416_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_mobilenetv3_large_1x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_mobilenetv3_large_1x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..266e2fb18f06082f5e285df59db163a4e06b5d3a
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_mobilenetv3_large_1x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_mobilenetv3_large_1x_416_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_mobilenetv3_large_1x_416_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_r18_640_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_r18_640_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3ee56765bcad25a8d6336f28534e001ff495f027
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_r18_640_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_r18_640_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_r18_640_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_s_320_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_s_320_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b69999e3797fbdeb7024934f1f3c830c11000d31
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_s_320_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_s_320_coco_lcnet
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
+norm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_s_320_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_s_320_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f3cf5d80645c734103bd2aa5e82ae69ba8d231e4
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_s_320_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_s_320_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config configs/slim/post_quant/picodet_s_ptq.yml --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_shufflenetv2_1x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_shufflenetv2_1x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ddee7eac6ffd684cd30d077da5d9b68f39c33521
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_shufflenetv2_1x_416_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_shufflenetv2_1x_416_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_shufflenetv2_1x_416_coco.pdparams
+norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/picodet/picodet_xs_320_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/picodet/picodet_xs_320_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..99b5ce003586ecfd5be49fe455cccfc4852729c1
--- /dev/null
+++ b/test_tipc/configs/picodet/picodet_xs_320_coco_lcnet_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:picodet_xs_320_coco_lcnet
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams
+norm_export:tools/export_model.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2fb74136a1c40267311b10ee40dd0687ac7bce8e
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolo_mbv3_large_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_mbv3_small_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolo_mbv3_small_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5c6caaa18e9ab20f23163883d6bafb33283932c7
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_mbv3_small_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolo_mbv3_small_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_small_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_r18vd_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolo_r18vd_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8c28accbcc91d498eaa4b59ae4347c7502066363
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_r18vd_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolo_r18vd_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_r18vd_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b09d63c939c5d59f4c81179df9e6b8deca42e441
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolo_r50vd_dcn_1x_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/quant/ppyolo_r50vd_qat_pact.yml --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/prune/ppyolo_r50vd_prune_fpgm.yml --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cf1fef2a8acc01434af814152999fc929c3d94ad
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolo_tiny_650e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolo_tiny_650e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..21e97220ee6771d6a334722672be2eb07ea86468
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolov2_r101vd_dcn_365e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r101vd_dcn_365e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a346009f63c60d1fbf2f1a8989fc407b42eb6f3
--- /dev/null
+++ b/test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyolov2_r50vd_dcn_365e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_l_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_l_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fe5a495546a2baa26c3d89770ceada2a4b64eed7
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_l_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyoloe_crn_l_300e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_m_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_m_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b0b52140240d05008d74f0b720724ce3f6a42b3
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_m_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyoloe_crn_m_300e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_m_300e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e0283e535cb1f793eaa748873b94a3303a18eed
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyoloe_crn_s_300e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config configs/slim/post_quant/ppyoloe_crn_s_300e_coco_ptq.yml --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/ppyoloe/ppyoloe_crn_x_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppyoloe/ppyoloe_crn_x_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8c5810b6c883c466a881be20d9b6501e6e707808
--- /dev/null
+++ b/test_tipc/configs/ppyoloe/ppyoloe_crn_x_300e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,24 @@
+===========================serving_infer_python_params===========================
+model_name:ppyoloe_crn_x_300e_coco
+python:python3.7
+filename:null
+##
+--output_dir:./output_inference
+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_x_300e_coco.pdparams
+norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
+distill_export:null
+export1:null
+export2:null
+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
+##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
index 0a07de5246525b5014eb46564eeab09a034f7293..70852a112ebd0b2c6c529c8366ee73b98b80c560 100644
--- a/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -1,20 +1,24 @@
-===========================cpp_infer_params===========================
+===========================serving_infer_python_params===========================
 model_name:yolov3_darknet53_270e_coco
-python:python
+python:python3.7
 filename:null
 ##
 --output_dir:./output_inference
 weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
-norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
-quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o
-fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o
+norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --export_serving_model True -o
+quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml --export_serving_model True -o
+fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml --export_serving_model True -o
 distill_export:null
 export1:null
 export2:null
-kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
---export_serving_model:True
-##
-start_serving:-m paddle_serving_server.serve --model serving_server
---port:9393
---gpu_ids:0
+kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml --export_serving_model True -o
 ##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
diff --git a/test_tipc/docs/test_serving.md b/test_tipc/docs/test_serving.md
new file mode 100644
index 0000000000000000000000000000000000000000..593c7beabd7c9a535128d3dc201c2341e9880051
--- /dev/null
+++ b/test_tipc/docs/test_serving.md
@@ -0,0 +1,91 @@
+# PaddleServing Prediction Functional Testing
+
+The main entry points for PaddleServing prediction testing are `test_serving_infer_python.sh` and `test_serving_infer_cpp.sh`, which test deployment based on PaddleServing.
+
+## 1. Summary of Test Results
+
+Depending on whether quantization was used during training, the models covered by this test fall into `normal models` and `quantized models`; the Serving prediction features for each are summarized below:
+
+| Model type | Device | Batch size | TensorRT | MKL-DNN | CPU multi-threading |
+| ---- | ---- |-----------| :----: | :----: | :----: |
+| Normal model | GPU | 1/2 | fp32/fp16 | - | - |
+| Normal model | CPU | 1/2 | - | fp32 | Supported |
+| Quantized model | GPU | 1/2 | int8 | - | - |
+| Quantized model | CPU | 1/2 | - | int8 | Supported |
+
+## 2. Test Workflow
+For environment setup, follow the [installation document](./install.md) to configure the TIPC runtime environment.
+
+### 2.1 Functional Tests
+**python serving**
+First run `prepare.sh` to prepare the data and models, then run `test_serving_infer_python.sh`; log files with the `serving_infer_python*.log` suffix are written to the ```test_tipc/output``` directory.
+
+```shell
+bash test_tipc/prepare.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
+
+# Usage 1:
+bash test_tipc/test_serving_infer_python.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
+# Usage 2: predict on a specified GPU card; the second argument is the GPU id
+bash test_tipc/test_serving_infer_python.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "1"
+```
+**cpp serving**
+First run `prepare.sh` to prepare the data and models, then run `test_serving_infer_cpp.sh`; log files with the `serving_infer_cpp*.log` suffix are written to the ```test_tipc/output``` directory.
+
+```shell
+bash test_tipc/prepare.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt "serving_infer"
+
+# Usage:
+bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+```
+
+#### Run Results
+
+The outcome of each test run is printed in `test_tipc/output/results_serving.log`.
+On success it prints:
+
+```
+Run successfully with command - python3.7 pipeline_http_client.py --image_dir=../../doc/imgs > ../../tests/output/server_infer_cpu_usemkldnn_True_threads_1_batchsize_1.log 2>&1 !
+Run successfully with command - xxxxx
+...
+```
+
+On failure it prints:
+
+```
+Run failed with command - python3.7 pipeline_http_client.py --image_dir=../../doc/imgs > ../../tests/output/server_infer_cpu_usemkldnn_True_threads_1_batchsize_1.log 2>&1 !
+Run failed with command - python3.7 pipeline_http_client.py --image_dir=../../doc/imgs > ../../tests/output/server_infer_cpu_usemkldnn_True_threads_6_batchsize_1.log 2>&1 !
+Run failed with command - xxxxx
+...
+```
+
+Detailed prediction results are saved under the test_tipc/output/ folder; for example, `server_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log` contains the coordinates of the detected boxes:
+
+```
+{'err_no': 0, 'err_msg': '', 'key': ['dt_boxes'], 'value': ['[[[ 78. 642.]\n [409. 640.]\n [409. 657.]\n
+[ 78. 659.]]\n\n [[ 75. 614.]\n [211. 614.]\n [211. 635.]\n [ 75. 635.]]\n\n
+[[103. 554.]\n [135. 554.]\n [135. 575.]\n [103. 575.]]\n\n [[ 75. 531.]\n
+[347. 531.]\n [347. 549.]\n [ 75. 549.]]\n\n [[ 76. 503.]\n [309. 498.]\n
+[309. 521.]\n [ 76. 526.]]\n\n [[163. 462.]\n [317. 462.]\n [317. 493.]\n
+[163. 493.]]\n\n [[324. 431.]\n [414. 431.]\n [414. 452.]\n [324. 452.]]\n\n
+[[ 76. 412.]\n [208. 408.]\n [209. 424.]\n [ 76. 428.]]\n\n [[307. 409.]\n
+[428. 409.]\n [428. 426.]\n [307. 426.]]\n\n [[ 74. 385.]\n [217. 382.]\n
+[217. 400.]\n [ 74. 403.]]\n\n [[308. 381.]\n [427. 380.]\n [427. 400.]\n
+[308. 401.]]\n\n [[ 74. 363.]\n [195. 362.]\n [195. 378.]\n [ 74. 379.]]\n\n
+[[303. 359.]\n [423. 357.]\n [423. 375.]\n [303. 377.]]\n\n [[ 70. 336.]\n
+[239. 334.]\n [239. 354.]\n [ 70. 356.]]\n\n [[ 70. 312.]\n [204. 310.]\n
+[204. 327.]\n [ 70. 330.]]\n\n [[303. 308.]\n [419. 306.]\n [419. 326.]\n
+[303. 328.]]\n\n [[113. 272.]\n [246. 270.]\n [247. 299.]\n [113. 301.]]\n\n
+[[361. 269.]\n [384. 269.]\n [384. 296.]\n [361. 296.]]\n\n [[ 70. 250.]\n
+[243. 246.]\n [243. 265.]\n [ 70. 269.]]\n\n [[ 65. 221.]\n [187. 220.]\n
+[187. 240.]\n [ 65. 241.]]\n\n [[337. 216.]\n [382. 216.]\n [382. 240.]\n
+[337. 240.]]\n\n [[ 65. 196.]\n [247. 193.]\n [247. 213.]\n [ 65. 216.]]\n\n
+[[296. 197.]\n [423. 191.]\n [424. 209.]\n [296. 215.]]\n\n [[ 65. 167.]\n [244. 167.]\n
+[244. 186.]\n [ 65. 186.]]\n\n [[ 67. 139.]\n [290. 139.]\n [290. 159.]\n [ 67. 159.]]\n\n
+[[ 68. 113.]\n [410. 113.]\n [410. 128.]\n [ 68. 129.]]\n\n [[277. 87.]\n [416. 87.]\n
+[416. 108.]\n [277. 108.]]\n\n [[ 79. 28.]\n [132. 28.]\n [132. 62.]\n [ 79. 62.]]\n\n
+[[163. 17.]\n [410. 14.]\n [410. 50.]\n [163. 53.]]]']}
+```
+
+## 3. Further Reading
+
+This document is for functional testing only; for a more detailed tutorial on Serving-based prediction, see: [PaddleDetection service deployment](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/deploy/serving)
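Because every run appends a `Run successfully`/`Run failed` line to the results log described above, a batch of configs can be summarized straight from it; for example:

```shell
# Count passes and surface any failures across the serving result logs.
grep -c "Run successfully" test_tipc/output/results_serving*.log
grep "Run failed" test_tipc/output/results_serving*.log || echo "no failures"
```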
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index ca854ed71d9f761de4c1b3d640fcc5c1331c0e76..a9fbfff6f166ee40f09fc07d240c27ff1039ef0a 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -36,6 +36,10 @@ elif [ ${MODE} = "cpp_infer" ];then
     if [[ ${model_name} =~ "s2anet" ]]; then
         cd ./ppdet/ext_op && eval "${python} setup.py install"
         cd ../../
+    elif [[ ${model_name} =~ "tinypose" ]]; then
+        wget -nc -P ./output_inference/ https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian.tar --no-check-certificate
+        cd ./output_inference/ && tar -xvf picodet_s_320_pedestrian.tar
+        cd ../
     fi
     # download mot lite data
     wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate
@@ -77,14 +81,14 @@ elif [ ${MODE} = "paddle2onnx_infer" ];then
     ${python} -m pip install onnxruntime==1.10.0
 elif [ ${MODE} = "serving_infer" ];then
     git clone https://github.com/PaddlePaddle/Serving
-    bash Serving/tools/paddle_env_install.sh
     cd Serving
-    pip install -r python/requirements.txt
+    bash tools/paddle_env_install.sh
+    ${python} -m pip install -r python/requirements.txt
     cd ..
-    pip install paddle-serving-client==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    pip install paddle-serving-app==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    pip install paddle-serving-server-gpu==0.8.3.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
-    python -m pip install paddlepaddle-gpu==2.2.2.post101 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html
+    ${python} -m pip install paddle-serving-client -i https://pypi.tuna.tsinghua.edu.cn/simple
+    ${python} -m pip install paddle-serving-app -i https://pypi.tuna.tsinghua.edu.cn/simple
+    ${python} -m pip install paddle-serving-server-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple
+    unset https_proxy http_proxy
 else
     # download coco lite data
     wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate
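The serving branch now installs the latest `paddle-serving-*` wheels instead of the previously pinned 0.8.3 stack. If a reproducible environment is ever required, the old pins can still be applied by hand (a sketch; it assumes the CUDA 10.1 builds the removed lines targeted):

```shell
${python} -m pip install paddle-serving-client==0.8.3 paddle-serving-app==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
${python} -m pip install paddle-serving-server-gpu==0.8.3.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
```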
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 4a335d0582e3f4c068707e339f17d145678fe417..86a0947241413595c48c6972f8f64eaed0d4dd97 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -85,7 +85,7 @@ function func_cpp_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -111,7 +111,7 @@ function func_cpp_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
     else
@@ -207,7 +207,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
     echo $export_cmd
     eval $export_cmd
     status_export=$?
-    status_check $status_export "${export_cmd}" "${status_log}"
+    status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
 
     #run inference
     save_export_model_dir="${save_export_value}/${model_name}"
diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh
deleted file mode 100644
index 8c122d96dffa4437e16199f6772a027d8b984d42..0000000000000000000000000000000000000000
--- a/test_tipc/test_serving.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-source test_tipc/utils_func.sh
-
-FILENAME=$1
-
-# parser model_name
-dataline=$(cat ${FILENAME})
-IFS=$'\n'
-lines=(${dataline})
-model_name=$(func_parser_value "${lines[1]}")
-echo "ppdet serving: ${model_name}"
-python=$(func_parser_value "${lines[2]}")
-filename_key=$(func_parser_key "${lines[3]}")
-filename_value=$(func_parser_value "${lines[3]}")
-
-# export params
-save_export_key=$(func_parser_key "${lines[5]}")
-save_export_value=$(func_parser_value "${lines[5]}")
-export_weight_key=$(func_parser_key "${lines[6]}")
-export_weight_value=$(func_parser_value "${lines[6]}")
-norm_export=$(func_parser_value "${lines[7]}")
-pact_export=$(func_parser_value "${lines[8]}")
-fpgm_export=$(func_parser_value "${lines[9]}")
-distill_export=$(func_parser_value "${lines[10]}")
-export_key1=$(func_parser_key "${lines[11]}")
-export_value1=$(func_parser_value "${lines[11]}")
-export_key2=$(func_parser_key "${lines[12]}")
-export_value2=$(func_parser_value "${lines[12]}")
-kl_quant_export=$(func_parser_value "${lines[13]}")
-export_serving_model_key=$(func_parser_key "${lines[14]}")
-export_serving_model_value=$(func_parser_value "${lines[14]}")
-# parser serving
-start_serving=$(func_parser_value "${lines[16]}")
-port_key=$(func_parser_key "${lines[17]}")
-port_value=$(func_parser_value "${lines[17]}")
-gpu_id_key=$(func_parser_key "${lines[18]}")
-gpu_id_value=$(func_parser_value "${lines[18]}")
-
-LOG_PATH="./test_tipc/output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_serving.log"
-
-function func_serving(){
-    IFS='|'
-    if [ ${gpu_id_key} = "null" ]; then
-        start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} > serving.log 2>&1 &"
-    else
-        start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} ${gpu_id_key} ${gpu_id_value} > serving.log 2>&1 &"
-    fi
-    echo $start_serving_command
-    eval $start_serving_command
-    last_status=${PIPESTATUS[0]}
-    status_check $last_status "${start_serving_command}" "${status_log}"
-}
-cd output_inference/${model_name}
-echo $PWD
-func_serving
-test_command="${python} ../../deploy/serving/test_client.py ../../deploy/serving/label_list.txt ../../demo/000000014439.jpg"
-echo $test_command
-eval $test_command
-last_status=${PIPESTATUS[0]}
-status_check $last_status"${test_command}" "${status_log}"
-
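The `status_check` call sites updated in `benchmark_train.sh` and `test_inference_cpp.sh` above now pass `model_name` as a fourth argument so that entries in the shared results log can be attributed to a model. A sketch of what the extended helper is expected to do (illustrative only; the actual implementation lives in `test_tipc/utils_func.sh`):

```shell
function status_check(){
    last_status=$1   # exit code of the tested command
    run_command=$2
    run_log=$3
    model_name=$4    # new fourth argument: tags the log line per model
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
}
```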
-gpu_id_key=$(func_parser_key "${lines[18]}")
-gpu_id_value=$(func_parser_value "${lines[18]}")
-
-LOG_PATH="./test_tipc/output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_serving.log"
-
-function func_serving(){
-    IFS='|'
-    if [ ${gpu_id_key} = "null" ]; then
-        start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} > serving.log 2>&1 &"
-    else
-        start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} ${gpu_id_key} ${gpu_id_value} > serving.log 2>&1 &"
-    fi
-    echo $start_serving_command
-    eval $start_serving_command
-    last_status=${PIPESTATUS[0]}
-    status_check $last_status "${start_serving_command}" "${status_log}"
-}
-cd output_inference/${model_name}
-echo $PWD
-func_serving
-test_command="${python} ../../deploy/serving/test_client.py ../../deploy/serving/label_list.txt ../../demo/000000014439.jpg"
-echo $test_command
-eval $test_command
-last_status=${PIPESTATUS[0]}
-status_check $last_status"${test_command}" "${status_log}"
-
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
new file mode 100644
index 0000000000000000000000000000000000000000..987b3de763983cbf5bc8b3b214179f2cb93d16f5
--- /dev/null
+++ b/test_tipc/test_serving_infer_python.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+source test_tipc/utils_func.sh
+
+FILENAME=$1
+
+# parser model_name
+dataline=$(cat ${FILENAME})
+IFS=$'\n'
+lines=(${dataline})
+model_name=$(func_parser_value "${lines[1]}")
+echo "ppdet serving_infer: ${model_name}"
+python=$(func_parser_value "${lines[2]}")
+filename_key=$(func_parser_key "${lines[3]}")
+filename_value=$(func_parser_value "${lines[3]}")
+
+# parser export params
+save_export_key=$(func_parser_key "${lines[5]}")
+save_export_value=$(func_parser_value "${lines[5]}")
+export_weight_key=$(func_parser_key "${lines[6]}")
+export_weight_value=$(func_parser_value "${lines[6]}")
+norm_export=$(func_parser_value "${lines[7]}")
+pact_export=$(func_parser_value "${lines[8]}")
+fpgm_export=$(func_parser_value "${lines[9]}")
+distill_export=$(func_parser_value "${lines[10]}")
+export_key1=$(func_parser_key "${lines[11]}")
+export_value1=$(func_parser_value "${lines[11]}")
+export_key2=$(func_parser_key "${lines[12]}")
+export_value2=$(func_parser_value "${lines[12]}")
+kl_quant_export=$(func_parser_value "${lines[13]}")
+
+# parser serving params
+infer_mode_list=$(func_parser_value "${lines[15]}")
+infer_is_quant_list=$(func_parser_value "${lines[16]}")
+
+web_service_py=$(func_parser_value "${lines[17]}")
+model_dir_key=$(func_parser_key "${lines[18]}")
+opt_key=$(func_parser_key "${lines[19]}")
+opt_use_gpu_list=$(func_parser_value "${lines[19]}")
+web_service_key1=$(func_parser_key "${lines[20]}")
+web_service_value1=$(func_parser_value "${lines[20]}")
+http_client_py=$(func_parser_value "${lines[21]}")
+infer_image_key=$(func_parser_key "${lines[22]}")
+infer_image_value=$(func_parser_value "${lines[22]}")
+http_client_key1=$(func_parser_key "${lines[23]}")
+http_client_value1=$(func_parser_value "${lines[23]}")
+
+LOG_PATH="./test_tipc/output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_serving_python.log"
+
+function func_serving_inference(){
+    IFS='|'
+    _python=$1
+    _log_path=$2
+    _service_script=$3
+    _client_script=$4
+    _set_model_dir=$5
+    _set_image_file=$6
+    set_web_service_params1=$(func_set_params "${web_service_key1}" "${web_service_value1}")
+    set_http_client_params1=$(func_set_params "${http_client_key1}" "${http_client_value1}")
+    # inference
+    for opt in ${opt_use_gpu_list[*]}; do
+        device_type=$(func_parser_key "${opt}")
+        _save_log_path="${_log_path}/serving_infer_python_${device_type}_batchsize_1.log"
+        opt_value=$(func_parser_value "${opt}")
+        _set_opt=$(func_set_params "${opt_key}" "${opt_value}")
+        # run web service
+        web_service_cmd="${_python} ${_service_script} ${_set_model_dir} ${_set_opt} ${set_web_service_params1} &"
+        eval $web_service_cmd
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        sleep 5s
+        # run http client
+        http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${_save_log_path} 2>&1 "
+        eval $http_client_cmd
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+        eval "cat ${_save_log_path}"
+        ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
+    done
+}
+
+# set cuda device
+GPUID=$2
+if [ ${#GPUID} -le 0 ];then
+    env=" "
+else
+    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+fi
+eval $env
+
+# run serving infer
+Count=0
+IFS="|"
+infer_quant_flag=(${infer_is_quant_list})
+for infer_mode in ${infer_mode_list[*]}; do
+    # run export
+    case ${infer_mode} in
+        norm) run_export=${norm_export} ;;
+        quant) run_export=${pact_export} ;;
+        fpgm) run_export=${fpgm_export} ;;
+        distill) run_export=${distill_export} ;;
+        kl_quant) run_export=${kl_quant_export} ;;
+        *) echo "Undefined infer_mode!"; exit 1;
+    esac
+    if [ ${run_export} = "null" ]; then
+        continue
+    fi
+    set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
+    set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
+    set_filename=$(func_set_params "${filename_key}" "${model_name}")
+    export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
+    echo $export_cmd
+    eval $export_cmd
+    status_export=$?
+    status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+
+    #run inference
+    set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
+    set_infer_image_file=$(func_set_params "${infer_image_key}" "${infer_image_value}")
+    is_quant=${infer_quant_flag[Count]}
+    func_serving_inference "${python}" "${LOG_PATH}" "${web_service_py}" "${http_client_py}" "${set_export_model_dir}" ${set_infer_image_file}
+    Count=$(($Count + 1))
+done
+eval "unset CUDA_VISIBLE_DEVICES"
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 91b32271ed776849242d0103c8dc95a373c00e9a..117c7279517b9b14a1c12afc2b47a8c2d013f652 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -125,7 +125,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -151,7 +151,7 @@ function func_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
     else
@@ -198,7 +198,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
         echo $export_cmd
         eval $export_cmd
-        status_check $? "${export_cmd}" "${status_log}"
+        status_check $? "${export_cmd}" "${status_log}" "${model_name}"

         #run inference
         save_export_model_dir="${save_export_value}/${model_name}"
@@ -291,7 +291,7 @@ else
         fi
         # run train
         eval $cmd
-        status_check $? "${cmd}" "${status_log}"
+        status_check $? "${cmd}" "${status_log}" "${model_name}"

         set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
         # run eval
@@ -299,7 +299,7 @@ else
             set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
             eval_cmd="${python} ${eval_py} ${set_eval_trained_weight} ${set_use_gpu} ${set_eval_params1}"
             eval $eval_cmd
-            status_check $? "${eval_cmd}" "${status_log}"
+            status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
         fi
         # run export model
         if [ ${run_export} != "null" ]; then
@@ -310,14 +310,14 @@ else
             # run export onnx model for rcnn
             export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} export_onnx=True ${set_save_export_dir} "
             eval $export_cmd
-            status_check $? "${export_cmd}" "${status_log}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
             # copy model for inference benchmark
             eval "cp ${save_export_model_dir}/* ${save_log}/"
         fi
         # run export model
         export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
         eval $export_cmd
-        status_check $? "${export_cmd}" "${status_log}"
+        status_check $? "${export_cmd}" "${status_log}" "${model_name}"
"${export_cmd}" "${status_log}" "${model_name}" #run inference if [ ${export_onnx_key} != "export_onnx" ]; then diff --git a/test_tipc/utils_func.sh b/test_tipc/utils_func.sh index b1c17caa4ca121dddefc92898b245bb6c74cfc6e..023bdf5bfd551bf8ca22cdf0d5c861423d0e43ab 100644 --- a/test_tipc/utils_func.sh +++ b/test_tipc/utils_func.sh @@ -50,9 +50,10 @@ function status_check(){ last_status=$1 # the exit code run_command=$2 run_log=$3 + model_name=$4 if [ $last_status -eq 0 ]; then - echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} + echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log} else - echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log} + echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log} fi }