Unverified · Commit 60631c65 · authored by shangliang Xu · committed by GitHub

[TIPC] add serving python infer test shell, test=document_fix (#6175)

Parent 636b8c47
@@ -22,6 +22,8 @@ import argparse
parser = argparse.ArgumentParser(description="args for paddleserving")
parser.add_argument("--image_dir", type=str)
parser.add_argument("--image_file", type=str)
+parser.add_argument("--http_port", type=int, default=18093)
+parser.add_argument("--service_name", type=str, default="ppdet")
args = parser.parse_args()
@@ -57,7 +59,7 @@ def get_test_images(infer_dir, infer_img):
if __name__ == "__main__":
-    url = "http://127.0.0.1:18093/ppdet/prediction"
+    url = f"http://127.0.0.1:{args.http_port}/{args.service_name}/prediction"
    logid = 10000
    img_list = get_test_images(args.image_dir, args.image_file)
...
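With the two added flags, the client's endpoint is assembled from arguments instead of being hard-coded. A minimal invocation against a service published on a non-default port might look like the sketch below (the port value is an assumption for illustration; both flags fall back to 18093 and "ppdet" when omitted):

```shell
# Illustrative client call using the new flags; the port shown is an assumption,
# the defaults (18093 / ppdet) apply when the flags are omitted.
python3.7 deploy/serving/python/pipeline_http_client.py \
    --image_file=./demo/000000014439.jpg \
    --http_port 18094 --service_name ppdet
```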
@@ -107,6 +107,6 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/yolov3/yolov3_
- [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation, and inference, including pruning, quantization, and distillation.
- [test_train_fleet_inference_python usage](./docs/test_train_fleet_inference_python.md): tests basic Python-based multi-machine, multi-GPU training and inference.
- [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
-- [test_serving usage](./): tests Paddle Serving-based service deployment.
+- [test_serving usage](docs/test_serving.md): tests Paddle Serving-based service deployment.
- [test_lite_arm_cpu_cpp usage](./): tests C++ inference deployment on ARM CPU via Paddle-Lite.
- [test_paddle2onnx usage](./): tests Paddle2ONNX model conversion and verifies correctness.
@@ -206,7 +206,7 @@ for batch_size in ${batch_size_list[*]}; do
echo $cmd
eval $cmd
last_status=${PIPESTATUS[0]}
-status_check $last_status "${cmd}" "${status_log}"
+status_check $last_status "${cmd}" "${status_log}" "${model_name}"
else
IFS=";"
unset_env=`unset CUDA_VISIBLE_DEVICES`
@@ -242,7 +242,7 @@ for batch_size in ${batch_size_list[*]}; do
echo $cmd
eval $cmd
last_status=${PIPESTATUS[0]}
-status_check $last_status "${cmd}" "${status_log}"
+status_check $last_status "${cmd}" "${status_log}" "${model_name}"
fi
done
done
...
@@ -23,7 +23,7 @@ inference:./deploy/cpp/build/main
--batch_size:1|2
--use_tensorrt:null
--run_mode:paddle
---model_dir:
+--model_dir_keypoint:
--image_dir:./dataset/coco/test2017/
--run_benchmark:False
-null:null
+--model_dir:./output_inference/picodet_s_320_pedestrian
\ No newline at end of file
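The renamed key separates the two stages of the keypoint pipeline: `--model_dir_keypoint` receives the exported TinyPose model, while `--model_dir` now points at the pedestrian detector that `prepare.sh` downloads. A command assembled from these keys would look roughly like the sketch below (the keypoint directory is a placeholder; the harness fills it in at run time):

```shell
# Hypothetical invocation assembled from the config keys above;
# the tinypose_128x96 path is illustrative, substituted by the test script.
./deploy/cpp/build/main \
    --model_dir=./output_inference/picodet_s_320_pedestrian \
    --model_dir_keypoint=./output_inference/tinypose_128x96 \
    --image_dir=./dataset/coco/test2017/ \
    --batch_size=1 --run_mode=paddle --run_benchmark=False
```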
===========================serving_infer_python_params===========================
model_name:tinypose_128x96
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams
norm_export:tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --slim_config configs/slim/post_quant/tinypose_128x96_ptq.yml --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/hrnet_demo.jpg
null:null
\ No newline at end of file
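Each line of these configs is a `key:value` pair that the TIPC harness splits with helpers from `test_tipc/utils_func.sh`. A simplified sketch of those helpers follows for orientation; it is a reconstruction, and the versions shipped in the repo may handle edge cases differently:

```shell
# Sketch of the colon-separated key:value parsing used by the TIPC scripts.
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[0]}                     # text before the first ':'
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    if [ ${#array[*]} = 2 ]; then
        echo ${array[1]}                 # plain value
    else
        echo "${array[1]}:${array[2]}"   # rejoin values containing ':' (e.g. URLs)
    fi
}
```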
===========================serving_infer_python_params===========================
model_name:mask_rcnn_r50_fpn_1x_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams
norm_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/post_quant/mask_rcnn_r50_fpn_1x_coco_ptq.yml --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_l_640_coco_lcnet
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams
norm_export:tools/export_model.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_l_640_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_l_640_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_l_640_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_l_640_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_lcnet_1_5x_416_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_m_416_coco_lcnet
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams
norm_export:tools/export_model.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_m_416_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_m_416_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_m_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_mobilenetv3_large_1x_416_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_mobilenetv3_large_1x_416_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_mobilenetv3_large_1x_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_r18_640_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_r18_640_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_r18_640_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_s_320_coco_lcnet
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
norm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_s_320_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config configs/slim/post_quant/picodet_s_ptq.yml --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_shufflenetv2_1x_416_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_shufflenetv2_1x_416_coco.pdparams
norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_shufflenetv2_1x_416_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:picodet_xs_320_coco_lcnet
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams
norm_export:tools/export_model.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_xs_320_coco_lcnet.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolo_mbv3_large_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolo_mbv3_small_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_small_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_small_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolo_r18vd_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolo_r18vd_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r18vd_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolo_r50vd_dcn_1x_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/quant/ppyolo_r50vd_qat_pact.yml --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/prune/ppyolo_r50vd_prune_fpgm.yml --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolo_tiny_650e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolo_tiny_650e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolov2_r101vd_dcn_365e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r101vd_dcn_365e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyolov2_r50vd_dcn_365e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyoloe_crn_l_300e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyoloe_crn_m_300e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_m_300e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyoloe_crn_s_300e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config configs/slim/post_quant/ppyoloe_crn_s_300e_coco_ptq.yml --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
===========================serving_infer_python_params===========================
model_name:ppyoloe_crn_x_300e_coco
python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_x_300e_coco.pdparams
norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --export_serving_model True -o
quant_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config _template_pact --export_serving_model True -o
fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config _template_fpgm --export_serving_model True -o
distill_export:null
export1:null
export2:null
kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml --slim_config _template_kl_quant --export_serving_model True -o
##
infer_mode:norm
infer_quant:False
web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
--model_dir:null
--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
null:null
http_client:deploy/serving/python/pipeline_http_client.py
--image_file:./demo/000000014439.jpg
null:null
\ No newline at end of file
-===========================cpp_infer_params===========================
+===========================serving_infer_python_params===========================
model_name:yolov3_darknet53_270e_coco
-python:python
+python:python3.7
filename:null
##
--output_dir:./output_inference
weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
-norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
+norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --export_serving_model True -o
-quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o
+quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml --export_serving_model True -o
-fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o
+fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml --export_serving_model True -o
distill_export:null
export1:null
export2:null
-kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
+kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml --export_serving_model True -o
---export_serving_model:True
-##
-start_serving:-m paddle_serving_server.serve --model serving_server
---port:9393
---gpu_ids:0
##
+infer_mode:norm
+infer_quant:False
+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml
+--model_dir:null
+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1
+null:null
+http_client:deploy/serving/python/pipeline_http_client.py
+--image_file:./demo/000000014439.jpg
+null:null
\ No newline at end of file
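Taken together, these keys describe the whole python serving round trip: export a serving model, start `web_service.py`, then probe it with `pipeline_http_client.py`. A rough sketch of the commands the harness derives from this config (GPU branch of `--opt`; not a verbatim transcript of the generated command line):

```shell
# Sketch assembled from the config keys above; flag spellings follow the config,
# the exact argument joining is done by func_set_params in the harness.
python3.7 deploy/serving/python/web_service.py \
    --config=deploy/serving/python/config.yml \
    --model_dir=./output_inference/yolov3_darknet53_270e_coco \
    --opt=op.ppdet.local_service_conf.device_type=1 &   # 1 = GPU, 0 = CPU

python3.7 deploy/serving/python/pipeline_http_client.py \
    --image_file=./demo/000000014439.jpg
```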
# PaddleServing Inference Test
The main entry points for the PaddleServing inference test are `test_serving_infer_python.sh` and `test_serving_infer_cpp.sh`, which exercise PaddleServing-based deployment.
## 1. Summary of Test Coverage
Depending on whether quantization was used during training, the models under test fall into `normal models` and `quantized models`; the Serving inference coverage for the two classes is summarized below:
| Model type | Device | Batch size | TensorRT | MKL-DNN | CPU multi-threading |
| ---- | ---- |-----------| :----: | :----: | :----: |
| Normal model | GPU | 1/2 | fp32/fp16 | - | - |
| Normal model | CPU | 1/2 | - | fp32 | Supported |
| Quantized model | GPU | 1/2 | int8 | - | - |
| Quantized model | CPU | 1/2 | - | int8 | Supported |
## 2. Test Workflow
To set up the runtime environment, configure the TIPC environment as described in the [installation guide](./install.md).
### 2.1 Functional Tests
**python serving**
First run `prepare.sh` to prepare the data and models, then run `test_serving_infer_python.sh`; log files with the `serving_infer_python*.log` suffix are written to the `test_tipc/output` directory.
```shell
bash test_tipc/prepare.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
# Usage 1:
bash test_tipc/test_serving_infer_python.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
# Usage 2: run inference on a specified GPU; the second argument is the GPU card id
bash test_tipc/test_serving_infer_python.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "1"
```
**cpp serving**
First run `prepare.sh` to prepare the data and models, then run `test_serving_infer_cpp.sh`; log files with the `serving_infer_cpp*.log` suffix are written to the `test_tipc/output` directory.
```shell
bash test_tipc/prepare.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt "serving_infer"
# Usage:
bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
```
#### Run Results
The outcome of each test run is appended to `test_tipc/output/results_serving.log`:
On success it prints:
```
Run successfully with command - python3.7 pipeline_http_client.py --image_dir=../../doc/imgs > ../../tests/output/server_infer_cpu_usemkldnn_True_threads_1_batchsize_1.log 2>&1 !
Run successfully with command - xxxxx
...
```
On failure it prints:
```
Run failed with command - python3.7 pipeline_http_client.py --image_dir=../../doc/imgs > ../../tests/output/server_infer_cpu_usemkldnn_True_threads_1_batchsize_1.log 2>&1 !
Run failed with command - python3.7 pipeline_http_client.py --image_dir=../../doc/imgs > ../../tests/output/server_infer_cpu_usemkldnn_True_threads_6_batchsize_1.log 2>&1 !
Run failed with command - xxxxx
...
```
Detailed inference results are stored under the test_tipc/output/ folder; for example, `server_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log` contains the coordinates of the returned detection boxes:
```
{'err_no': 0, 'err_msg': '', 'key': ['dt_boxes'], 'value': ['[[[ 78. 642.]\n [409. 640.]\n [409. 657.]\n
[ 78. 659.]]\n\n [[ 75. 614.]\n [211. 614.]\n [211. 635.]\n [ 75. 635.]]\n\n
[[103. 554.]\n [135. 554.]\n [135. 575.]\n [103. 575.]]\n\n [[ 75. 531.]\n
[347. 531.]\n [347. 549.]\n [ 75. 549.]]\n\n [[ 76. 503.]\n [309. 498.]\n
[309. 521.]\n [ 76. 526.]]\n\n [[163. 462.]\n [317. 462.]\n [317. 493.]\n
[163. 493.]]\n\n [[324. 431.]\n [414. 431.]\n [414. 452.]\n [324. 452.]]\n\n
[[ 76. 412.]\n [208. 408.]\n [209. 424.]\n [ 76. 428.]]\n\n [[307. 409.]\n
[428. 409.]\n [428. 426.]\n [307. 426.]]\n\n [[ 74. 385.]\n [217. 382.]\n
[217. 400.]\n [ 74. 403.]]\n\n [[308. 381.]\n [427. 380.]\n [427. 400.]\n
[308. 401.]]\n\n [[ 74. 363.]\n [195. 362.]\n [195. 378.]\n [ 74. 379.]]\n\n
[[303. 359.]\n [423. 357.]\n [423. 375.]\n [303. 377.]]\n\n [[ 70. 336.]\n
[239. 334.]\n [239. 354.]\n [ 70. 356.]]\n\n [[ 70. 312.]\n [204. 310.]\n
[204. 327.]\n [ 70. 330.]]\n\n [[303. 308.]\n [419. 306.]\n [419. 326.]\n
[303. 328.]]\n\n [[113. 272.]\n [246. 270.]\n [247. 299.]\n [113. 301.]]\n\n
[[361. 269.]\n [384. 269.]\n [384. 296.]\n [361. 296.]]\n\n [[ 70. 250.]\n
[243. 246.]\n [243. 265.]\n [ 70. 269.]]\n\n [[ 65. 221.]\n [187. 220.]\n
[187. 240.]\n [ 65. 241.]]\n\n [[337. 216.]\n [382. 216.]\n [382. 240.]\n
[337. 240.]]\n\n [[ 65. 196.]\n [247. 193.]\n [247. 213.]\n [ 65. 216.]]\n\n
[[296. 197.]\n [423. 191.]\n [424. 209.]\n [296. 215.]]\n\n [[ 65. 167.]\n [244. 167.]\n
[244. 186.]\n [ 65. 186.]]\n\n [[ 67. 139.]\n [290. 139.]\n [290. 159.]\n [ 67. 159.]]\n\n
[[ 68. 113.]\n [410. 113.]\n [410. 128.]\n [ 68. 129.]]\n\n [[277. 87.]\n [416. 87.]\n
[416. 108.]\n [277. 108.]]\n\n [[ 79. 28.]\n [132. 28.]\n [132. 62.]\n [ 79. 62.]]\n\n
[[163. 17.]\n [410. 14.]\n [410. 50.]\n [163. 53.]]]']}
```
## 3. Further Reading
This document covers functional testing only; for a more detailed tutorial on Serving-based inference, see: [PaddleDetection service deployment](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/deploy/serving)
@@ -36,6 +36,10 @@ elif [ ${MODE} = "cpp_infer" ];then
if [[ ${model_name} =~ "s2anet" ]]; then
cd ./ppdet/ext_op && eval "${python} setup.py install"
cd ../../
+elif [[ ${model_name} =~ "tinypose" ]]; then
+wget -nc -P ./output_inference/ https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian.tar --no-check-certificate
+cd ./output_inference/ && tar -xvf picodet_s_320_pedestrian.tar
+cd ../
fi
# download mot lite data
wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate
@@ -77,14 +81,14 @@ elif [ ${MODE} = "paddle2onnx_infer" ];then
${python} -m pip install onnxruntime==1.10.0
elif [ ${MODE} = "serving_infer" ];then
git clone https://github.com/PaddlePaddle/Serving
-bash Serving/tools/paddle_env_install.sh
cd Serving
-pip install -r python/requirements.txt
+bash tools/paddle_env_install.sh
+${python} -m pip install -r python/requirements.txt
cd ..
-pip install paddle-serving-client==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
+${python} -m pip install paddle-serving-client -i https://pypi.tuna.tsinghua.edu.cn/simple
-pip install paddle-serving-app==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
+${python} -m pip install paddle-serving-app -i https://pypi.tuna.tsinghua.edu.cn/simple
-pip install paddle-serving-server-gpu==0.8.3.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
+${python} -m pip install paddle-serving-server-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple
-python -m pip install paddlepaddle-gpu==2.2.2.post101 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html
+unset https_proxy http_proxy
else
# download coco lite data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate
...
@@ -85,7 +85,7 @@ function func_cpp_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
+status_check $last_status "${command}" "${status_log}" "${model_name}"
done
done
done
@@ -111,7 +111,7 @@ function func_cpp_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
+status_check $last_status "${command}" "${status_log}" "${model_name}"
done
done
else
@@ -207,7 +207,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
echo $export_cmd
eval $export_cmd
status_export=$?
-status_check $status_export "${export_cmd}" "${status_log}"
+status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
#run inference
save_export_model_dir="${save_export_value}/${model_name}"
...
#!/bin/bash
source test_tipc/utils_func.sh
FILENAME=$1
# parser model_name
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
echo "ppdet serving: ${model_name}"
python=$(func_parser_value "${lines[2]}")
filename_key=$(func_parser_key "${lines[3]}")
filename_value=$(func_parser_value "${lines[3]}")
# export params
save_export_key=$(func_parser_key "${lines[5]}")
save_export_value=$(func_parser_value "${lines[5]}")
export_weight_key=$(func_parser_key "${lines[6]}")
export_weight_value=$(func_parser_value "${lines[6]}")
norm_export=$(func_parser_value "${lines[7]}")
pact_export=$(func_parser_value "${lines[8]}")
fpgm_export=$(func_parser_value "${lines[9]}")
distill_export=$(func_parser_value "${lines[10]}")
export_key1=$(func_parser_key "${lines[11]}")
export_value1=$(func_parser_value "${lines[11]}")
export_key2=$(func_parser_key "${lines[12]}")
export_value2=$(func_parser_value "${lines[12]}")
kl_quant_export=$(func_parser_value "${lines[13]}")
export_serving_model_key=$(func_parser_key "${lines[14]}")
export_serving_model_value=$(func_parser_value "${lines[14]}")
# parser serving
start_serving=$(func_parser_value "${lines[16]}")
port_key=$(func_parser_key "${lines[17]}")
port_value=$(func_parser_value "${lines[17]}")
gpu_id_key=$(func_parser_key "${lines[18]}")
gpu_id_value=$(func_parser_value "${lines[18]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_serving.log"
function func_serving(){
IFS='|'
if [ ${gpu_id_key} = "null" ]; then
start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} > serving.log 2>&1 &"
else
start_serving_command="nohup ${python} ${start_serving} ${port_key} ${port_value} ${gpu_id_key} ${gpu_id_value} > serving.log 2>&1 &"
fi
echo $start_serving_command
eval $start_serving_command
last_status=${PIPESTATUS[0]}
status_check $last_status "${start_serving_command}" "${status_log}"
}
cd output_inference/${model_name}
echo $PWD
func_serving
test_command="${python} ../../deploy/serving/test_client.py ../../deploy/serving/label_list.txt ../../demo/000000014439.jpg"
echo $test_command
eval $test_command
last_status=${PIPESTATUS[0]}
status_check $last_status"${test_command}" "${status_log}"
#!/bin/bash
source test_tipc/utils_func.sh
FILENAME=$1
# parser model_name
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
echo "ppdet serving_infer: ${model_name}"
python=$(func_parser_value "${lines[2]}")
filename_key=$(func_parser_key "${lines[3]}")
filename_value=$(func_parser_value "${lines[3]}")
# parser export params
save_export_key=$(func_parser_key "${lines[5]}")
save_export_value=$(func_parser_value "${lines[5]}")
export_weight_key=$(func_parser_key "${lines[6]}")
export_weight_value=$(func_parser_value "${lines[6]}")
norm_export=$(func_parser_value "${lines[7]}")
pact_export=$(func_parser_value "${lines[8]}")
fpgm_export=$(func_parser_value "${lines[9]}")
distill_export=$(func_parser_value "${lines[10]}")
export_key1=$(func_parser_key "${lines[11]}")
export_value1=$(func_parser_value "${lines[11]}")
export_key2=$(func_parser_key "${lines[12]}")
export_value2=$(func_parser_value "${lines[12]}")
kl_quant_export=$(func_parser_value "${lines[13]}")
# parser serving params
infer_mode_list=$(func_parser_value "${lines[15]}")
infer_is_quant_list=$(func_parser_value "${lines[16]}")
web_service_py=$(func_parser_value "${lines[17]}")
model_dir_key=$(func_parser_key "${lines[18]}")
opt_key=$(func_parser_key "${lines[19]}")
opt_use_gpu_list=$(func_parser_value "${lines[19]}")
web_service_key1=$(func_parser_key "${lines[20]}")
web_service_value1=$(func_parser_value "${lines[20]}")
http_client_py=$(func_parser_value "${lines[21]}")
infer_image_key=$(func_parser_key "${lines[22]}")
infer_image_value=$(func_parser_value "${lines[22]}")
http_client_key1=$(func_parser_key "${lines[23]}")
http_client_value1=$(func_parser_value "${lines[23]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_serving_python.log"
function func_serving_inference(){
IFS='|'
_python=$1
_log_path=$2
_service_script=$3
_client_script=$4
_set_model_dir=$5
_set_image_file=$6
set_web_service_params1=$(func_set_params "${web_service_key1}" "${web_service_value1}")
set_http_client_params1=$(func_set_params "${http_client_key1}" "${http_client_value1}")
# inference
for opt in ${opt_use_gpu_list[*]}; do
device_type=$(func_parser_key "${opt}")
_save_log_path="${_log_path}/serving_infer_python_${device_type}_batchsize_1.log"
opt_value=$(func_parser_value "${opt}")
_set_opt=$(func_set_params "${opt_key}" "${opt_value}")
# run web service
web_service_cmd="${_python} ${_service_script} ${_set_model_dir} ${_set_opt} ${set_web_service_params1} &"
eval $web_service_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
sleep 5s
# run http client
http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${_save_log_path} 2>&1 "
eval $http_client_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
eval "cat ${_save_log_path}"
ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
done
}
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
eval $env
# run serving infer
Count=0
IFS="|"
infer_quant_flag=(${infer_is_quant_list})
for infer_mode in ${infer_mode_list[*]}; do
# run export
case ${infer_mode} in
norm) run_export=${norm_export} ;;
quant) run_export=${pact_export} ;;
fpgm) run_export=${fpgm_export} ;;
distill) run_export=${distill_export} ;;
kl_quant) run_export=${kl_quant_export} ;;
*) echo "Undefined infer_mode!"; exit 1;
esac
if [ ${run_export} = "null" ]; then
continue
fi
set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
set_filename=$(func_set_params "${filename_key}" "${model_name}")
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
echo $export_cmd
eval $export_cmd
status_export=$?
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
#run inference
set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
set_infer_image_file=$(func_set_params "${infer_image_key}" "${infer_image_value}")
is_quant=${infer_quant_flag[Count]}
func_serving_inference "${python}" "${LOG_PATH}" "${web_service_py}" "${http_client_py}" "${set_export_model_dir}" ${set_infer_image_file}
Count=$(($Count + 1))
done
eval "unset CUDA_VISIBLE_DEVICES"
@@ -125,7 +125,7 @@ function func_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
+status_check $last_status "${command}" "${status_log}" "${model_name}"
done
done
done
@@ -151,7 +151,7 @@ function func_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
+status_check $last_status "${command}" "${status_log}" "${model_name}"
done
done
else
@@ -198,7 +198,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
echo $export_cmd
eval $export_cmd
-status_check $? "${export_cmd}" "${status_log}"
+status_check $? "${export_cmd}" "${status_log}" "${model_name}"
#run inference
save_export_model_dir="${save_export_value}/${model_name}"
@@ -291,7 +291,7 @@ else
fi
# run train
eval $cmd
-status_check $? "${cmd}" "${status_log}"
+status_check $? "${cmd}" "${status_log}" "${model_name}"
set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
# run eval
@@ -299,7 +299,7 @@ else
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
eval_cmd="${python} ${eval_py} ${set_eval_trained_weight} ${set_use_gpu} ${set_eval_params1}"
eval $eval_cmd
-status_check $? "${eval_cmd}" "${status_log}"
+status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
fi
# run export model
if [ ${run_export} != "null" ]; then
@@ -310,14 +310,14 @@ else
# run export onnx model for rcnn
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} export_onnx=True ${set_save_export_dir} "
eval $export_cmd
-status_check $? "${export_cmd}" "${status_log}"
+status_check $? "${export_cmd}" "${status_log}" "${model_name}"
# copy model for inference benchmark
eval "cp ${save_export_model_dir}/* ${save_log}/"
fi
# run export model
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
eval $export_cmd
-status_check $? "${export_cmd}" "${status_log}"
+status_check $? "${export_cmd}" "${status_log}" "${model_name}"
#run inference
if [ ${export_onnx_key} != "export_onnx" ]; then
...
@@ -50,9 +50,10 @@ function status_check(){
last_status=$1 # the exit code
run_command=$2
run_log=$3
+model_name=$4
if [ $last_status -eq 0 ]; then
-echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
+echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
else
-echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
+echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
fi
}