diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..73392cf3bedb5bf0f8b005d59d0e2862564d10cd
--- /dev/null
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:MobileNetV3_large_x1_0_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./MobileNetV3_large_x1_0_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..53e1b2cad462bfed06a531086ca520dc2399a764
--- /dev/null
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:MobileNetV3_large_x1_0_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_serving/
+--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f4f6ce22a04468528956f283d8496cab27dfe474
--- /dev/null
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:MobileNetV3_large_x1_0_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_serving/
+--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e40702e0284bb628548d71b8234489729d8e305b
--- /dev/null
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./general_PPLCNet_x2_5_lite_v1.0_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6e0d91686ebbb876362b4b00c017f221e0d47d09
--- /dev/null
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/general_PPLCNet_x2_5_lite_v1.0_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_serving/
+--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a4280e44af04e42a6c6c162ae4f24e294c1aa625
--- /dev/null
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/general_PPLCNet_x2_5_lite_v1.0_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_serving/
+--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cdbf87b7fb698c3729439f15a476f045b69d632b
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPHGNet_small_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./PPHGNet_small_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ae8cf0920e089a4638f02ffec336ab5c99fc3339
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_pact_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7b770999367863f3057678704b2f671c81688a1c
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_pact_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77c6c26d1791508f59e7aaa0da22fd7400ba9836
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPLCNet_x1_0_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./PPLCNet_x1_0_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e576feb7fab5da4cab826e2bce1845333b8c5b79
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNet_x1_0_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNet_x1_0_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNet_x1_0_pact_serving/
+--serving_client:./deploy/paddleserving/PPLCNet_x1_0_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b13025cc526f74dac2ddc472087fac229e0c90e8
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNet_x1_0_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNet_x1_0_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNet_x1_0_pact_serving/
+--serving_client:./deploy/paddleserving/PPLCNet_x1_0_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..085a6c35ca6a5723f73687d8f33886f93702e22a
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPLCNetV2_base_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./PPLCNetV2_base_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e050fcbd450cf55b27036c1af08d35109610972
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNetV2_base_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNetV2_base_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNetV2_base_pact_serving/
+--serving_client:./deploy/paddleserving/PPLCNetV2_base_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b0e34f912c40b86332786f6fc92917c9a7bc19d7
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNetV2_base_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNetV2_base_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNetV2_base_pact_serving/
+--serving_client:./deploy/paddleserving/PPLCNetV2_base_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a7de8e40da12067278a67759a05c09794a654eaa
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:ResNet50_vd_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./ResNet50_vd_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8889726448f3c01e3c6449c223376c8f9fb5b3a6
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_pact_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4e50eff09796295c40b9e147a45b482f8c683d3d
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_pact_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..820f397a26ab5550b72387415973694cf2a8b75d
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./SwinTransformer_tiny_patch4_window7_224_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a0e017e5aac0bb1032066739725dd519e8c9bfaa
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..23602963966e3387b85404b9d4edad1fd7a75e87
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 9b78c0ff1e6a726b1d329bfa13e1cdd6cd86842f..f1047fd0e38db5a297790490a59d1fd0c486fc44 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -2,8 +2,7 @@
 FILENAME=$1
 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',
-# 'whole_infer', 'klquant_whole_infer',
-# 'cpp_infer', 'serving_infer', 'lite_infer']
+# 'whole_infer', 'cpp_infer', 'serving_infer', 'lite_infer']
 MODE=$2
@@ -171,7 +170,7 @@ if [[ ${MODE} = "lite_train_lite_infer" ]] || [[ ${MODE} = "lite_train_whole_inf
     mv val.txt val_list.txt
     cp -r train/* val/
     cd ../../
-elif [[ ${MODE} = "whole_infer" ]] || [[ ${MODE} = "klquant_whole_infer" ]]; then
+elif [[ ${MODE} = "whole_infer" ]]; then
     # download data
     cd dataset
     rm -rf ILSVRC2012