From dfd0a0d076ac2f12e13ae2f7476d67ee620e157a Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Tue, 21 Jun 2022 05:57:06 +0000
Subject: [PATCH] add 5 more KL models

---
 deploy/slim/quant_post_static.py | 2 +
 ..._normal_normal_infer_cpp_linux_gpu_cpu.txt | 18 ++++++++
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 ++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 ++++++
 ...normal_normal_infer_cpp_linux_gpu_cpu.txt} | 0
 ...rmal_normal_serving_cpp_linux_gpu_cpu.txt} | 0
 ...l_normal_serving_python_linux_gpu_cpu.txt} | 0
 ..._normal_normal_infer_cpp_linux_gpu_cpu.txt | 18 ++++++++
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 ++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 ++++++
 ..._normal_normal_infer_cpp_linux_gpu_cpu.txt | 18 ++++++++
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 ++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 ++++++
 ..._normal_normal_infer_cpp_linux_gpu_cpu.txt | 18 ++++++++
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 ++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 ++++++
 ...normal_normal_infer_cpp_linux_gpu_cpu.txt} | 0
 ...rmal_normal_serving_cpp_linux_gpu_cpu.txt} | 0
 ...l_normal_serving_python_linux_gpu_cpu.txt} | 0
 ..._normal_normal_infer_cpp_linux_gpu_cpu.txt | 18 ++++++++
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 14 ++++++
 ...al_normal_serving_python_linux_gpu_cpu.txt | 14 ++++++
 test_tipc/docs/test_inference_cpp.md | 46 ++++++++++---------
 test_tipc/docs/test_serving_infer_cpp.md | 45 ++++++++++--------
 test_tipc/docs/test_serving_infer_python.md | 45 ++++++++++--------
 25 files changed, 307 insertions(+), 61 deletions(-)
 create mode 100644 test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 rename test_tipc/config/MobileNetV3/{MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt => MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt} (100%)
 rename test_tipc/config/MobileNetV3/{MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt => MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt} (100%)
 rename test_tipc/config/MobileNetV3/{MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt => MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt} (100%)
 create mode 100644 test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 rename test_tipc/config/ResNet/{ResNet50_vd-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt => ResNet50_vd_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt} (100%)
 rename test_tipc/config/ResNet/{ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt => ResNet50_vd_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt} (100%)
 rename test_tipc/config/ResNet/{ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt => ResNet50_vd_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt} (100%)
 create mode 100644 test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
 create mode 100644 test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt

diff --git a/deploy/slim/quant_post_static.py b/deploy/slim/quant_post_static.py
index 20507c66..4e53b241 100644
--- a/deploy/slim/quant_post_static.py
+++ b/deploy/slim/quant_post_static.py
@@ -41,6 +41,8 @@ def main():
             'inference.pdmodel')) and os.path.exists(
                 os.path.join(config["Global"]["save_inference_dir"],
                              'inference.pdiparams'))
+    if "Query" in config["DataLoader"]["Eval"]:
+        config["DataLoader"]["Eval"] = config["DataLoader"]["Eval"]["Query"]
     config["DataLoader"]["Eval"]["sampler"]["batch_size"] = 1
     config["DataLoader"]["Eval"]["loader"]["num_workers"] = 0

diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..45a51dc4
--- /dev/null
+++ b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./GeneralRecognition_PPLCNet_x2_5_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/GeneralRecognition_PPLCNet_x2_5_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..2b8d160a
--- /dev/null
+++ b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/GeneralRecognition_PPLCNet_x2_5_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_serving/
+--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..bc9bbc76
--- /dev/null
+++ b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/GeneralRecognition_PPLCNet_x2_5_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_serving/
+--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
rename to test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
rename to test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
rename to test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..1c962c76
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPHGNet_small_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./PPHGNet_small_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..6890b2fd
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..786fac93
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..1489dff0
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPLCNet_x1_0_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./PPLCNet_x1_0_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..d923f9b5
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNet_x1_0_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..8e18074a
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNet_x1_0_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..1700fcd1
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPLCNetV2_base_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./PPLCNetV2_base_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..d444589b
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNetV2_base_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNetV2_base_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNetV2_base_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPLCNetV2_base_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..b909c073
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNetV2_base_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNetV2_base_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNetV2_base_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPLCNetV2_base_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
rename to test_tipc/config/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
diff --git a/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
rename to test_tipc/config/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
diff --git a/test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/ResNet/ResNet50_vd-KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
rename to test_tipc/config/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..7282e64b
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./SwinTransformer_tiny_patch4_window7_224_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..efa3cd20
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 00000000..45486c3f
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md
index e82b8ed8..db1e27d9 100644
--- a/test_tipc/docs/test_inference_cpp.md
+++ b/test_tipc/docs/test_inference_cpp.md
@@ -6,27 +6,31 @@ Linux GPU/CPU C++ 推理功能测试的主程序为`test_inference_cpp.sh`,可
 
 - 推理相关:
 
-| 算法名称 | 模型名称 | device_CPU | device_GPU |
-| :-------------: | :---------------------------------------: | :--------: | :--------: |
-| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
-| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
-| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
-| PP-ShiTu | PPShiTu_mainbody_det | 支持 | 支持 |
-| PPHGNet | PPHGNet_small | 支持 | 支持 |
-| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
-| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
-| ResNet | ResNet50 | 支持 | 支持 |
-| ResNet | ResNet50_vd | 支持 | 支持 |
-| ResNet | ResNet50_vd_KL | 支持 | 支持 |
-| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| 算法名称 | 模型名称 | device_CPU | device_GPU |
+| :-------------: | :----------------------------------------: | :--------: | :--------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 |
+| PPHGNet | PPHGNet_small_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0_KL | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base_KL | 支持 | 支持 |
+| ResNet | ResNet50 | 支持 | 支持 |
+| ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd_KL | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_KL | 支持 | 支持 |
 
 ## 2. 测试流程(以**ResNet50**为例)
diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md
index 2018f4c9..370f96a2 100644
--- a/test_tipc/docs/test_serving_infer_cpp.md
+++ b/test_tipc/docs/test_serving_infer_cpp.md
@@ -7,26 +7,31 @@ Linux GPU/CPU C++ 服务化部署测试的主程序为`test_serving_infer_cpp.sh
 
 - 推理相关:
 
-| 算法名称 | 模型名称 | device_CPU | device_GPU |
-| :-------------: | :---------------------------------------: | :--------: | :--------: |
-| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
-| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
-| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
-| PPHGNet | PPHGNet_small | 支持 | 支持 |
-| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
-| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
-| ResNet | ResNet50 | 支持 | 支持 |
-| ResNet | ResNet50_vd | 支持 | 支持 |
-| ResNet | ResNet50_vd_KL | 支持 | 支持 |
-| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| 算法名称 | 模型名称 | device_CPU | device_GPU |
+| :-------------: | :----------------------------------------: | :--------: | :--------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 |
+| PPHGNet | PPHGNet_small_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0_KL | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base_KL | 支持 | 支持 |
+| ResNet | ResNet50 | 支持 | 支持 |
+| ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd_KL | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_KL | 支持 | 支持 |
 
 ## 2. 测试流程
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
index 1ba4c151..3662563f 100644
--- a/test_tipc/docs/test_serving_infer_python.md
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -7,26 +7,31 @@ Linux GPU/CPU PYTHON 服务化部署测试的主程序为`test_serving_infer_pyt
 
 - 推理相关:
 
-| 算法名称 | 模型名称 | device_CPU | device_GPU |
-| :-------------: | :---------------------------------------: | :--------: | :--------: |
-| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
-| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
-| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
-| PPHGNet | PPHGNet_small | 支持 | 支持 |
-| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
-| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
-| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
-| ResNet | ResNet50 | 支持 | 支持 |
-| ResNet | ResNet50_vd | 支持 | 支持 |
-| ResNet | ResNet50_vd_KL | 支持 | 支持 |
-| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| 算法名称 | 模型名称 | device_CPU | device_GPU |
+| :-------------: | :----------------------------------------: | :--------: | :--------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 |
+| PPHGNet | PPHGNet_small_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0_KL | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base_KL | 支持 | 支持 |
+| ResNet | ResNet50 | 支持 | 支持 |
+| ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd_KL | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_KL | 支持 | 支持 |
 
 ## 2. 测试流程
-- 
GitLab
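Note on the deploy/slim/quant_post_static.py hunk above: retrieval-style configs such as the GeneralRecognition ones nest their evaluation dataloader under DataLoader.Eval.Query, while classification configs keep sampler/loader directly under DataLoader.Eval. The sketch below is illustrative only (the helper name normalize_eval_dataloader and the sample config dict are not part of the patch); it mirrors what the two added lines do before the script pins batch_size to 1 and num_workers to 0 for post-training KL quantization.

# Hypothetical, minimal sketch of the guard added in deploy/slim/quant_post_static.py.
def normalize_eval_dataloader(config: dict) -> dict:
    # Retrieval-style configs keep their eval dataloader under Eval.Query;
    # collapse it so the rest of the script reads sampler/loader settings uniformly.
    if "Query" in config["DataLoader"]["Eval"]:
        config["DataLoader"]["Eval"] = config["DataLoader"]["Eval"]["Query"]
    # Post-training (KL) quantization feeds calibration samples one at a time.
    config["DataLoader"]["Eval"]["sampler"]["batch_size"] = 1
    config["DataLoader"]["Eval"]["loader"]["num_workers"] = 0
    return config

if __name__ == "__main__":
    sample = {
        "DataLoader": {
            "Eval": {
                "Query": {
                    "sampler": {"batch_size": 64},
                    "loader": {"num_workers": 4},
                }
            }
        }
    }
    print(normalize_eval_dataloader(sample)["DataLoader"]["Eval"])
    # -> {'sampler': {'batch_size': 1}, 'loader': {'num_workers': 0}}

The new *_KL configs are exercised through the existing TIPC entry points documented in the three .md files touched here (test_tipc/test_inference_cpp.sh, test_tipc/test_serving_infer_cpp.sh, test_tipc/test_serving_infer_python.sh), with the corresponding config file passed as the argument, as those docs describe.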