diff --git a/test_tipc/README.md b/test_tipc/README.md
index 4869f6e11ddc78b7c05c7805bfb25ba7e41b683d..42fcb8faf62083241386a2d3767e880b695726b0 100644
--- a/test_tipc/README.md
+++ b/test_tipc/README.md
@@ -35,10 +35,12 @@
│ ├── MobileNetV3 # test config directory for the MobileNetV3 series
│ │ ├── MobileNetV3_large_x1_0_train_infer_python.txt # basic training and inference config
│ │ ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt # multi-node multi-GPU training and inference config
+│ │ ├── MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt # C++ inference test config
│ │ └── MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt # mixed-precision training and inference config
│ └── ResNet # test config directory for the ResNet series
│ ├── ResNet50_vd_train_infer_python.txt # basic training and inference config
│ ├── ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt # multi-node multi-GPU training and inference config
+│ ├── ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt # C++ inference test config
│ └── ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt # mixed-precision training and inference config
| ......
├── docs
@@ -99,7 +101,7 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/MobileNetV3/Mo
## 4 Start testing
-The feature tests cover training-related options such as mixed precision, pruning, and quantization, as well as inference-related options such as mkldnn and Tensorrt. Click the corresponding links below for more details and tutorials:
+The feature tests cover training-related options such as mixed precision, pruning, and quantization, as well as inference-related options such as mkldnn and Tensorrt. Click the corresponding links below for more details and tutorials:
- [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation, and inference, including pruning, quantization, and distillation.
- [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a1f69fb21c693a5e532148e9829d230478ac218
--- /dev/null
+++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:MobileNetV3_large_x1_0
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/MobileNetV3_large_x1_0_infer/inference.pdmodel
+cls_params_path:./deploy/models/MobileNetV3_large_x1_0_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a025e80af56d4dcd53fadf9221bee1c92f68790
--- /dev/null
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:general_PPLCNet_x2_5_lite_v1.0
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel
+cls_params_path:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b1067c2561c71fb357f0455f32e6a9ceb3129861
--- /dev/null
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:picodet_PPLCNet_x2_5_mainbody_lite_v1.0
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdmodel
+cls_params_path:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..566f34530bf99da8ab0f87bd64d0d5e377a008be
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPHGNet_small
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPHGNet_small_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPHGNet_small_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0420a43eb650fc27ff3840d9ea153787243f379c
--- /dev/null
+++ b/test_tipc/config/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPHGNet_tiny
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPHGNet_tiny_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPHGNet_tiny_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..93d6f352e4c4993e31b10952581fb8f1fe496cbe
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x0_25
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x0_25_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x0_25_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6f41fe3db1088c73016d87894e7e263d0d825d52
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x0_35
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x0_35_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x0_35_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e2ae4d14d1281674df7ba7e016ffcafcb303f2e7
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x0_5
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x0_5_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x0_5_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bf377e7a470becc98665136cc9cde0a5470b5aff
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x0_75
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x0_75_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x0_75_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f16deb1b0c1eeab3888799b335ef6f992d3986d3
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x1_0
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x1_0_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x1_0_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fc3a2e75b37327975cdd1b110b5d0dafb3bbe5bd
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x1_5
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x1_5_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x1_5_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..52cf9b0d0e6cb135757c4f4e7ac4f60f4e10d374
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x2_0
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x2_0_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x2_0_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b26c82cec352558b6e99e4d715b0de7b70780398
--- /dev/null
+++ b/test_tipc/config/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNet_x2_5
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNet_x2_5_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNet_x2_5_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e2ae4d14d1281674df7ba7e016ffcafcb303f2e7
--- /dev/null
+++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:PPLCNetV2_base
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/PPLCNetV2_base_infer/inference.pdmodel
+cls_params_path:./deploy/models/PPLCNetV2_base_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..60d9a40436b4e85ddb918c8e3a2939d71ef04f66
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:ResNet50
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/ResNet50_infer/inference.pdmodel
+cls_params_path:./deploy/models/ResNet50_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
index 51c73f13d46c3a8793f9b5db92a74e0aa7b4e599..dfdd5b7f55f1397a7568c72b260fa3f4c9a916ad 100644
--- a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -1,18 +1,12 @@
-===========================cpp_infer_params===========================
+# model load config
model_name:ResNet50_vd
-cpp_infer_type:cls
-cls_inference_model_dir:./cls_inference/
-det_inference_model_dir:
-cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/ResNet50_vd_inference.tar
-det_inference_url:
-infer_quant:False
-inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
use_gpu:True|False
-enable_mkldnn:True|False
-cpu_threads:1|6
-batch_size:1
-use_tensorrt:False|True
-precision:fp32|fp16
-image_dir:./dataset/ILSVRC2012/val
-benchmark:True
-generate_yaml_cmd:python3 test_tipc/generate_cpp_yaml.py
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/ResNet50_vd_infer/inference.pdmodel
+cls_params_path:./deploy/models/ResNet50_vd_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8a76d870e8a8ffc39877d658e6a23e58611fa5e9
--- /dev/null
+++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,12 @@
+# model load config
+model_name:SwinTransformer_tiny_patch4_window7_224
+use_gpu:True|False
+gpu_id:0
+gpu_mem:4000
+cpu_math_library_num_threads:10
+
+# cls config
+cls_model_path:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/inference.pdmodel
+cls_params_path:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/inference.pdiparams
+resize_short_size:256
+crop_size:224
\ No newline at end of file
diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md
index eabf8774d37454461fe28a5fc1d5ed0b3135fcad..37a73a7067f5b6e2f8e7aaca698b224a42be03ab 100644
--- a/test_tipc/docs/test_inference_cpp.md
+++ b/test_tipc/docs/test_inference_cpp.md
@@ -1,86 +1,311 @@
-# C++ inference function test
+# Linux GPU/CPU C++ inference function test
-The main entry point for the C++ inference tests is `test_inference_cpp.sh`, which tests model inference based on the C++ prediction library.
+The main entry point for the Linux GPU/CPU C++ inference tests is `test_inference_cpp.sh`, which tests inference based on the C++ prediction engine.
## 1. Summary of test results
-Depending on whether quantization is used in training, the tested models fall into `normal models` and `quantized models`; the C++ inference features of the two classes are summarized as follows:
+- Inference:
-| model type | device | batchsize | tensorrt | mkldnn | cpu multi-threading |
-| ---- | ---- | ---- | :----: | :----: | :----: |
-| normal model | GPU | 1/6 | fp32/fp16 | - | - |
-| normal model | CPU | 1/6 | - | fp32 | supported |
-| quantized model | GPU | 1/6 | int8 | - | - |
-| quantized model | CPU | 1/6 | - | int8 | supported |
+| Algorithm | Model | device_CPU | device_GPU |
+| :----: | :----: | :----: | :----: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | Supported | Supported |
+| PP-ShiTu | PPShiTu_general_rec | Supported | Supported |
+| PP-ShiTu | PPShiTu_mainbody_det | Not supported yet | Not supported yet |
+| PPHGNet | PPHGNet_small | Supported | Supported |
+| PPHGNet | PPHGNet_tiny | Supported | Supported |
+| PPLCNet | PPLCNet_x0_25 | Supported | Supported |
+| PPLCNet | PPLCNet_x0_35 | Supported | Supported |
+| PPLCNet | PPLCNet_x0_5 | Supported | Supported |
+| PPLCNet | PPLCNet_x0_75 | Supported | Supported |
+| PPLCNet | PPLCNet_x1_0 | Supported | Supported |
+| PPLCNet | PPLCNet_x1_5 | Supported | Supported |
+| PPLCNet | PPLCNet_x2_0 | Supported | Supported |
+| PPLCNet | PPLCNet_x2_5 | Supported | Supported |
+| PPLCNetV2 | PPLCNetV2_base | Supported | Supported |
+| ResNet | ResNet50 | Supported | Supported |
+| ResNet | ResNet50_vd | Supported | Supported |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | Supported | Supported |
-## 2. Test procedure
-For environment setup, configure the TIPC runtime environment as described in the [documentation](./install.md).
+## 2. Test procedure (using **ResNet50** as an example)
-### 2.1 Functional test
-First run `prepare.sh` to prepare the data and models, then run `test_inference_cpp.sh` for testing; log files with the `cpp_infer_*.log` suffix are finally generated under the `test_tipc/output` directory.
+
+The steps below (preparing the data, preparing the inference model, compiling opencv, compiling or downloading Paddle Inference, and compiling the C++ inference demo) are all scripted in `prepare.sh` and run automatically.
+
+### 2.1 Prepare the data and inference model
+
+#### 2.1.1 Prepare the data
+
+By default, `./deploy/images/ILSVRC2012_val_00000010.jpeg` is used as the test input image.
+
+#### 2.1.2 Prepare the inference model
+
+* If you already have a trained model, refer to [model export](../../docs/zh_CN/inference_deployment/export_model.md) to export the `inference model`, and set the export path to `./deploy/models/ResNet50_infer`.
+After exporting, the file structure is as follows:
```shell
-bash test_tipc/prepare.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt cpp_infer
+./deploy/models/ResNet50_infer/
+├── inference.pdmodel
+├── inference.pdiparams
+└── inference.pdiparams.info
+```
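+
+For reference, a hypothetical export command is sketched below (a sketch only: the training config and pretrained-weights paths are placeholders; see the export doc linked above for the exact usage of `tools/export_model.py`):
+
+```shell
+# assumption: standard PaddleClas export entry point; adjust config/weights to your run
+python3 tools/export_model.py \
+    -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml \
+    -o Global.pretrained_model=./output/ResNet50/best_model \
+    -o Global.save_inference_dir=./deploy/models/ResNet50_infer
+```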
+
+### 2.2 Prepare the environment
+
+#### 2.2.1 Runtime setup
+
+Set up a suitable build and runtime environment, including the compiler and base libraries such as cuda. A docker environment is recommended; see the [reference link](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/docker/linux-docker.html).
-# Usage 1:
-bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
-# Usage 2: specify the GPU card for inference; the third argument is the GPU card id
-bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 1
+#### 2.2.2 Compile the opencv library
+
+* First, download the opencv source code for Linux from the opencv website. Taking version 3.4.7 as an example, the download and extraction commands are as follows:
+
+```
+cd deploy/cpp
+wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
+tar -xvf 3.4.7.tar.gz
```
-After running the inference command, run logs are saved automatically under the `test_tipc/output` folder, including the following files:
+* To compile opencv, first set the opencv source path (`root_path`) and the install path (`install_path`); `root_path` is the path of the downloaded opencv source code, and `install_path` is where opencv will be installed. In this example, the source path is `opencv-3.4.7/` under the current directory.
```shell
-test_tipc/output/
-|- results_cpp.log # log of each command's run status
-|- cls_cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log # inference log on CPU, MKL-DNN off, 1 thread, batch_size=1
-|- cls_cpp_infer_cpu_usemkldnn_False_threads_6_precision_fp32_batchsize_1.log # inference log on CPU, MKL-DNN off, 6 threads, batch_size=1
-|- cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log # fp32 inference log on GPU, TensorRT off, batch_size=1
-|- cls_cpp_infer_gpu_usetrt_True_precision_fp16_batchsize_1.log # fp16 inference log on GPU, TensorRT on, batch_size=1
-......
+cd ./opencv-3.4.7
+export root_path=$PWD
+export install_path=${root_path}/opencv3
```
-results_cpp.log records the run status of each command; on success it prints:
+* Then, in the opencv source directory, compile with the following commands.
+
+```shell
+rm -rf build
+mkdir build
+cd build
+
+cmake .. \
+ -DCMAKE_INSTALL_PREFIX=${install_path} \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DBUILD_SHARED_LIBS=OFF \
+ -DWITH_IPP=OFF \
+ -DBUILD_IPP_IW=OFF \
+ -DWITH_LAPACK=OFF \
+ -DWITH_EIGEN=OFF \
+ -DCMAKE_INSTALL_LIBDIR=lib64 \
+ -DWITH_ZLIB=ON \
+ -DBUILD_ZLIB=ON \
+ -DWITH_JPEG=ON \
+ -DBUILD_JPEG=ON \
+ -DWITH_PNG=ON \
+ -DBUILD_PNG=ON \
+ -DWITH_TIFF=ON \
+ -DBUILD_TIFF=ON
+
+make -j
+make install
```
-Run successfully with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml 2>&1|tee test_tipc/output/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log
-......
+
+* After `make install` completes, the opencv header files and library files are generated in this folder for the subsequent build.
+
+Taking opencv 3.4.7 as an example, the final file structure under the install path is shown below. **Note**: the structure may differ for other opencv versions.
+
+```shell
+opencv3/
+├── bin     : executables
+├── include : header files
+├── lib64   : library files
+└── share   : some third-party libraries
+```
+
+#### 2.2.3 Download or compile the Paddle inference library
+
+* There are two ways to obtain the Paddle inference library, detailed below.
+
+##### Compile the inference library from source
+* To get the latest inference library features, clone the latest code from the Paddle github repository and compile the library from source.
+* Follow the instructions on the [Paddle inference library website](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16) to fetch the Paddle code from github and compile it into the latest inference library. Get the code with git as follows.
+
+```shell
+git clone https://github.com/PaddlePaddle/Paddle.git
+```
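+
+Optionally, check out a release tag so the source build matches a known version (the tag name below is an assumption; pick whichever release you need):
+
+```shell
+cd Paddle
+git checkout v2.2.2  # assumed tag, matching the 2.2.2 prebuilt library referenced below
+cd ..
+```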
+
+* After entering the Paddle directory, compile with the following commands.
+
+```shell
+rm -rf build
+mkdir build
+cd build
+
+cmake .. \
+ -DWITH_CONTRIB=OFF \
+ -DWITH_MKL=ON \
+ -DWITH_MKLDNN=ON \
+ -DWITH_TESTING=OFF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DWITH_INFERENCE_API_TEST=OFF \
+ -DON_INFER=ON \
+ -DWITH_PYTHON=ON
+make -j
+make inference_lib_dist
```
-On failure, it prints:
+
+For more build options, see the Paddle C++ inference library website: [https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16).
+
+
+* After compilation, the following files and folders can be found under `build/paddle_inference_install_dir/`.
+
```
-Run failed with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml 2>&1|tee test_tipc/output/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log
-......
+build/paddle_inference_install_dir/
+├── CMakeCache.txt
+├── paddle
+├── third_party
+└── version.txt
```
-Based on the contents of results_cpp.log, it is easy to determine which command failed.
+Here `paddle` is the Paddle library required for the subsequent C++ inference, and `version.txt` contains the version information of the current inference library.
+
+##### Download directly
-### 2.2 Accuracy test
+* The [Paddle inference library website](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) provides Linux inference libraries for different cuda versions; check the website and choose a suitable version.
-Use the compare_results.py script to check whether the model predictions meet expectations. The main steps are:
-- extract the predicted coordinates from the logs;
-- extract the saved coordinate results from local files;
-- compare the two results against the accuracy expectation; an error is raised when the difference exceeds the thresholds.
+ Taking the `manylinux_cuda11.1_cudnn8.1_avx_mkl_trt7_gcc8.2` build as an example, download and extract it with:
+
+
+```shell
+wget https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda11.1_cudnn8.1.1_trt7.2.3.4/paddle_inference.tgz
+
+tar -xvf paddle_inference.tgz
+```
+
+This generates a `paddle_inference/` subfolder in the current directory, whose contents are the same as those of the paddle_inference_install_dir described above.
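+
+As a quick sanity check (assuming the layout above), the version of the fetched or built library can be inspected:
+
+```shell
+cat paddle_inference/version.txt
+```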
+
+
+#### 2.2.4 Compile the C++ inference demo
+
+* The build command is as follows; replace the paths of the Paddle C++ inference library, opencv, and the other dependencies with the actual paths on your machine.
+
+
+```shell
+# run the following command under deploy/cpp
+bash tools/build.sh
+```
+
+Specifically, the contents of `tools/build.sh` are as follows.
+
+```shell
+OPENCV_DIR=your_opencv_dir
+LIB_DIR=your_paddle_inference_dir
+CUDA_LIB_DIR=your_cuda_lib_dir
+CUDNN_LIB_DIR=your_cudnn_lib_dir
+TENSORRT_DIR=your_tensorrt_lib_dir
+
+BUILD_DIR=build
+rm -rf ${BUILD_DIR}
+mkdir ${BUILD_DIR}
+cd ${BUILD_DIR}
+cmake .. \
+ -DPADDLE_LIB=${LIB_DIR} \
+ -DWITH_MKL=ON \
+ -DDEMO_NAME=clas_system \
+ -DWITH_GPU=OFF \
+ -DWITH_STATIC_LIB=OFF \
+ -DWITH_TENSORRT=OFF \
+ -DTENSORRT_DIR=${TENSORRT_DIR} \
+ -DOPENCV_DIR=${OPENCV_DIR} \
+ -DCUDNN_LIB=${CUDNN_LIB_DIR} \
+ -DCUDA_LIB=${CUDA_LIB_DIR} \
+
+make -j
+```
+
+In the commands above,
+
+* `OPENCV_DIR` is the opencv build/install path (in this example, set it to the path of the `opencv-3.4.7/opencv3` folder);
+
+* `LIB_DIR` is the path of the downloaded Paddle inference library (the `paddle_inference` folder) or of the compiled one (the `build/paddle_inference_install_dir` folder);
+
+* `CUDA_LIB_DIR` is the cuda library path, usually `/usr/local/cuda/lib64` in docker;
+
+* `CUDNN_LIB_DIR` is the cudnn library path, usually `/usr/lib64` in docker;
+
+* `TENSORRT_DIR` is the tensorrt library path, usually `/usr/local/TensorRT-7.2.3.4/` in docker; TensorRT must be used together with a GPU.
+
+After the commands above finish and the build completes, a `build` folder is generated under the current path, containing an executable named `clas_system`.
+
+
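+The built binary can also be invoked by hand against the demo config; this is the same command the test harness runs (see the logs in section 2.3):
+
+```shell
+# run the C++ classifier directly with the demo config
+./deploy/cpp/build/clas_system -c ./deploy/configs/inference_cls.yaml
+```
+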
+* Alternatively, run the following command to automatically complete everything required in the environment preparation above:
+```shell
+bash test_tipc/prepare.sh test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt cpp_infer
+```
+### 2.3 Functional test
+
+
+The test method is shown below; to test a different model, simply switch to your own parameter config file to complete the corresponding test.
-#### Usage
-Run:
```shell
-python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/cls_cpp_*.txt --log_file=./test_tipc/output/cls_cpp_*.log --atol=1e-3 --rtol=1e-3
+bash test_tipc/test_inference_cpp.sh ${your_params_file}
```
-Parameters:
-- gt_file: path to the previously saved prediction results; *.txt files are supported and indexed automatically; by default the files are stored under the test_tipc/result/ folder
-- log_file: path to the inference logs saved by running test_tipc/test_inference_cpp.sh in infer mode; the logs contain prediction results such as text boxes, predicted text, and classes; the cpp_infer_*.log pattern is also supported
-- atol: absolute tolerance
-- rtol: relative tolerance
+Taking the `Linux GPU/CPU C++ inference test` of `ResNet50` as an example, the command is as follows.
-#### Results
+```shell
+bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+```
-Normal run output looks like the following figure:
-
+The output below indicates that the commands ran successfully.
-Run output when the results are inconsistent:
-
+```shell
+Run successfully with command - ./deploy/cpp/build/clas_system -c ./deploy/configs/inference_cls.yaml > ./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_gpu.log 2>&1 !
+Run successfully with command - ./deploy/cpp/build/clas_system -c ./deploy/configs/inference_cls.yaml > ./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_cpu.log 2>&1 !
+```
+The final results are printed in the log, as shown below:
+```log
+You are using Paddle compiled with TensorRT, but TensorRT dynamic library is not found. Ignore this if TensorRT is not needed.
+=======Paddle Class inference config======
+Global:
+ infer_imgs: ./deploy/images/ILSVRC2012_val_00000010.jpeg
+ inference_model_dir: ./deploy/models/ResNet50_infer
+ batch_size: 1
+ use_gpu: True
+ enable_mkldnn: True
+ cpu_num_threads: 10
+ enable_benchmark: True
+ use_fp16: False
+ ir_optim: True
+ use_tensorrt: False
+ gpu_mem: 8000
+ enable_profile: False
+PreProcess:
+ transform_ops:
+ - ResizeImage:
+ resize_short: 256
+ - CropImage:
+ size: 224
+ - NormalizeImage:
+ scale: 0.00392157
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ""
+ channel_num: 3
+ - ToCHWImage: ~
+PostProcess:
+ main_indicator: Topk
+ Topk:
+ topk: 5
+ class_id_map_file: ./ppcls/utils/imagenet1k_label_list.txt
+ SavePreLabel:
+ save_dir: ./pre_label/
+=======End of Paddle Class inference config======
+img_file_list length: 1
+Current image path: ./deploy/images/ILSVRC2012_val_00000010.jpeg
+Current total inferen time cost: 5449.39 ms.
+ Top1: class_id: 153, score: 0.4144, label: Maltese dog, Maltese terrier, Maltese
+ Top2: class_id: 332, score: 0.3909, label: Angora, Angora rabbit
+ Top3: class_id: 229, score: 0.0514, label: Old English sheepdog, bobtail
+ Top4: class_id: 204, score: 0.0430, label: Lhasa, Lhasa apso
+ Top5: class_id: 265, score: 0.0420, label: toy poodle
-## 3. More tutorials
+```
+Detailed logs are located at `./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_gpu.log` and `./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_cpu.log`.
-This document is for functional testing; for a more detailed C++ inference tutorial, see [server-side C++ inference](../../docs/zh_CN/inference_deployment/)
+If a run fails, the failure log and the corresponding command are also printed in the terminal; the failing command can be used to analyze the cause.
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 129e439562980a233924995141ea864d052f6dfb..36ff584cd969d8ec1349f80d5020a28f7214bc74 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -1,303 +1,65 @@
#!/bin/bash
source test_tipc/common_func.sh
+# parse the key from a "key:value" config line
+function func_parser_key_cpp(){
+ strs=$1
+ IFS=":"
+ array=(${strs})
+ tmp=${array[0]}
+ echo ${tmp}
+}
+
+# parse the value from a "key:value" config line
+function func_parser_value_cpp(){
+ strs=$1
+ IFS=":"
+ array=(${strs})
+ tmp=${array[1]}
+ echo ${tmp}
+}
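+# example (config lines have the form "key:value"):
+#   func_parser_key_cpp   "use_gpu:True|False"   # prints "use_gpu"
+#   func_parser_value_cpp "use_gpu:True|False"   # prints "True|False"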
+
FILENAME=$1
-GPUID=$2
-if [[ ! $GPUID ]];then
- GPUID=0
-fi
-dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
+
# parser params
+dataline=$(awk 'NR==1, NR==14{print}' $FILENAME)  # read the leading key:value lines of the config file
IFS=$'\n'
lines=(${dataline})
-# parser cpp inference model
-model_name=$(func_parser_value "${lines[1]}")
-cpp_infer_type=$(func_parser_value "${lines[2]}")
-cpp_infer_model_dir=$(func_parser_value "${lines[3]}")
-cpp_det_infer_model_dir=$(func_parser_value "${lines[4]}")
-cpp_infer_is_quant=$(func_parser_value "${lines[7]}")
-# parser cpp inference
-inference_cmd=$(func_parser_value "${lines[8]}")
-cpp_use_gpu_list=$(func_parser_value "${lines[9]}")
-cpp_use_mkldnn_list=$(func_parser_value "${lines[10]}")
-cpp_cpu_threads_list=$(func_parser_value "${lines[11]}")
-cpp_batch_size_list=$(func_parser_value "${lines[12]}")
-cpp_use_trt_list=$(func_parser_value "${lines[13]}")
-cpp_precision_list=$(func_parser_value "${lines[14]}")
-cpp_image_dir_value=$(func_parser_value "${lines[15]}")
-cpp_benchmark_value=$(func_parser_value "${lines[16]}")
-generate_yaml_cmd=$(func_parser_value "${lines[17]}")
-transform_index_cmd=$(func_parser_value "${lines[18]}")
-
-LOG_PATH="./test_tipc/output"
+# parser load config
+model_name=$(func_parser_value_cpp "${lines[1]}")
+use_gpu_key=$(func_parser_key_cpp "${lines[2]}")
+use_gpu_value=$(func_parser_value_cpp "${lines[2]}")
+LOG_PATH="./test_tipc/output/${model_name}/infer_cpp"
mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_cpp.log"
-# generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
-
-function func_shitu_cpp_inference(){
- IFS='|'
- _script=$1
- _model_dir=$2
- _log_path=$3
- _img_dir=$4
- _flag_quant=$5
- # inference
-
- for use_gpu in ${cpp_use_gpu_list[*]}; do
- if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
- for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
- if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
- continue
- fi
- for threads in ${cpp_cpu_threads_list[*]}; do
- for batch_size in ${cpp_batch_size_list[*]}; do
- precision="fp32"
- if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
- precison="int8"
- fi
- _save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
-
- command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
- eval $command
- eval $transform_index_cmd
- command="${_script} 2>&1|tee ${_save_log_path}"
- eval $command
- last_status=${PIPESTATUS[0]}
- status_check $last_status "${command}" "${status_log}"
- done
- done
- done
- elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
- for use_trt in ${cpp_use_trt_list[*]}; do
- for precision in ${cpp_precision_list[*]}; do
- if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
- continue
- fi
- if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
- continue
- fi
- if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
- continue
- fi
- for batch_size in ${cpp_batch_size_list[*]}; do
- _save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
- command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
- eval $command
- eval $transform_index_cmd
- command="${_script} 2>&1|tee ${_save_log_path}"
- eval $command
- last_status=${PIPESTATUS[0]}
- status_check $last_status "${_script}" "${status_log}"
- done
- done
- done
- else
- echo "Does not support hardware other than CPU and GPU Currently!"
- fi
- done
-}
+status_log="${LOG_PATH}/results_infer_cpp.log"
-function func_cls_cpp_inference(){
+# line numbers of inference_model_dir and use_gpu inside deploy/configs/inference_cls.yaml (patched via sed below)
+line_inference_model_dir=3
+line_use_gpu=5
+function func_infer_cpp(){
+ # inference cpp
IFS='|'
- _script=$1
- _model_dir=$2
- _log_path=$3
- _img_dir=$4
- _flag_quant=$5
- # inference
-
- for use_gpu in ${cpp_use_gpu_list[*]}; do
- if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
- for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
- if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
- continue
- fi
- for threads in ${cpp_cpu_threads_list[*]}; do
- for batch_size in ${cpp_batch_size_list[*]}; do
- precision="fp32"
- if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
- precison="int8"
- fi
- _save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
-
- command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
- eval $command
- command1="${_script} 2>&1|tee ${_save_log_path}"
- eval ${command1}
- last_status=${PIPESTATUS[0]}
- status_check $last_status "${command1}" "${status_log}"
- done
- done
- done
- elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
- for use_trt in ${cpp_use_trt_list[*]}; do
- for precision in ${cpp_precision_list[*]}; do
- if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
- continue
- fi
- if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
- continue
- fi
- if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
- continue
- fi
- for batch_size in ${cpp_batch_size_list[*]}; do
- _save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
- command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
- eval $command
- command="${_script} 2>&1|tee ${_save_log_path}"
- eval $command
- last_status=${PIPESTATUS[0]}
- status_check $last_status "${command}" "${status_log}"
- done
- done
- done
+ for use_gpu in ${use_gpu_value[*]}; do
+ if [[ ${use_gpu} = "True" ]]; then
+ _save_log_path="${LOG_PATH}/infer_cpp_use_gpu.log"
else
- echo "Does not support hardware other than CPU and GPU Currently!"
+ _save_log_path="${LOG_PATH}/infer_cpp_use_cpu.log"
fi
+ # run infer cpp
+ inference_cpp_cmd="./deploy/cpp/build/clas_system"
+ inference_cpp_cfg="./deploy/configs/inference_cls.yaml"
+ # rewrite inference_model_dir (yaml line ${line_inference_model_dir}) and use_gpu (yaml line ${line_use_gpu}) in the config
+ set_model_name_cmd="sed -i '${line_inference_model_dir}s#: .*#: ./deploy/models/${model_name}_infer#' '${inference_cpp_cfg}'"
+ set_use_gpu_cmd="sed -i '${line_use_gpu}s#: .*#: ${use_gpu}#' '${inference_cpp_cfg}'"
+ eval $set_model_name_cmd
+ eval $set_use_gpu_cmd
+ infer_cpp_full_cmd="${inference_cpp_cmd} -c ${inference_cpp_cfg} > ${_save_log_path} 2>&1 "
+ eval $infer_cpp_full_cmd
+ last_status=${PIPESTATUS[0]}
+ status_check $last_status "${infer_cpp_full_cmd}" "${status_log}" "${model_name}"
done
}
+echo "################### run test cpp inference ###################"
-if [[ $cpp_infer_type == "cls" ]]; then
- cd deploy/cpp
-elif [[ $cpp_infer_type == "shitu" ]]; then
- cd deploy/cpp_shitu
-else
- echo "Only support cls and shitu"
- exit 0
-fi
-
-if [[ $cpp_infer_type == "shitu" ]]; then
- echo "################### update cmake ###################"
- wget -nc https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0.tar.gz
- tar xf cmake-3.22.0.tar.gz
- cd ./cmake-3.22.0
- export root_path=$PWD
- export install_path=${root_path}/cmake
- eval "./bootstrap --prefix=${install_path}"
- make -j
- make install
- export PATH=${install_path}/bin:$PATH
- cd ..
- echo "################### update cmake done ###################"
-
- echo "################### build faiss ###################"
- apt-get install -y libopenblas-dev
- git clone https://github.com/facebookresearch/faiss.git
- cd faiss
- export faiss_install_path=$PWD/faiss_install
- eval "cmake -B build . -DFAISS_ENABLE_PYTHON=OFF -DCMAKE_INSTALL_PREFIX=${faiss_install_path}"
- make -C build -j faiss
- make -C build install
- cd ..
-fi
-
-if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
- echo "################### build opencv skipped ###################"
-else
- echo "################### build opencv ###################"
- rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
- wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
- tar -xf opencv-3.4.7.tar.gz
-
- cd opencv-3.4.7/
- install_path=$(pwd)/opencv3
-
- rm -rf build
- mkdir build
- cd build
-
- cmake .. \
- -DCMAKE_INSTALL_PREFIX=${install_path} \
- -DCMAKE_BUILD_TYPE=Release \
- -DBUILD_SHARED_LIBS=OFF \
- -DWITH_IPP=OFF \
- -DBUILD_IPP_IW=OFF \
- -DWITH_LAPACK=OFF \
- -DWITH_EIGEN=OFF \
- -DCMAKE_INSTALL_LIBDIR=lib64 \
- -DWITH_ZLIB=ON \
- -DBUILD_ZLIB=ON \
- -DWITH_JPEG=ON \
- -DBUILD_JPEG=ON \
- -DWITH_PNG=ON \
- -DBUILD_PNG=ON \
- -DWITH_TIFF=ON \
- -DBUILD_TIFF=ON
-
- make -j
- make install
- cd ../../
- echo "################### build opencv finished ###################"
-fi
-
-echo "################### build PaddleClas demo ####################"
-OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
-# LIB_DIR=/work/project/project/test/paddle_inference/
-LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
-CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
-CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
-
-BUILD_DIR=build
-rm -rf ${BUILD_DIR}
-mkdir ${BUILD_DIR}
-cd ${BUILD_DIR}
-if [[ $cpp_infer_type == cls ]]; then
- cmake .. \
- -DPADDLE_LIB=${LIB_DIR} \
- -DWITH_MKL=ON \
- -DWITH_GPU=ON \
- -DWITH_STATIC_LIB=OFF \
- -DWITH_TENSORRT=OFF \
- -DOPENCV_DIR=${OPENCV_DIR} \
- -DCUDNN_LIB=${CUDNN_LIB_DIR} \
- -DCUDA_LIB=${CUDA_LIB_DIR} \
- -DTENSORRT_DIR=${TENSORRT_DIR}
-else
- cmake ..\
- -DPADDLE_LIB=${LIB_DIR} \
- -DWITH_MKL=ON \
- -DWITH_GPU=ON \
- -DWITH_STATIC_LIB=OFF \
- -DWITH_TENSORRT=OFF \
- -DOPENCV_DIR=${OPENCV_DIR} \
- -DCUDNN_LIB=${CUDNN_LIB_DIR} \
- -DCUDA_LIB=${CUDA_LIB_DIR} \
- -DTENSORRT_DIR=${TENSORRT_DIR} \
- -DFAISS_DIR=${faiss_install_path} \
- -DFAISS_WITH_MKL=OFF
-fi
-make -j
-cd ../../../
-# cd ../../
-echo "################### build PaddleClas demo finished ###################"
-
-
-# set cuda device
-# GPUID=$2
-# if [ ${#GPUID} -le 0 ];then
-# env="export CUDA_VISIBLE_DEVICES=0"
-# else
-# env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-# fi
-# set CUDA_VISIBLE_DEVICES
-# eval $env
-
-
-echo "################### run test ###################"
-export Count=0
-IFS="|"
-infer_quant_flag=(${cpp_infer_is_quant})
-for infer_model in ${cpp_infer_model_dir[*]}; do
- #run inference
- is_quant=${infer_quant_flag[Count]}
- if [[ $cpp_infer_type == "cls" ]]; then
- func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
- else
- func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
- fi
- Count=$(($Count + 1))
-done
+func_infer_cpp
\ No newline at end of file