Commit f2b20cff authored by: H HydrogenSulfate

add paddle2onnx tipc chain

Parent 4091592c
...@@ -35,11 +35,14 @@
│   ├── MobileNetV3                # test config directory for the MobileNetV3 series
│   │   ├── MobileNetV3_large_x1_0_train_infer_python.txt                                     # basic training and inference config
│   │   ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt   # multi-node multi-GPU training and inference config
│   │   ├── MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt  # mixed-precision training and inference config
│   │   └── MobileNetV3_large_x1_0_paddle2onnx_infer_python.txt                               # paddle2onnx inference test config
│   └── ResNet                     # test config directory for the ResNet series
│       ├── ResNet50_vd_train_infer_python.txt                                                # basic training and inference config
│       ├── ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt              # multi-node multi-GPU training and inference config
│       ├── ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt             # mixed-precision training and inference config
│       └── ResNet50_vd_paddle2onnx_infer_python.txt                                          # paddle2onnx inference test config
|   ......
├── docs
│   ├── guide.png
...@@ -47,6 +50,7 @@
├── prepare.sh                     # downloads the data and models required to run test_*.sh
├── README.md                      # usage documentation
├── results                        # pre-saved inference results, used for precision comparison against actual results
├── test_paddle2onnx.sh            # main script for testing paddle2onnx conversion and inference
└── test_train_inference_python.sh # main script for testing Python training and inference
```
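To list every paddle2onnx test config added by this chain, a quick check (a sketch, assuming the layout above) is:

```shell
# enumerate the paddle2onnx TIPC configs under test_tipc/config/
find test_tipc/config -name "*_paddle2onnx_infer_python.txt"
```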
......
===========================paddle2onnx_params===========================
model_name:MobileNetV3_large_x1_0
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/MobileNetV3_large_x1_0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/MobileNetV3_large_x1_0_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/MobileNetV3_large_x1_0_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
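For reference, and assuming the test harness simply joins each `key:value` pair above into a `key=value` argument, the MobileNetV3_large_x1_0 config expands to roughly the following two steps (conversion, then ONNX-Runtime-backed inference):

```shell
# 1) convert the Paddle inference model to ONNX (sketch assembled from the config above)
paddle2onnx --model_dir=./deploy/models/MobileNetV3_large_x1_0_infer/ \
    --model_filename=inference.pdmodel \
    --params_filename=inference.pdiparams \
    --save_file=./deploy/models/MobileNetV3_large_x1_0_infer/inference.onnx \
    --opset_version=10 \
    --enable_onnx_checker=True
# 2) run classification inference with the ONNX model on CPU
cd deploy && python3.7 ./python/predict_cls.py \
    -o Global.inference_model_dir=./models/MobileNetV3_large_x1_0_infer \
    -o Global.use_onnx=True -o Global.use_gpu=False \
    -c=configs/inference_cls.yaml && cd ../
```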
===========================paddle2onnx_params===========================
model_name:PP-ShiTu_general_rec
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PP-ShiTu_mainbody_det
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPHGNet_small
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPHGNet_small_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPHGNet_small_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPHGNet_small_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPHGNet_tiny
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPHGNet_tiny_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPHGNet_tiny_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPHGNet_tiny_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x0_25
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x0_25_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x0_25_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x0_35
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x0_35_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x0_35_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x0_35_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x0_5
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x0_5_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x0_5_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x0_5_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x0_75
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x0_75_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x0_75_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x0_75_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x1_0
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x1_0_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x1_0_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x1_0_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x1_5
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x1_5_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x1_5_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x1_5_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:PPLCNet_x2_5
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/PPLCNet_x2_5_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/PPLCNet_x2_5_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/PPLCNet_x2_5_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:ResNet50
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/ResNet50_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/ResNet50_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/ResNet50_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:ResNet50_vd
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/ResNet50_vd_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/ResNet50_vd_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/ResNet50_vd_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
===========================paddle2onnx_params===========================
model_name:SwinTransformer_tiny_patch4_window7_224
python:python3.7
2onnx: paddle2onnx
--model_dir:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/inference.onnx
--opset_version:10
--enable_onnx_checker:True
inference:./python/predict_cls.py
Global.use_onnx:True
Global.inference_model_dir:./models/SwinTransformer_tiny_patch4_window7_224_infer
Global.use_gpu:False
-c:configs/inference_cls.yaml
# Paddle2ONNX inference functional test

The main script for the Paddle2ONNX functional test is `test_paddle2onnx.sh`; it exercises the Paddle2ONNX model conversion and verifies that the converted model produces correct inference results.

## 1. Test summary

Depending on whether quantization is used during training, the models covered by this test fall into `normal models` and `quantized models`; the Paddle2ONNX inference coverage for the two categories is summarized below:

| Model type | Device |
| ---- | ---- |
| Normal model | GPU |
| Normal model | CPU |
| Quantized model | GPU |
| Quantized model | CPU |

## 2. Test procedure

### 2.1 Functional test

First run `prepare.sh` to download the required data and models, then run `test_paddle2onnx.sh` to execute the test; log files ending in `paddle2onnx_infer_*.log` are generated under the `test_tipc/output` directory.

The test commands and results below use PPHGNet_small as an example.

```shell
# prepare data and models
bash test_tipc/prepare.sh ./test_tipc/config/PPHGNet/PPHGNet_small_paddle2onnx_infer_python.txt paddle2onnx_infer
# run the test
bash test_tipc/test_paddle2onnx.sh ./test_tipc/config/PPHGNet/PPHGNet_small_paddle2onnx_infer_python.txt
```
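After the run, the summary log and the per-command inference log can be inspected directly, for example:

```shell
# overall pass/fail status of each executed command
cat test_tipc/output/results_paddle2onnx.log
# detailed output of the CPU ONNX inference step
cat test_tipc/output/paddle2onnx_infer_cpu.log
```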
#### Run results

The status of each test is written to `test_tipc/output/results_paddle2onnx.log`.

On success, it prints:

```
Run successfully with command - paddle2onnx --model_dir=./deploy/models/PPHGNet_tiny_infer/ --model_filename=inference.pdmodel --params_filename=inference.pdiparams --save_file=./deploy/models/PPHGNet_tiny_infer/inference.onnx --opset_version=10 --enable_onnx_checker=True!
Run successfully with command - cd deploy && python3.7 ./python/predict_cls.py -o Global.inference_model_dir=./models/PPHGNet_tiny_infer -o Global.use_onnx=True -o Global.use_gpu=False -c=configs/inference_cls.yaml > ../test_tipc/output/paddle2onnx_infer_cpu.log 2>&1 && cd ../!
```

On failure, it prints:

```
Run failed with command - paddle2onnx --model_dir=./deploy/models/PPHGNet_tiny_infer/ --model_filename=inference.pdmodel --params_filename=inference.pdiparams --save_file=./deploy/models/PPHGNet_tiny_infer/inference.onnx --opset_version=10 --enable_onnx_checker=True!
...
```

## 3. Further reading

This document covers functional testing only; for a more detailed guide to Paddle2ONNX-based inference, please refer to: [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX)
...@@ -176,11 +176,160 @@ if [ ${MODE} = "paddle2onnx_infer" ];then
python_name=$(func_parser_value "${lines[2]}")
${python_name} -m pip install paddle2onnx
${python_name} -m pip install onnxruntime
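# Each block below downloads the exported inference model that matches ${model_name}
# into deploy/models/ and extracts it there.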
if [ ${model_name} == "ResNet50" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar
tar xf ResNet50_infer.tar
cd ../../
fi
if [ ${model_name} == "ResNet50_vd" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
tar xf ResNet50_vd_infer.tar
cd ../../
fi
if [ ${model_name} == "MobileNetV3_large_x1_0" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar
tar xf MobileNetV3_large_x1_0_infer.tar
cd ../../
fi
if [ ${model_name} == "SwinTransformer_tiny_patch4_window7_224" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
tar xf SwinTransformer_tiny_patch4_window7_224_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x0_25" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
tar xf PPLCNet_x0_25_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x0_35" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar
tar xf PPLCNet_x0_35_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x0_5" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar
tar xf PPLCNet_x0_5_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x0_75" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar
tar xf PPLCNet_x0_75_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x1_0" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar
tar xf PPLCNet_x1_0_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x1_5" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar
tar xf PPLCNet_x1_5_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x2_0" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar
tar xf PPLCNet_x2_0_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNet_x2_5" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar
tar xf PPLCNet_x2_5_infer.tar
cd ../../
fi
if [ ${model_name} == "PP-ShiTu_general_rec" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
tar xf general_PPLCNet_x2_5_lite_v1.0_infer.tar
cd ../../
fi
if [ ${model_name} == "PP-ShiTu_mainbody_det" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
tar xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
cd ../../
fi
if [ ${model_name} == "PPLCNetV2_base" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar
tar xf PPLCNetV2_base_infer.tar
cd ../../
fi
if [ ${model_name} == "PPHGNet_tiny" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
tar xf PPHGNet_tiny_infer.tar
cd ../../
fi
if [ ${model_name} == "PPHGNet_small" ]; then
# wget model
cd deploy
mkdir models
cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
tar xf PPHGNet_small_infer.tar
cd ../../
fi
# wget model
cd deploy && mkdir models && cd models
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
cd ../../
fi
if [ ${MODE} = "benchmark_train" ];then
......
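The per-model blocks in `prepare.sh` above all repeat the same download-and-extract steps. As an illustration only (not part of this commit), the same logic could be written once and driven by a lookup table; the URLs are the ones used in the blocks above:

```shell
# Sketch (not part of the commit): table-driven replacement for the repeated blocks.
declare -A model_urls=(
    ["ResNet50"]="https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar"
    ["MobileNetV3_large_x1_0"]="https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar"
    ["PP-ShiTu_general_rec"]="https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar"
    # ... one entry per supported model ...
)
if [[ -n "${model_urls[${model_name}]}" ]]; then
    mkdir -p deploy/models && cd deploy/models
    wget -nc "${model_urls[${model_name}]}"
    tar xf "$(basename "${model_urls[${model_name}]}")"
    cd ../../
fi
```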
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
...@@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
# parser params
dataline=$(awk 'NR==1, NR==15{print}' $FILENAME)
IFS=$'\n'
lines=(${dataline})
...@@ -31,7 +31,7 @@ opset_version_key=$(func_parser_key "${lines[8]}")
opset_version_value=$(func_parser_value "${lines[8]}")
enable_onnx_checker_key=$(func_parser_key "${lines[9]}")
enable_onnx_checker_value=$(func_parser_value "${lines[9]}")
# parser onnx inference
inference_py=$(func_parser_value "${lines[10]}")
use_onnx_key=$(func_parser_key "${lines[11]}")
use_onnx_value=$(func_parser_value "${lines[11]}")
...@@ -39,6 +39,8 @@ inference_model_dir_key=$(func_parser_key "${lines[12]}")
inference_model_dir_value=$(func_parser_value "${lines[12]}")
inference_hardware_key=$(func_parser_key "${lines[13]}")
inference_hardware_value=$(func_parser_value "${lines[13]}")
inference_config_key=$(func_parser_key "${lines[14]}")
inference_config_value=$(func_parser_value "${lines[14]}")
LOG_PATH="./test_tipc/output"
mkdir -p ./test_tipc/output
...@@ -65,7 +67,8 @@ function func_paddle2onnx(){
set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}")
set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
eval $infer_model_cmd
status_check $last_status "${infer_model_cmd}" "${status_log}"
}
...@@ -75,4 +78,4 @@ echo "################### run test ###################"
export Count=0
IFS="|"
func_paddle2onnx
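For context, `func_parser_key`, `func_parser_value`, and `func_set_params` come from `test_tipc/common_func.sh`; assuming they behave as in the other TIPC suites, the first two split each config line on `:` into key and value, and the last joins them back as `key=value` (skipping `null` entries). A minimal sketch of that assumed convention:

```shell
# Sketch of the assumed config-line convention (illustration only)
line="--opset_version:10"
key="${line%%:*}"       # -> --opset_version    (what func_parser_key is assumed to return)
value="${line#*:}"      # -> 10                 (what func_parser_value is assumed to return)
echo "${key}=${value}"  # -> --opset_version=10 (what func_set_params is assumed to emit)
```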