diff --git a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dba07d1764924bac6ae0779f496f8808a92826ad
--- /dev/null
+++ b/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,15 @@
+===========================paddle2onnx_params===========================
+model_name:ResNet50_vd
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/ResNet50_vd_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/ResNet50_vd_infer/inference.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference: python/predict_cls.py -c configs/inference_cls.yaml
+Global.use_onnx:True
+Global.inference_model_dir:models/ResNet50_vd_infer/
+Global.use_gpu:False
+Global.infer_imgs:./images/ILSVRC2012_val_00000010.jpeg
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 646e7f4d8dfef3f1bae4806985e037f9f1f5708c..89cdd505ed9f7781af00d8cffe35fff7cac21edb 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -165,3 +165,15 @@ if [ ${MODE} = "serving_infer" ];then
     cd ./deploy/paddleserving
     wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
 fi
+
+if [ ${MODE} = "paddle2onnx_infer" ];then
+    # prepare paddle2onnx env
+    python_name=$(func_parser_value "${lines[2]}")
+    ${python_name} -m pip install paddle2onnx
+    ${python_name} -m pip install onnxruntime
+
+    # wget model
+    cd deploy && mkdir models && cd models
+    wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
+    cd ../../
+fi
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 300c61770d2519fad0502147e2cee4a3e4f50ac9..7d8051fb871a37ca80341d037ccacc86c2834fe7 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
 
 
 # parser params
-dataline=$(awk 'NR==1, NR==12{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==15{print}' $FILENAME)
 IFS=$'\n'
 lines=(${dataline})
 
@@ -33,12 +33,14 @@ enable_onnx_checker_key=$(func_parser_key "${lines[9]}")
 enable_onnx_checker_value=$(func_parser_value "${lines[9]}")
 # parser onnx inference
 inference_py=$(func_parser_value "${lines[10]}")
-use_gpu_key=$(func_parser_key "${lines[11]}")
-use_gpu_value=$(func_parser_value "${lines[11]}")
-det_model_key=$(func_parser_key "${lines[12]}")
-image_dir_key=$(func_parser_key "${lines[13]}")
-image_dir_value=$(func_parser_value "${lines[13]}")
-
+use_onnx_key=$(func_parser_key "${lines[11]}")
+use_onnx_value=$(func_parser_value "${lines[11]}")
+inference_model_dir_key=$(func_parser_key "${lines[12]}")
+inference_model_dir_value=$(func_parser_value "${lines[12]}")
+inference_hardware_key=$(func_parser_key "${lines[13]}")
+inference_hardware_value=$(func_parser_value "${lines[13]}")
+inference_imgs_key=$(func_parser_key "${lines[14]}")
+inference_imgs_value=$(func_parser_value "${lines[14]}")
 LOG_PATH="./test_tipc/output"
 mkdir -p ./test_tipc/output
 
@@ -50,7 +52,7 @@ function func_paddle2onnx(){
     _script=$1
 
     # paddle2onnx
-    _save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
+    _save_log_path=".${LOG_PATH}/paddle2onnx_infer_cpu.log"
"${infer_model_dir_value}") set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") @@ -62,10 +64,11 @@ function func_paddle2onnx(){ last_status=${PIPESTATUS[0]} status_check $last_status "${trans_model_cmd}" "${status_log}" # python inference - set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu_value}") - set_model_dir=$(func_set_params "${det_model_key}" "${save_file_value}") - set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}") - infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 " + set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}") + set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}") + set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}") + set_infer_imgs=$(func_set_params "${inference_imgs_key}" "${inference_imgs_value}") + infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} -o ${set_infer_imgs} >${_save_log_path} 2>&1 && cd ../" eval $infer_model_cmd status_check $last_status "${infer_model_cmd}" "${status_log}" }