diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
index 56dcff845be57473ca04a4a53b6c84b686b3a1ef..652203bcb5f9569cace42b82daca2421c33be9d4 100644
--- a/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -1,5 +1,5 @@
 ===========================paddle2onnx_params===========================
-model_name:PP-ShiTu_general_rec
+model_name:GeneralRecognition_PPLCNet_x2_5
 python:python3.7
 2onnx: paddle2onnx
 --model_dir:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/
@@ -9,8 +9,8 @@ python:python3.7
 --opset_version:10
 --enable_onnx_checker:True
 inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
-inference:./python/predict_cls.py
+inference:./python/predict_rec.py
 Global.use_onnx:True
-Global.inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer
+Global.rec_inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer
 Global.use_gpu:False
--c:configs/inference_cls.yaml
\ No newline at end of file
+-c:configs/inference_rec.yaml
\ No newline at end of file
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 1cb53ea4d919f237eb0f8421127ee3bbd16bc9a6..99575fa3f529d75daaf529e6325e31d5ef8584a0 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -85,12 +85,18 @@ if [[ ${MODE} = "cpp_infer" ]]; then
     if [[ ! -d "./deploy/cpp/paddle_inference/" ]]; then
         pushd ./deploy/cpp/
         PADDLEInfer=$3
-        if [ "" = "$PADDLEInfer" ];then
+        if [ "" = "$PADDLEInfer" ]; then
             wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate
+            tar xf paddle_inference.tgz
         else
             wget -nc ${PADDLEInfer} --no-check-certificate
+            tar_name=$(func_get_url_file_name "$PADDLEInfer")
+            tar xf ${tar_name}
+            paddle_inference_install_dir=${tar_name%.*}
+            if [ ! -d "paddle_inference" ]; then
+                ln -s ${paddle_inference_install_dir} paddle_inference
+            fi
         fi
-        tar xf paddle_inference.tgz
         popd
     fi
     if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]]; then
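
Note on the config change: the renamed fields switch the paddle2onnx TIPC run from the classification predictor to the recognition predictor. A minimal sketch of the inference command these fields are expected to compose (the exact stitching lives in the TIPC runner script, presumably test_tipc/test_paddle2onnx.sh; the -o key=value override style is assumed from the other PaddleClas deploy scripts):

    # Assumed shape of the composed inference command (run from deploy/):
    python3.7 ./python/predict_rec.py \
        -c configs/inference_rec.yaml \
        -o Global.rec_inference_model_dir=./models/general_PPLCNet_x2_5_lite_v1.0_infer \
        -o Global.use_onnx=True \
        -o Global.use_gpu=False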
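Note on the prepare.sh change: the tar extraction moves inside each branch because a custom inference lib (passed as the 3rd argument) need not be named paddle_inference.tgz, and its top-level directory need not be paddle_inference/. The symlink normalizes whatever directory the tarball unpacks to back to the ./deploy/cpp/paddle_inference/ path the outer check (and the C++ build) expects. A minimal sketch of the new else branch, assuming func_get_url_file_name is the existing TIPC helper that returns the last path segment of a URL (the URL below is hypothetical):

    # Hypothetical custom inference lib passed as $3 to prepare.sh
    PADDLEInfer=https://example.com/paddle_inference_install_dir.tgz
    tar_name=$(func_get_url_file_name "$PADDLEInfer")   # -> paddle_inference_install_dir.tgz (assumed helper)
    tar xf ${tar_name}                                  # assumed to unpack into paddle_inference_install_dir/
    paddle_inference_install_dir=${tar_name%.*}         # strip the final extension: .tgz
    if [ ! -d "paddle_inference" ]; then
        ln -s ${paddle_inference_install_dir} paddle_inference   # normalize to the expected path
    fi

One caveat worth noting: ${tar_name%.*} strips only the final extension, so a name ending in .tar.gz would resolve to a *.tar directory name; the branch behaves as intended for .tgz archives like the default one.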