From faa6bbef2a2928ff5757b2009520ca861b98343a Mon Sep 17 00:00:00 2001
From: WenmuZhou <572459439@qq.com>
Date: Thu, 4 Aug 2022 10:15:12 +0800
Subject: [PATCH] rename 2.0 to 2_0

---
 .../ch_ppocr_mobile_v2_0_rec/train_pact_infer_python.txt | 4 ++--
 .../ch_ppocr_mobile_v2_0_rec/train_ptq_infer_python.txt  | 2 +-
 test_tipc/test_paddle2onnx.sh                            | 6 +++---
 test_tipc/test_serving_infer_python.sh                   | 6 +++---
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_pact_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_pact_infer_python.txt
index 631518ff..9c1223f4 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_pact_infer_python.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_pact_infer_python.txt
@@ -14,7 +14,7 @@ null:null
 ##
 trainer:pact_train
 norm_train:null
-pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o
+pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_ppocr_mobile_v2_0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o
 fpgm_train:null
 distill_train:null
 null:null
@@ -28,7 +28,7 @@ null:null
 Global.save_inference_dir:./output/
 Global.checkpoints:
 norm_export:null
-quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o
+quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_ppocr_mobile_v2_0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o
 fpgm_export:null
 distill_export:null
 export1:null
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_ptq_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_ptq_infer_python.txt
index cd0828f3..df47f328 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_ptq_infer_python.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2_0_rec/train_ptq_infer_python.txt
@@ -4,7 +4,7 @@ python:python3.7
 Global.pretrained_model:null
 Global.save_inference_dir:null
 infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/
-infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/rec_chinese_lite_train_v2.0.yml -o
+infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_ppocr_mobile_v2_0_rec_KL/rec_chinese_lite_train_v2.0.yml -o
 infer_quant:True
 inference:tools/infer/predict_rec.py --rec_image_shape="3,32,320"
 --use_gpu:False|True
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 356bc980..78d79d0b 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -54,7 +54,7 @@ function func_paddle2onnx(){
     _script=$1

     # paddle2onnx
-    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2_0" ] || [ ${model_name} = "ch_ppocr_server_v2_0" ]; then
         # trans det
         set_dirname=$(func_set_params "--model_dir" "${det_infer_model_dir_value}")
         set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
@@ -113,7 +113,7 @@ function func_paddle2onnx(){
             _save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
             set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu}")
             set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
-            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2_0" ] || [ ${model_name} = "ch_ppocr_server_v2_0" ]; then
                 set_det_model_dir=$(func_set_params "${det_model_key}" "${det_save_file_value}")
                 set_rec_model_dir=$(func_set_params "${rec_model_key}" "${rec_save_file_value}")
                 infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_det_model_dir} ${set_rec_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
@@ -132,7 +132,7 @@ function func_paddle2onnx(){
             _save_log_path="${LOG_PATH}/paddle2onnx_infer_gpu.log"
             set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu}")
             set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
-            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2_0" ] || [ ${model_name} = "ch_ppocr_server_v2_0" ]; then
                 set_det_model_dir=$(func_set_params "${det_model_key}" "${det_save_file_value}")
                 set_rec_model_dir=$(func_set_params "${rec_model_key}" "${rec_save_file_value}")
                 infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_det_model_dir} ${set_rec_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 4ccccc06..4b7dfcf7 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -71,7 +71,7 @@ function func_serving(){
     # pdserving
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
-    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2_0" ] || [ ${model_name} = "ch_ppocr_server_v2_0" ]; then
         # trans det
         set_dirname=$(func_set_params "--dirname" "${det_infer_model_dir_value}")
         set_serving_server=$(func_set_params "--serving_server" "${det_serving_server_value}")
@@ -120,7 +120,7 @@ function func_serving(){
             for threads in ${web_cpu_threads_list[*]}; do
                 set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
                 server_log_path="${LOG_PATH}/python_server_cpu_usemkldnn_${use_mkldnn}_threads_${threads}.log"
-                if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+                if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2_0" ] || [ ${model_name} = "ch_ppocr_server_v2_0" ]; then
                     set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}")
                     set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}")
                     web_service_cmd="nohup ${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_det_model_config} ${set_rec_model_config} > ${server_log_path} 2>&1 &"
@@ -171,7 +171,7 @@ function func_serving(){
                 device_type=2
             fi
             set_precision=$(func_set_params "${web_precision_key}" "${precision}")
-            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2_0" ] || [ ${model_name} = "ch_ppocr_server_v2_0" ]; then
                 set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}")
                 set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}")
                 web_service_cmd="nohup ${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_det_model_config} ${set_rec_model_config} > ${server_log_path} 2>&1 &"
--
GitLab