From 64da3cde7cc3a970611a1d9710bb7d04f01eab17 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 30 Nov 2021 11:40:13 +0000
Subject: [PATCH] delete unused null

---
 ...rmal_normal_infer_python_linux_gpu_cpu.txt |  2 -
 ...rmal_normal_infer_python_linux_gpu_cpu.txt |  2 -
 ...rmal_normal_infer_python_linux_gpu_cpu.txt |  2 -
 test_tipc/test_inference_python.sh            | 51 +++++++++----------
 4 files changed, 25 insertions(+), 32 deletions(-)

diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index a7120dee..fcac6e39 100644
--- a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -1,8 +1,6 @@
 ===========================ch_PP-OCRv2===========================
 model_name:ch_PP-OCRv2
 python:python3.7
-null:null
-null:null
 infer_model:./inference/ch_PP-OCRv2_det_infer/
 infer_export:null
 infer_quant:False
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index 4f2139eb..4a46f0cf 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -1,8 +1,6 @@
 ===========================ch_ppocr_mobile_v2.0===========================
 model_name:ch_ppocr_mobile_v2.0
 python:python3.7
-null:null
-null:null
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:null
 infer_quant:False
diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index b2400460..92d7031e 100644
--- a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -1,8 +1,6 @@
 ===========================ch_ppocr_server_v2.0===========================
 model_name:ch_ppocr_server_v2.0
 python:python3.7
-null:null
-null:null
 infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
 infer_export:null
 infer_quant:True
diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh
index 4f45a43f..72516e04 100644
--- a/test_tipc/test_inference_python.sh
+++ b/test_tipc/test_inference_python.sh
@@ -15,32 +15,32 @@ lines=(${dataline})
 model_name=$(func_parser_value "${lines[1]}")
 python=$(func_parser_value "${lines[2]}")
-infer_model_dir_list=$(func_parser_value "${lines[5]}")
-infer_export_list=$(func_parser_value "${lines[6]}")
-infer_is_quant=$(func_parser_value "${lines[7]}")
+infer_model_dir_list=$(func_parser_value "${lines[3]}")
+infer_export_list=$(func_parser_value "${lines[4]}")
+infer_is_quant=$(func_parser_value "${lines[5]}")
 # parser inference
-inference_py=$(func_parser_value "${lines[8]}")
-use_gpu_key=$(func_parser_key "${lines[9]}")
-use_gpu_list=$(func_parser_value "${lines[9]}")
-use_mkldnn_key=$(func_parser_key "${lines[10]}")
-use_mkldnn_list=$(func_parser_value "${lines[10]}")
-cpu_threads_key=$(func_parser_key "${lines[11]}")
-cpu_threads_list=$(func_parser_value "${lines[11]}")
-batch_size_key=$(func_parser_key "${lines[12]}")
-batch_size_list=$(func_parser_value "${lines[12]}")
-use_trt_key=$(func_parser_key "${lines[13]}")
-use_trt_list=$(func_parser_value "${lines[13]}")
-precision_key=$(func_parser_key "${lines[14]}")
-precision_list=$(func_parser_value "${lines[14]}")
-infer_model_key=$(func_parser_key "${lines[15]}")
-image_dir_key=$(func_parser_key "${lines[16]}")
-infer_img_dir=$(func_parser_value "${lines[16]}")
-rec_model_key=$(func_parser_key "${lines[17]}")
-rec_model_value=$(func_parser_value "${lines[17]}")
-benchmark_key=$(func_parser_key "${lines[18]}")
-benchmark_value=$(func_parser_value "${lines[18]}")
-infer_key1=$(func_parser_key "${lines[19]}")
-infer_value1=$(func_parser_value "${lines[19]}")
+inference_py=$(func_parser_value "${lines[6]}")
+use_gpu_key=$(func_parser_key "${lines[7]}")
+use_gpu_list=$(func_parser_value "${lines[7]}")
+use_mkldnn_key=$(func_parser_key "${lines[8]}")
+use_mkldnn_list=$(func_parser_value "${lines[8]}")
+cpu_threads_key=$(func_parser_key "${lines[9]}")
+cpu_threads_list=$(func_parser_value "${lines[9]}")
+batch_size_key=$(func_parser_key "${lines[10]}")
+batch_size_list=$(func_parser_value "${lines[10]}")
+use_trt_key=$(func_parser_key "${lines[11]}")
+use_trt_list=$(func_parser_value "${lines[11]}")
+precision_key=$(func_parser_key "${lines[12]}")
+precision_list=$(func_parser_value "${lines[12]}")
+infer_model_key=$(func_parser_key "${lines[13]}")
+image_dir_key=$(func_parser_key "${lines[14]}")
+infer_img_dir=$(func_parser_value "${lines[14]}")
+rec_model_key=$(func_parser_key "${lines[15]}")
+rec_model_value=$(func_parser_value "${lines[15]}")
+benchmark_key=$(func_parser_key "${lines[16]}")
+benchmark_value=$(func_parser_value "${lines[16]}")
+infer_key1=$(func_parser_key "${lines[17]}")
+infer_value1=$(func_parser_value "${lines[17]}")
@@ -84,7 +84,6 @@ function func_inference(){
 set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
 set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
 command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
-echo $command
 eval $command
 last_status=${PIPESTATUS[0]}
 eval "cat ${_save_log_path}"
--
GitLab
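Note on the index shift in test_inference_python.sh above: each TIPC config file is a list of key:value lines that the script reads positionally into the lines array, so deleting the two null:null placeholders (formerly lines[3] and lines[4]) moves every later entry up by two, which is what the lines[5] -> lines[3], lines[8] -> lines[6], and subsequent renumbering reflects. The sketch below illustrates the assumed behaviour of the func_parser_key / func_parser_value helpers; they are sourced from elsewhere in test_tipc, and the real implementations may differ in detail.

# Sketch only: assumed implementations of the parsers used by test_inference_python.sh.
# Each config line has the form "key:value"; splitting on ":" yields the key as the
# first field and the value as the second field.
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[0]}
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}
}

# Example with the cleaned-up ch_PP-OCRv2 config, whose 4th line is
#   infer_model:./inference/ch_PP-OCRv2_det_infer/
# After IFS=$'\n'; lines=(${dataline}) that line is lines[3], so:
#   func_parser_key   "${lines[3]}"   ->  infer_model
#   func_parser_value "${lines[3]}"   ->  ./inference/ch_PP-OCRv2_det_infer/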