diff --git a/tests/test.sh b/tests/test.sh
index 36cf2b6f7828ee44d0464097d66130ed9b026bc9..9caab02aee9610fc6d4e424841985a56ae724f3b 100644
--- a/tests/test.sh
+++ b/tests/test.sh
@@ -138,8 +138,8 @@ infer_img_dir=$(func_parser_value "${lines[44]}")
 save_log_key=$(func_parser_key "${lines[45]}")
 benchmark_key=$(func_parser_key "${lines[46]}")
 benchmark_value=$(func_parser_value "${lines[46]}")
-infer_key2=$(func_parser_key "${lines[47]}")
-infer_value2=$(func_parser_value "${lines[47]}")
+infer_key1=$(func_parser_key "${lines[47]}")
+infer_value1=$(func_parser_value "${lines[47]}")
 
 LOG_PATH="./tests/output"
 mkdir -p ${LOG_PATH}
@@ -169,7 +169,8 @@ function func_inference(){
                     set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                     set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
                     set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
-                    command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
+                    set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                    command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
                     eval $command
                     status_check $? "${command}" "${status_log}"
                 done
@@ -285,7 +286,8 @@ for gpu in ${gpu_list[*]}; do
             set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
             # run eval
             if [ ${eval_py} != "null" ]; then
-                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu}"
+                set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
+                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
                 eval $eval_cmd
                 status_check $? "${eval_cmd}" "${status_log}"
             fi
@@ -318,3 +320,4 @@ else
     #run inference
     func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" "False"
 fi
+
diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py
index 568381b1f7e7e825b174bbcfa43af3e55f683135..3de00d83a8f9f55af9b89d5d2cd5c877399c5930 100755
--- a/tools/infer/predict_det.py
+++ b/tools/infer/predict_det.py
@@ -114,7 +114,7 @@ class TextDetector(object):
                 time_keys=[
                     'preprocess_time', 'inference_time', 'postprocess_time'
                 ],
-                warmup=10,
+                warmup=2,
                 logger=logger)
 
     def order_points_clockwise(self, pts):
@@ -237,7 +237,7 @@ if __name__ == "__main__":
 
     if args.warmup:
         img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8)
-        for i in range(10):
+        for i in range(2):
             res = text_detector(img)
 
     if not os.path.exists(draw_img_save):
diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py
index bc9f713aeafb9977c60fe65bea56fbe2b395efd5..bb4a31706471b9b1745519ac9f390d01b60d5d44 100755
--- a/tools/infer/predict_rec.py
+++ b/tools/infer/predict_rec.py
@@ -73,7 +73,7 @@ class TextRecognizer(object):
                 model_precision=args.precision,
                 batch_size=args.rec_batch_num,
                 data_shape="dynamic",
-                save_path=args.save_log_path,
+                save_path=None,  #args.save_log_path,
                 inference_config=self.config,
                 pids=pid,
                 process_name=None,
@@ -81,7 +81,8 @@ class TextRecognizer(object):
                 time_keys=[
                     'preprocess_time', 'inference_time', 'postprocess_time'
                 ],
-                warmup=10)
+                warmup=2,
+                logger=logger)
 
     def resize_norm_img(self, img, max_wh_ratio):
         imgC, imgH, imgW = self.rec_image_shape
@@ -272,10 +273,10 @@ def main(args):
     valid_image_file_list = []
     img_list = []
 
-    # warmup 10 times
+    # warmup 2 times
    if args.warmup:
        img = np.random.uniform(0, 255, [32, 320, 3]).astype(np.uint8)
-        for i in range(10):
+        for i in range(2):
            res = text_recognizer([img])
 
    for image_file in image_file_list: