From 7be68dbd9d96f2d093443bf1c92c14f038f821f0 Mon Sep 17 00:00:00 2001 From: MissPenguin Date: Wed, 8 Sep 2021 03:13:42 +0000 Subject: [PATCH] fix readme --- tests/readme.md | 46 ++++++++++++++++++++++++++++------------------ tests/test.sh | 8 ++++---- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/tests/readme.md b/tests/readme.md index 1c5e0fae..592f6214 100644 --- a/tests/readme.md +++ b/tests/readme.md @@ -23,36 +23,46 @@ test.sh和params.txt文件配合使用,完成OCR轻量检测和识别模型从 ```bash tests/ -├── ocr_det_params.txt # 测试OCR检测模型的参数配置文件 -├── ocr_rec_params.txt # 测试OCR识别模型的参数配置文件 -└── prepare.sh # 完成test.sh运行所需要的数据和模型下载 -└── test.sh # 根据 +├── ocr_det_params.txt # 测试OCR检测模型的参数配置文件 +├── ocr_rec_params.txt # 测试OCR识别模型的参数配置文件 +├── ocr_ppocr_mobile_params.txt # 测试OCR检测+识别模型串联的参数配置文件 +├── prepare.sh # 完成test.sh运行所需要的数据和模型下载 +└── test.sh # 测试主程序 ``` # 使用方法 + test.sh包含四种运行模式,每种模式的运行数据不同,分别用于测试速度和精度,分别是: + +- 模式1 lite_train_infer,使用少量数据训练,用于快速验证训练到预测的走通流程,不验证精度和速度; -``` + +- 模式1:lite_train_infer,使用少量数据训练,用于快速验证训练到预测的走通流程,不验证精度和速度; +```shell bash tests/prepare.sh ./tests/ocr_det_params.txt 'lite_train_infer' bash tests/test.sh ./tests/ocr_det_params.txt 'lite_train_infer' -``` + +- 模式2 whole_infer,使用少量数据训练,一定量数据预测,用于验证训练后的模型执行预测,预测速度是否合理; -``` + ``` + +- 模式2:whole_infer,使用少量数据训练,一定量数据预测,用于验证训练后的模型执行预测,预测速度是否合理; +```shell bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_infer' bash tests/test.sh ./tests/ocr_det_params.txt 'whole_infer' -``` + ``` -- 模式3 infer 不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度; -``` +- 模式3:infer 不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度; +```shell bash tests/prepare.sh ./tests/ocr_det_params.txt 'infer' -用法1: +# 用法1: bash tests/test.sh ./tests/ocr_det_params.txt 'infer' -用法2: 指定GPU卡预测,第三个传入参数为GPU卡号 +# 用法2: 指定GPU卡预测,第三个传入参数为GPU卡号 bash tests/test.sh ./tests/ocr_det_params.txt 'infer' '1' -``` + ``` -模式4: whole_train_infer , CE: 全量数据训练,全量数据预测,验证模型训练精度,预测精度,预测速度 -``` +- 模式4:whole_train_infer , CE: 全量数据训练,全量数据预测,验证模型训练精度,预测精度,预测速度; +```shell 
bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_train_infer' bash tests/test.sh ./tests/ocr_det_params.txt 'whole_train_infer' -``` +``` + +- 模式5:cpp_infer , CE: 验证inference model的c++预测是否走通; +```shell +bash tests/prepare.sh ./tests/ocr_det_params.txt 'cpp_infer' +bash tests/test.sh ./tests/ocr_det_params.txt 'cpp_infer' +``` diff --git a/tests/test.sh b/tests/test.sh index 3444f96f..8040793f 100644 --- a/tests/test.sh +++ b/tests/test.sh @@ -192,8 +192,8 @@ if [ ${MODE} = "cpp_infer" ]; then cpp_infer_model_key=$(func_parser_key "${lines[62]}") cpp_image_dir_key=$(func_parser_key "${lines[63]}") cpp_infer_img_dir=$(func_parser_value "${lines[63]}") - cpp_rec_model_key=$(func_parser_key "${lines[64]}") - cpp_rec_model_value=$(func_parser_value "${lines[64]}") + cpp_infer_key1=$(func_parser_key "${lines[64]}") + cpp_infer_value1=$(func_parser_value "${lines[64]}") cpp_benchmark_key=$(func_parser_key "${lines[65]}") cpp_benchmark_value=$(func_parser_value "${lines[65]}") fi @@ -369,7 +369,7 @@ function func_cpp_inference(){ set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}") set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}") set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}") - set_infer_params1=$(func_set_params "${cpp_rec_model_key}" "${cpp_rec_model_value}") + set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}") command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} @@ -398,7 +398,7 @@ function func_cpp_inference(){ set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}") set_precision=$(func_set_params "${cpp_precision_key}" "${precision}") set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}") - set_infer_params1=$(func_set_params 
"${cpp_rec_model_key}" "${cpp_rec_model_value}") + set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}") command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} -- GitLab