Unverified commit d41586fc, authored by MissPenguin, committed by GitHub

Merge pull request #3958 from MissPenguin/dygraph

support rec for cpp CI/CE
@@ -63,6 +63,7 @@ DEFINE_double(cls_thresh, 0.9, "Threshold of cls_thresh.");
 DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
 DEFINE_int32(rec_batch_num, 1, "rec_batch_num.");
 DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");
+// DEFINE_string(char_list_file, "./ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");
 
 using namespace std;
@@ -148,12 +149,28 @@ int main_rec(std::vector<cv::String> cv_all_img_names) {
     time_info[1] += rec_times[1];
     time_info[2] += rec_times[2];
   }
+  if (FLAGS_benchmark) {
+    AutoLogger autolog("ocr_rec",
+                       FLAGS_use_gpu,
+                       FLAGS_use_tensorrt,
+                       FLAGS_enable_mkldnn,
+                       FLAGS_cpu_threads,
+                       1,
+                       "dynamic",
+                       FLAGS_precision,
+                       time_info,
+                       cv_all_img_names.size());
+    autolog.report();
+  }
   return 0;
 }
 
 int main_system(std::vector<cv::String> cv_all_img_names) {
+  std::vector<double> time_info_det = {0, 0, 0};
+  std::vector<double> time_info_rec = {0, 0, 0};
   DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                  FLAGS_gpu_mem, FLAGS_cpu_threads,
                  FLAGS_enable_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
@@ -174,12 +191,10 @@ int main_system(std::vector<cv::String> cv_all_img_names) {
                      FLAGS_enable_mkldnn, FLAGS_char_list_file,
                      FLAGS_use_tensorrt, FLAGS_precision);
 
-  auto start = std::chrono::system_clock::now();
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
     LOG(INFO) << "The predict img: " << cv_all_img_names[i];
 
-    cv::Mat srcimg = cv::imread(FLAGS_image_dir, cv::IMREAD_COLOR);
+    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
     if (!srcimg.data) {
       std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
       exit(1);
@@ -189,7 +204,10 @@ int main_system(std::vector<cv::String> cv_all_img_names) {
     std::vector<double> rec_times;
     det.Run(srcimg, boxes, &det_times);
+    time_info_det[0] += det_times[0];
+    time_info_det[1] += det_times[1];
+    time_info_det[2] += det_times[2];
 
     cv::Mat crop_img;
     for (int j = 0; j < boxes.size(); j++) {
       crop_img = Utility::GetRotateCropImage(srcimg, boxes[j]);
@@ -198,18 +216,36 @@ int main_system(std::vector<cv::String> cv_all_img_names) {
         crop_img = cls->Run(crop_img);
       }
       rec.Run(crop_img, &rec_times);
+      time_info_rec[0] += rec_times[0];
+      time_info_rec[1] += rec_times[1];
+      time_info_rec[2] += rec_times[2];
     }
-    auto end = std::chrono::system_clock::now();
-    auto duration =
-        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
-    std::cout << "Cost "
-              << double(duration.count()) *
-                     std::chrono::microseconds::period::num /
-                     std::chrono::microseconds::period::den
-              << "s" << std::endl;
   }
+  if (FLAGS_benchmark) {
+    AutoLogger autolog_det("ocr_det",
+                           FLAGS_use_gpu,
+                           FLAGS_use_tensorrt,
+                           FLAGS_enable_mkldnn,
+                           FLAGS_cpu_threads,
+                           1,
+                           "dynamic",
+                           FLAGS_precision,
+                           time_info_det,
+                           cv_all_img_names.size());
+    AutoLogger autolog_rec("ocr_rec",
+                           FLAGS_use_gpu,
+                           FLAGS_use_tensorrt,
+                           FLAGS_enable_mkldnn,
+                           FLAGS_cpu_threads,
+                           1,
+                           "dynamic",
+                           FLAGS_precision,
+                           time_info_rec,
+                           cv_all_img_names.size());
+    autolog_det.report();
+    std::cout << endl;
+    autolog_rec.report();
+  }
 
   return 0;
 }
...
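In sum, the C++ changes above replace the ad-hoc `std::chrono` cost printout with per-stage `time_info` accumulation (preprocess, inference, postprocess) reported through `AutoLogger` when `--benchmark` is enabled, and fix `main_system` to read each globbed image instead of `FLAGS_image_dir`. As a hedged usage sketch, the standalone rec mode could then be exercised like this (paths are illustrative, borrowed from the params files in this PR):

```shell
# Assumes the model/data layout staged by tests/prepare.sh in 'cpp_infer' mode.
./deploy/cpp_infer/build/ppocr rec \
    --rec_model_dir=./inference/ch_ppocr_mobile_v2.0_rec_infer/ \
    --image_dir=./inference/rec_inference/ \
    --benchmark=True
```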
@@ -62,7 +62,7 @@ inference:./deploy/cpp_infer/build/ppocr det
 --precision:fp32|fp16
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
---save_log_path:null
+null:null
 --benchmark:True
 ===========================serving_params===========================
 trans_model:-m paddle_serving_client.convert
...
@@ -53,7 +53,7 @@ inference:tools/infer/predict_system.py
 use_opencv:True
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_quant:False
-inference:./deploy/cpp_infer/build/ppocr det
+inference:./deploy/cpp_infer/build/ppocr system
 --use_gpu:True|False
 --enable_mkldnn:True|False
 --cpu_threads:1|6
@@ -62,6 +62,6 @@ inference:./deploy/cpp_infer/build/ppocr det
 --precision:fp32|fp16
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
---save_log_path:null
+--rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
 --benchmark:True
@@ -49,3 +49,18 @@ inference:tools/infer/predict_rec.py
 --save_log_path:./test/output/
 --benchmark:True
 null:null
+===========================cpp_infer_params===========================
+use_opencv:True
+infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr rec
+--use_gpu:True|False
+--enable_mkldnn:True|False
+--cpu_threads:1|6
+--rec_batch_num:1
+--use_tensorrt:False|True
+--precision:fp32|fp16
+--rec_model_dir:
+--image_dir:./inference/rec_inference/
+null:null
+--benchmark:True
\ No newline at end of file
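Every line in these params files is a `key:value` pair; `null` on the key or value side tells the harness to skip that flag. A minimal sketch of how the `func_parser_key`/`func_parser_value` helpers referenced by test.sh might split such lines (a hypothetical re-implementation for illustration; the real ones live in tests/test.sh):

```shell
# Hypothetical sketch: split a "key:value" line on the colon.
function func_parser_key(){
    local IFS=":"
    local array=($1)
    echo ${array[0]}    # e.g. "--rec_batch_num"
}
function func_parser_value(){
    local IFS=":"
    local array=($1)
    echo ${array[1]}    # e.g. "1"
}
```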
@@ -64,7 +64,7 @@ elif [ ${MODE} = "whole_infer" ];then
     cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar
     ln -s ./icdar2015_infer ./icdar2015
     cd ../
-elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then
+elif [ ${MODE} = "infer" ];then
     if [ ${model_name} = "ocr_det" ]; then
         eval_model_name="ch_ppocr_mobile_v2.0_det_train"
         rm -rf ./train_data/icdar2015
@@ -87,6 +87,21 @@ elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
         cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../
     fi
+elif [ ${MODE} = "cpp_infer" ];then
+    if [ ${model_name} = "ocr_det" ]; then
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
+        cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
+    elif [ ${model_name} = "ocr_rec" ]; then
+        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
+        cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../
+    elif [ ${model_name} = "ocr_system" ]; then
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
+        cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../
+    fi
 fi
 if [ ${MODE} = "serving_infer" ];then
...
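After this change, `cpp_infer` data preparation is split out of the `infer` branch: each model_name (ocr_det, ocr_rec, ocr_system) downloads exactly the inference models and test data its C++ run needs. For example, to stage everything for the chained det+rec C++ test (assuming the ocr_ppocr_mobile_params.txt config introduced in this PR sets model_name to ocr_system):

```shell
# Downloads det + rec inference models and the 50-image det dataset.
bash tests/prepare.sh ./tests/ocr_ppocr_mobile_params.txt 'cpp_infer'
```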
@@ -23,36 +23,46 @@ test.sh and params.txt are used together to take the lightweight OCR detection and recognition models from
 ```bash
 tests/
 ├── ocr_det_params.txt           # parameter config file for testing the OCR detection model
 ├── ocr_rec_params.txt           # parameter config file for testing the OCR recognition model
-└── prepare.sh                   # downloads the data and models needed to run test.sh
-└── test.sh                      # based on
+├── ocr_ppocr_mobile_params.txt  # parameter config file for testing the chained OCR detection + recognition pipeline
+└── prepare.sh                   # downloads the data and models needed to run test.sh
+└── test.sh                      # main test program
 ```
 
 # Usage
 
 test.sh has four run modes; each mode runs on different data and is used to test speed and accuracy:
 
-- Mode 1, lite_train_infer: train on a small dataset to quickly verify that the training-to-inference pipeline runs end to end; accuracy and speed are not validated;
-```
+- Mode 1: lite_train_infer, train on a small dataset to quickly verify that the training-to-inference pipeline runs end to end; accuracy and speed are not validated;
+```shell
 bash test/prepare.sh ./tests/ocr_det_params.txt 'lite_train_infer'
 bash tests/test.sh ./tests/ocr_det_params.txt 'lite_train_infer'
 ```
 
-- Mode 2, whole_infer: train on a small dataset and predict on a moderate dataset, to verify that the trained model can run inference and that inference speed is reasonable;
-```
+- Mode 2: whole_infer, train on a small dataset and predict on a moderate dataset, to verify that the trained model can run inference and that inference speed is reasonable;
+```shell
 bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_infer'
 bash tests/test.sh ./tests/ocr_det_params.txt 'whole_infer'
 ```
 
-- Mode 3, infer: no training; predict on the full dataset, walking through open-source model evaluation and dynamic-to-static export, and checking the inference model's prediction time and accuracy;
-```
+- Mode 3: infer, no training; predict on the full dataset, walking through open-source model evaluation and dynamic-to-static export, and checking the inference model's prediction time and accuracy;
+```shell
 bash tests/prepare.sh ./tests/ocr_det_params.txt 'infer'
-Usage 1:
+# Usage 1:
 bash tests/test.sh ./tests/ocr_det_params.txt 'infer'
-Usage 2: predict on a specified GPU card; the third argument is the GPU card id
+# Usage 2: predict on a specified GPU card; the third argument is the GPU card id
 bash tests/test.sh ./tests/ocr_det_params.txt 'infer' '1'
 ```
 
-Mode 4: whole_train_infer, CE: train on the full dataset and predict on the full dataset, to validate training accuracy, prediction accuracy, and prediction speed
-```
+- Mode 4: whole_train_infer, CE: train on the full dataset and predict on the full dataset, to validate training accuracy, prediction accuracy, and prediction speed;
+```shell
 bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_train_infer'
 bash tests/test.sh ./tests/ocr_det_params.txt 'whole_train_infer'
 ```
+
+- Mode 5: cpp_infer, CE: verify that C++ prediction with the inference model runs end to end;
+```shell
+bash tests/prepare.sh ./tests/ocr_det_params.txt 'cpp_infer'
+bash tests/test.sh ./tests/ocr_det_params.txt 'cpp_infer'
+```
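Since this PR extends mode 5 to recognition and the chained det+rec system, the same two commands can be pointed at the other config files, e.g.:

```shell
bash tests/prepare.sh ./tests/ocr_rec_params.txt 'cpp_infer'
bash tests/test.sh ./tests/ocr_rec_params.txt 'cpp_infer'
```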
@@ -192,7 +192,8 @@ if [ ${MODE} = "cpp_infer" ]; then
     cpp_infer_model_key=$(func_parser_key "${lines[62]}")
     cpp_image_dir_key=$(func_parser_key "${lines[63]}")
     cpp_infer_img_dir=$(func_parser_value "${lines[63]}")
-    cpp_save_log_key=$(func_parser_key "${lines[64]}")
+    cpp_infer_key1=$(func_parser_key "${lines[64]}")
+    cpp_infer_value1=$(func_parser_value "${lines[64]}")
     cpp_benchmark_key=$(func_parser_key "${lines[65]}")
     cpp_benchmark_value=$(func_parser_value "${lines[65]}")
 fi
@@ -368,7 +369,8 @@ function func_cpp_inference(){
     set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
     set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
     set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
-    command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
+    set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
+    command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
     eval $command
     last_status=${PIPESTATUS[0]}
     eval "cat ${_save_log_path}"
@@ -396,7 +398,8 @@ function func_cpp_inference(){
     set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}")
     set_precision=$(func_set_params "${cpp_precision_key}" "${precision}")
     set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
-    command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
+    set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
+    command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
     eval $command
     last_status=${PIPESTATUS[0]}
     eval "cat ${_save_log_path}"
...
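The `func_set_params` helper used above turns a parsed key/value pair into a `key=value` CLI argument and drops it when either side is `null`; that is what lets the new generic `cpp_infer_key1`/`cpp_infer_value1` slot carry `--rec_model_dir` for the system config while staying a no-op (`null:null`) elsewhere. A hedged sketch of that behavior (assumed; the real helper is defined in tests/test.sh):

```shell
# Hypothetical sketch: emit "key=value", or nothing for null placeholders.
function func_set_params(){
    local key=$1
    local value=$2
    if [ "${key}" = "null" ] || [ "${value}" = "null" ] || [ -z "${value}" ]; then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
```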