diff --git a/deploy/lite/config.txt b/deploy/lite/config.txt
index 4c68105d39031830a8222b3d88163aebc8cac257..dda0d2b0320544d3a82f59b0672c086c64d83d3d 100644
--- a/deploy/lite/config.txt
+++ b/deploy/lite/config.txt
@@ -4,4 +4,5 @@ det_db_box_thresh 0.5
 det_db_unclip_ratio 1.6
 det_db_use_dilate 0
 det_use_polygon_score 1
-use_direction_classify 1
\ No newline at end of file
+use_direction_classify 1
+rec_image_height 32
\ No newline at end of file
diff --git a/deploy/lite/crnn_process.cc b/deploy/lite/crnn_process.cc
index 7528f36fe6316c84724891a4421c047fbdd33fa2..6d5fc1504e7b1b3faa35a80662442f60d2e30499 100644
--- a/deploy/lite/crnn_process.cc
+++ b/deploy/lite/crnn_process.cc
@@ -19,24 +19,26 @@
 const std::vector<int> rec_image_shape{3, 32, 320};
 
-cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio) {
+cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio, int rec_image_height) {
   int imgC, imgH, imgW;
   imgC = rec_image_shape[0];
+  imgH = rec_image_height;
   imgW = rec_image_shape[2];
-  imgH = rec_image_shape[1];
 
-  imgW = int(32 * wh_ratio);
+  imgW = int(imgH * wh_ratio);
 
-  float ratio = static_cast<float>(img.cols) / static_cast<float>(img.rows);
+  float ratio = float(img.cols) / float(img.rows);
   int resize_w, resize_h;
   if (ceilf(imgH * ratio) > imgW)
     resize_w = imgW;
   else
-    resize_w = static_cast<int>(ceilf(imgH * ratio));
-  cv::Mat resize_img;
+    resize_w = int(ceilf(imgH * ratio));
+  cv::Mat resize_img;
   cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
              cv::INTER_LINEAR);
+  cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0,
+                     int(imgW - resize_img.cols), cv::BORDER_CONSTANT,
+                     {127, 127, 127});
+  return resize_img;
 }
 
 std::vector<std::string> ReadDict(std::string path) {
diff --git a/deploy/lite/crnn_process.h b/deploy/lite/crnn_process.h
index 29e67906976198210394c4960786105bf884dce8..ed7a3167069538a0c40d1bc01f0073c36cb7e461 100644
--- a/deploy/lite/crnn_process.h
+++ b/deploy/lite/crnn_process.h
@@ -26,7 +26,7 @@
 #include "opencv2/imgcodecs.hpp"
 #include "opencv2/imgproc.hpp"
 
-cv::Mat CrnnResizeImg(cv::Mat 
img, float wh_ratio); +cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio, int rec_image_height); std::vector ReadDict(std::string path); diff --git a/deploy/lite/ocr_db_crnn.cc b/deploy/lite/ocr_db_crnn.cc index 1ffbbacb74545b0bbea4957e25b6235225bad02b..cb2bf7791a4307d4e8d2167197d41d903410e0b4 100644 --- a/deploy/lite/ocr_db_crnn.cc +++ b/deploy/lite/ocr_db_crnn.cc @@ -162,7 +162,8 @@ void RunRecModel(std::vector>> boxes, cv::Mat img, std::vector charactor_dict, std::shared_ptr predictor_cls, int use_direction_classify, - std::vector *times) { + std::vector *times, + int rec_image_height) { std::vector mean = {0.5f, 0.5f, 0.5f}; std::vector scale = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f}; @@ -183,7 +184,7 @@ void RunRecModel(std::vector>> boxes, cv::Mat img, float wh_ratio = static_cast(crop_img.cols) / static_cast(crop_img.rows); - resize_img = CrnnResizeImg(crop_img, wh_ratio); + resize_img = CrnnResizeImg(crop_img, wh_ratio, rec_image_height); resize_img.convertTo(resize_img, CV_32FC3, 1 / 255.f); const float *dimg = reinterpret_cast(resize_img.data); @@ -444,7 +445,7 @@ void system(char **argv){ //// load config from txt file auto Config = LoadConfigTxt(det_config_path); int use_direction_classify = int(Config["use_direction_classify"]); - + int rec_image_height = int(Config["rec_image_height"]); auto charactor_dict = ReadDict(dict_path); charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc charactor_dict.push_back(" "); @@ -590,12 +591,16 @@ void rec(int argc, char **argv) { std::string batchsize = argv[6]; std::string img_dir = argv[7]; std::string dict_path = argv[8]; + std::string config_path = argv[9]; if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) { std::cerr << "Only support FP32 or INT8." 
<< std::endl; exit(1); } + auto Config = LoadConfigTxt(config_path); + int rec_image_height = int(Config["rec_image_height"]); + std::vector<cv::String> cv_all_img_names; cv::glob(img_dir, cv_all_img_names); @@ -630,7 +635,7 @@ void rec(int argc, char **argv) { std::vector<float> rec_text_score; std::vector<double> times; RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score, - charactor_dict, cls_predictor, 0, &times); + charactor_dict, cls_predictor, 0, &times, rec_image_height); //// print recognized text for (int i = 0; i < rec_text.size(); i++) { diff --git a/deploy/lite/readme.md b/deploy/lite/readme.md index 9926e2dd8c973b25b5397fd5825f790528ede279..883aff9890522c9d469555534928d7df419b76d6 100644 --- a/deploy/lite/readme.md +++ b/deploy/lite/readme.md @@ -34,7 +34,7 @@ For the compilation process of different development environments, please refer ### 1.2 Prepare Paddle-Lite library There are two ways to obtain the Paddle-Lite library: -- 1. Download directly, the download link of the Paddle-Lite library is as follows: +- 1. [Recommended] Download directly, the download link of the Paddle-Lite library is as follows: | Platform | Paddle-Lite library download link | |---|---| @@ -43,7 +43,9 @@ There are two ways to obtain the Paddle-Lite library: Note: 1. The above Paddle-Lite library is compiled from the Paddle-Lite 2.10 branch. For more information about Paddle-Lite 2.10, please refer to [link](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.10). -- 2. [Recommended] Compile Paddle-Lite to get the prediction library. The compilation method of Paddle-Lite is as follows: + **Note: It is recommended to use paddlelite>=2.10 version of the prediction library, other prediction library versions [download link](https://github.com/PaddlePaddle/Paddle-Lite/tags)** + +- 2. Compile Paddle-Lite to get the prediction library. 
The compilation method of Paddle-Lite is as follows: ``` git clone https://github.com/PaddlePaddle/Paddle-Lite.git cd Paddle-Lite @@ -104,21 +106,17 @@ If you directly use the model in the above table for deployment, you can skip th If the model to be deployed is not in the above table, you need to follow the steps below to obtain the optimized model. -The `opt` tool can be obtained by compiling Paddle Lite. +- Step 1: Refer to [document](https://www.paddlepaddle.org.cn/lite/v2.10/user_guides/opt/opt_python.html) to install paddlelite, which is used to convert paddle inference model to paddlelite required for running nb model ``` -git clone https://github.com/PaddlePaddle/Paddle-Lite.git -cd Paddle-Lite -git checkout release/v2.10 -./lite/tools/build.sh build_optimize_tool +pip install paddlelite==2.10 # The paddlelite version should be the same as the prediction library version ``` - -After the compilation is complete, the opt file is located under build.opt/lite/api/, You can view the operating options and usage of opt in the following ways: - +After installation, the following commands can view the help information ``` -cd build.opt/lite/api/ -./opt +paddle_lite_opt ``` +Introduction to paddle_lite_opt parameters: + |Options|Description| |---|---| |--model_dir|The path of the PaddlePaddle model to be optimized (non-combined form)| @@ -131,6 +129,8 @@ cd build.opt/lite/api/ `--model_dir` is suitable for the non-combined mode of the model to be optimized, and the inference model of PaddleOCR is the combined mode, that is, the model structure and model parameters are stored in a single file. +- Step 2: Use paddle_lite_opt to convert the inference model to the mobile model format. 
+ The following takes the ultra-lightweight Chinese model of PaddleOCR as an example to introduce the use of the compiled opt file to complete the conversion of the inference model to the Paddle-Lite optimized model ``` @@ -240,6 +240,7 @@ det_db_thresh 0.3 # Used to filter the binarized image of DB prediction, det_db_box_thresh 0.5 # DDB post-processing filter box threshold, if there is a missing box detected, it can be reduced as appropriate det_db_unclip_ratio 1.6 # Indicates the compactness of the text box, the smaller the value, the closer the text box to the text use_direction_classify 0 # Whether to use the direction classifier, 0 means not to use, 1 means to use +rec_image_height 32 # The height of the input image of the recognition model, the PP-OCRv3 model needs to be set to 48, and the PP-OCRv2 model needs to be set to 32 ``` 5. Run Model on phone @@ -258,8 +259,9 @@ After the above steps are completed, you can use adb to push the file to the pho cd /data/local/tmp/debug export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH # The use of ocr_db_crnn is: - # ./ocr_db_crnn Detection model file Orientation classifier model file Recognition model file Test image path Dictionary file path - ./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_opt.nb ./11.jpg ppocr_keys_v1.txt + # ./ocr_db_crnn Detection model file Orientation classifier model file Recognition model file Hardware Precision Threads Batchsize Test image path Config file path Dictionary file path Whether to use benchmark + ./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt ppocr_keys_v1.txt True +# precision can be INT8 for quantized model or FP32 for normal model. ``` If you modify the code, you need to recompile and push to the phone. @@ -283,3 +285,7 @@ A2: Replace the .jpg test image under ./debug with the image you want to test, a Q3: How to package it into the mobile APP? 
A3: This demo aims to provide the core algorithm part that can run OCR on mobile phones. Further, PaddleOCR/deploy/android_demo is an example of encapsulating this demo into a mobile app for reference. + +Q4: When running the demo, an error is reported `Error: This model is not supported, because kernel for 'io_copy' is not supported by Paddle-Lite.` + +A4: The problem is that the installed paddlelite version does not match the downloaded prediction library version. Make sure that the paddle_lite_opt tool matches your prediction library version, then convert the model to the nb format again and retry. diff --git a/deploy/lite/readme_ch.md b/deploy/lite/readme_ch.md index 99a543d0d60455443dd872c56a5832c8ca0ff4e9..e238b4ec53bb3b774b3b9575f212562e6027e979 100644 --- a/deploy/lite/readme_ch.md +++ b/deploy/lite/readme_ch.md @@ -8,7 +8,7 @@ - [2.1 模型优化](#21-模型优化) - [2.2 与手机联调](#22-与手机联调) - [FAQ](#faq) - + 本教程将介绍基于[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite) 在移动端部署PaddleOCR超轻量中文检测、识别模型的详细步骤。 @@ -32,7 +32,7 @@ Paddle Lite是飞桨轻量化推理引擎,为手机、IOT端提供高效推理 ### 1.2 准备预测库 预测库有两种获取方式: -- 1. 直接下载,预测库下载链接如下: +- 1. [推荐]直接下载,预测库下载链接如下: | 平台 | 预测库下载链接 | |---|---| @@ -41,7 +41,9 @@ Paddle Lite是飞桨轻量化推理引擎,为手机、IOT端提供高效推理 注:1. 上述预测库为PaddleLite 2.10分支编译得到,有关PaddleLite 2.10 详细信息可参考 [链接](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.10) 。 -- 2. [推荐]编译Paddle-Lite得到预测库,Paddle-Lite的编译方式如下: +**注:建议使用paddlelite>=2.10版本的预测库,其他预测库版本[下载链接](https://github.com/PaddlePaddle/Paddle-Lite/tags)** + +- 2. 
编译Paddle-Lite得到预测库,Paddle-Lite的编译方式如下: ``` git clone https://github.com/PaddlePaddle/Paddle-Lite.git cd Paddle-Lite @@ -102,22 +104,16 @@ Paddle-Lite 提供了多种策略来自动优化原始的模型,其中包括 如果要部署的模型不在上述表格中,则需要按照如下步骤获得优化后的模型。 -模型优化需要Paddle-Lite的opt可执行文件,可以通过编译Paddle-Lite源码获得,编译步骤如下: +- 步骤1:参考[文档](https://www.paddlepaddle.org.cn/lite/v2.10/user_guides/opt/opt_python.html)安装paddlelite,用于转换paddle inference model为paddlelite运行所需的nb模型 ``` -# 如果准备环境时已经clone了Paddle-Lite,则不用重新clone Paddle-Lite -git clone https://github.com/PaddlePaddle/Paddle-Lite.git -cd Paddle-Lite -git checkout release/v2.10 -# 启动编译 -./lite/tools/build.sh build_optimize_tool +pip install paddlelite==2.10 # paddlelite版本要与预测库版本一致 ``` - -编译完成后,opt文件位于`build.opt/lite/api/`下,可通过如下方式查看opt的运行选项和使用方式; +安装完后,如下指令可以查看帮助信息 ``` -cd build.opt/lite/api/ -./opt +paddle_lite_opt ``` +paddle_lite_opt 参数介绍: |选项|说明| |---|---| |--model_dir|待优化的PaddlePaddle模型(非combined形式)的路径| @@ -130,6 +126,8 @@ cd build.opt/lite/api/ `--model_dir`适用于待优化的模型是非combined方式,PaddleOCR的inference模型是combined方式,即模型结构和模型参数使用单独一个文件存储。 +- 步骤2:使用paddle_lite_opt将inference模型转换成移动端模型格式。 + 下面以PaddleOCR的超轻量中文模型为例,介绍使用编译好的opt文件完成inference模型到Paddle-Lite优化模型的转换。 ``` @@ -148,7 +146,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_cls 转换成功后,inference模型目录下会多出`.nb`结尾的文件,即是转换成功的模型文件。 -注意:使用paddle-lite部署时,需要使用opt工具优化后的模型。 opt 工具的输入模型是paddle保存的inference模型 +注意:使用paddle-lite部署时,需要使用opt工具优化后的模型。 opt工具的输入模型是paddle保存的inference模型 ### 2.2 与手机联调 @@ -234,13 +232,14 @@ ppocr_keys_v1.txt # 中文字典 ... ``` -2. `config.txt` 包含了检测器、分类器的超参数,如下: +2. `config.txt` 包含了检测器、分类器、识别器的超参数,如下: ``` max_side_len 960 # 输入图像长宽大于960时,等比例缩放图像,使得图像最长边为960 det_db_thresh 0.3 # 用于过滤DB预测的二值化图像,设置为0.-0.3对结果影响不明显 -det_db_box_thresh 0.5 # DB后处理过滤box的阈值,如果检测存在漏框情况,可酌情减小 +det_db_box_thresh 0.5 # 检测器后处理过滤box的阈值,如果检测存在漏框情况,可酌情减小 det_db_unclip_ratio 1.6 # 表示文本框的紧致程度,越小则文本框更靠近文本 use_direction_classify 0 # 是否使用方向分类器,0表示不使用,1表示使用 +rec_image_height 32 # 识别模型输入图像的高度,PP-OCRv3模型设置为48,PP-OCRv2模型需要设置为32 ``` 5. 
启动调试 @@ -259,8 +258,8 @@ cd /data/local/tmp/debug export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH # 开始使用,ocr_db_crnn可执行文件的使用方式为: - # ./ocr_db_crnn 检测模型文件 方向分类器模型文件 识别模型文件 测试图像路径 字典文件路径 - ./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb ./11.jpg ppocr_keys_v1.txt + # ./ocr_db_crnn 检测模型文件 方向分类器模型文件 识别模型文件 运行硬件 运行精度 线程数 batchsize 测试图像路径 参数配置路径 字典文件路径 是否使用benchmark参数 + ./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt ppocr_keys_v1.txt True ``` 如果对代码做了修改,则需要重新编译并push到手机上。 @@ -284,3 +283,7 @@ A2:替换debug下的.jpg测试图像为你想要测试的图像,adb push 到 Q3:如何封装到手机APP中? A3:此demo旨在提供能在手机上运行OCR的核心算法部分,PaddleOCR/deploy/android_demo是将这个demo封装到手机app的示例,供参考 + +Q4:运行demo时遇到报错`Error: This model is not supported, because kernel for 'io_copy' is not supported by Paddle-Lite.` + +A4:问题是安装的paddlelite版本和下载的预测库版本不匹配,确保paddle_lite_opt工具和你的预测库版本匹配,重新转nb模型试试。