diff --git a/deploy/hubserving/ocr_det/params.py b/deploy/hubserving/ocr_det/params.py index bc75cc404e43e0a6e9242c2684d615b4575e5d8f..2587a297662cb34d22dbdfe191439e61066cda78 100755 --- a/deploy/hubserving/ocr_det/params.py +++ b/deploy/hubserving/ocr_det/params.py @@ -13,7 +13,7 @@ def read_params(): #params for text detector cfg.det_algorithm = "DB" - cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/" + cfg.det_model_dir = "./inference/ch_PP-OCRv2_det_infer/" cfg.det_limit_side_len = 960 cfg.det_limit_type = 'max' diff --git a/deploy/hubserving/ocr_rec/params.py b/deploy/hubserving/ocr_rec/params.py index f8d29114357946c9b6264079fca2eb4b19dbefba..5e11c3cfee0c9387fce7f465f15f9424b7b04e9d 100644 --- a/deploy/hubserving/ocr_rec/params.py +++ b/deploy/hubserving/ocr_rec/params.py @@ -13,7 +13,7 @@ def read_params(): #params for text recognizer cfg.rec_algorithm = "CRNN" - cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v2.0_rec_infer/" + cfg.rec_model_dir = "./inference/ch_PP-OCRv2_rec_infer/" cfg.rec_image_shape = "3, 32, 320" cfg.rec_char_type = 'ch' diff --git a/deploy/hubserving/ocr_system/params.py b/deploy/hubserving/ocr_system/params.py index bee53bfd346e6d4d91738a2e06a0b4dab8e2b0de..4698e8ce5d8f8c826fe04a85906189e729104ddb 100755 --- a/deploy/hubserving/ocr_system/params.py +++ b/deploy/hubserving/ocr_system/params.py @@ -13,7 +13,7 @@ def read_params(): #params for text detector cfg.det_algorithm = "DB" - cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/" + cfg.det_model_dir = "./inference/ch_PP-OCRv2_det_infer/" cfg.det_limit_side_len = 960 cfg.det_limit_type = 'max' @@ -31,7 +31,7 @@ def read_params(): #params for text recognizer cfg.rec_algorithm = "CRNN" - cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v2.0_rec_infer/" + cfg.rec_model_dir = "./inference/ch_PP-OCRv2_rec_infer/" cfg.rec_image_shape = "3, 32, 320" cfg.rec_char_type = 'ch' diff --git a/deploy/hubserving/readme.md b/deploy/hubserving/readme.md index 11b843fec1052c3ad401ca0b7d1cb602401af8f8..b52e3584c36173e4c607dbbd9679605c98de8a67 100755 --- a/deploy/hubserving/readme.md +++ b/deploy/hubserving/readme.md @@ -34,10 +34,10 @@ pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/sim ``` ### 2. 下载推理模型 -安装服务模块前,需要准备推理模型并放到正确路径。默认使用的是v2.0版的超轻量模型,默认模型路径为: +安装服务模块前,需要准备推理模型并放到正确路径。默认使用的是PP-OCRv2模型,默认模型路径为: ``` -检测模型:./inference/ch_ppocr_mobile_v2.0_det_infer/ -识别模型:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +检测模型:./inference/ch_PP-OCRv2_det_infer/ +识别模型:./inference/ch_PP-OCRv2_rec_infer/ 方向分类器:./inference/ch_ppocr_mobile_v2.0_cls_infer/ ``` diff --git a/deploy/hubserving/readme_en.md b/deploy/hubserving/readme_en.md index 539ad722cae78b8315b87d35f9af6ab81140c5b3..3bbcf98cd8b78407613e6bdfb5d5ab8b0a25a084 100755 --- a/deploy/hubserving/readme_en.md +++ b/deploy/hubserving/readme_en.md @@ -35,10 +35,10 @@ pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/sim ``` ### 2. Download inference model -Before installing the service module, you need to prepare the inference model and put it in the correct path. By default, the ultra lightweight model of v2.0 is used, and the default model path is: +Before installing the service module, you need to prepare the inference model and put it in the correct path. 
By default, the PP-OCRv2 models are used, and the default model path is: ``` -detection model: ./inference/ch_ppocr_mobile_v2.0_det_infer/ -recognition model: ./inference/ch_ppocr_mobile_v2.0_rec_infer/ +detection model: ./inference/ch_PP-OCRv2_det_infer/ +recognition model: ./inference/ch_PP-OCRv2_rec_infer/ text direction classifier: ./inference/ch_ppocr_mobile_v2.0_cls_infer/ ``` diff --git a/doc/doc_ch/benchmark.md b/doc/doc_ch/benchmark.md index 7ab829576e78aaf9296a67871e84f38aecb8bf80..39b9724abe04494c3e61f54597c64400a132e30b 100644 --- a/doc/doc_ch/benchmark.md +++ b/doc/doc_ch/benchmark.md @@ -35,4 +35,4 @@ | PP-OCR mobile | 356 | 11 6| | PP-OCR server | 1056 | 200 | -更多 PP-OCR 系列模型的预测指标可以参考[PP-OCR Benchamrk](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.2/doc/doc_ch/benchmark.md) +更多 PP-OCR 系列模型的预测指标可以参考[PP-OCR Benchmark](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.2/doc/doc_ch/benchmark.md) diff --git a/doc/doc_ch/quickstart.md b/doc/doc_ch/quickstart.md index 1896d7a137f0768c6b2a8e0c02b18ff61fbfd03c..d9ff5a628fbd8d8effd50fb2b276d89d5e13225a 100644 --- a/doc/doc_ch/quickstart.md +++ b/doc/doc_ch/quickstart.md @@ -47,10 +47,10 @@ cd /path/to/ppocr_img #### 2.1.1 中英文模型 -* 检测+方向分类器+识别全流程:设置方向分类器参数`--use_angle_cls true`后可对竖排文本进行识别。 +* 检测+方向分类器+识别全流程:`--use_angle_cls true`设置使用方向分类器识别180度旋转文字,`--use_gpu false`设置不使用GPU ```bash - paddleocr --image_dir ./imgs/11.jpg --use_angle_cls true + paddleocr --image_dir ./imgs/11.jpg --use_angle_cls true --use_gpu false ``` 结果是一个list,每个item包含了文本框,文字和识别置信度 diff --git a/doc/doc_en/benchmark_en.md b/doc/doc_en/benchmark_en.md index 5d3acd59560b6f966ecaacc3698f830e3bc5b149..70b33aebd95cfa6e02122c6816cd3863d2b584ab 100755 --- a/doc/doc_en/benchmark_en.md +++ b/doc/doc_en/benchmark_en.md @@ -38,4 +38,4 @@ Compares the time-consuming on CPU and T4 GPU (ms): | PP-OCR mobile | 356 | 116| | PP-OCR server | 1056 | 200 | -More indicators of PP-OCR series models can be referred to [PP-OCR Benchamrk](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.2/doc/doc_en/benchmark_en.md) +More indicators of PP-OCR series models can be referred to [PP-OCR Benchmark](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.2/doc/doc_en/benchmark_en.md) diff --git a/doc/doc_en/quickstart_en.md b/doc/doc_en/quickstart_en.md index 0055d8f7a89d0d218d001ea94fd4c620de5d037f..9ed83aceb9f562ac3099f22eaf264b966c0d48c7 100644 --- a/doc/doc_en/quickstart_en.md +++ b/doc/doc_en/quickstart_en.md @@ -53,10 +53,10 @@ If you do not use the provided test image, you can replace the following `--imag #### 2.1.1 Chinese and English Model -* Detection, direction classification and recognition: set the direction classifier parameter`--use_angle_cls true` to recognize vertical text. 
+* Detection, direction classification and recognition: set the parameter `--use_angle_cls true` to use the direction classifier to recognize 180-degree rotated text, and `--use_gpu false` to disable the GPU.
 
   ```bash
-  paddleocr --image_dir ./imgs_en/img_12.jpg --use_angle_cls true --lang en
+  paddleocr --image_dir ./imgs_en/img_12.jpg --use_angle_cls true --lang en --use_gpu false
   ```
 
   Output will be a list, each item contains bounding box, text and recognition confidence
diff --git a/tools/export_model.py b/tools/export_model.py
index cae87aca129134d64711e364bf10428d69500a06..71ecc63b0bdadce8b2bd41dc9119ab556aaa435c 100755
--- a/tools/export_model.py
+++ b/tools/export_model.py
@@ -26,7 +26,7 @@ from paddle.jit import to_static
 
 from ppocr.modeling.architectures import build_model
 from ppocr.postprocess import build_post_process
-from ppocr.utils.save_load import init_model
+from ppocr.utils.save_load import load_dygraph_params
 from ppocr.utils.logging import get_logger
 from tools.program import load_config, merge_config, ArgsParser
 
@@ -99,7 +99,7 @@ def main():
         else:  # base rec model
             config["Architecture"]["Head"]["out_channels"] = char_num
     model = build_model(config["Architecture"])
-    init_model(config, model)
+    _ = load_dygraph_params(config, model, logger, None)
     model.eval()
 
     save_path = config["Global"]["save_inference_dir"]
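
The hubserving modules load their models from the default paths in `params.py`, which now point to the PP-OCRv2 inference models. Below is a minimal sketch of preparing those directories and starting the service; the download URLs are assumptions taken from the PaddleOCR model list and may need adjusting for your release.

```bash
# Minimal sketch: place the inference models in the default paths expected by
# deploy/hubserving/*/params.py (URLs assumed from the PaddleOCR model list).
mkdir -p inference && cd inference
wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar && tar xf ch_PP-OCRv2_det_infer.tar
wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar && tar xf ch_PP-OCRv2_rec_infer.tar
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar && tar xf ch_ppocr_mobile_v2.0_cls_infer.tar
cd ..

# Install and start the end-to-end detection + classification + recognition
# service module, as described in deploy/hubserving/readme.md.
hub install deploy/hubserving/ocr_system/
hub serving start -m ocr_system
```

The detection-only and recognition-only modules (`ocr_det`, `ocr_rec`) can be installed and started the same way.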