diff --git a/deploy/hubserving/ocr_det/params.py b/deploy/hubserving/ocr_det/params.py index bc75cc404e43e0a6e9242c2684d615b4575e5d8f..2587a297662cb34d22dbdfe191439e61066cda78 100755 --- a/deploy/hubserving/ocr_det/params.py +++ b/deploy/hubserving/ocr_det/params.py @@ -13,7 +13,7 @@ def read_params(): #params for text detector cfg.det_algorithm = "DB" - cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/" + cfg.det_model_dir = "./inference/ch_PP-OCRv2_det_infer/" cfg.det_limit_side_len = 960 cfg.det_limit_type = 'max' diff --git a/deploy/hubserving/ocr_rec/params.py b/deploy/hubserving/ocr_rec/params.py index f8d29114357946c9b6264079fca2eb4b19dbefba..5e11c3cfee0c9387fce7f465f15f9424b7b04e9d 100644 --- a/deploy/hubserving/ocr_rec/params.py +++ b/deploy/hubserving/ocr_rec/params.py @@ -13,7 +13,7 @@ def read_params(): #params for text recognizer cfg.rec_algorithm = "CRNN" - cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v2.0_rec_infer/" + cfg.rec_model_dir = "./inference/ch_PP-OCRv2_rec_infer/" cfg.rec_image_shape = "3, 32, 320" cfg.rec_char_type = 'ch' diff --git a/deploy/hubserving/ocr_system/params.py b/deploy/hubserving/ocr_system/params.py index bee53bfd346e6d4d91738a2e06a0b4dab8e2b0de..4698e8ce5d8f8c826fe04a85906189e729104ddb 100755 --- a/deploy/hubserving/ocr_system/params.py +++ b/deploy/hubserving/ocr_system/params.py @@ -13,7 +13,7 @@ def read_params(): #params for text detector cfg.det_algorithm = "DB" - cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/" + cfg.det_model_dir = "./inference/ch_PP-OCRv2_det_infer/" cfg.det_limit_side_len = 960 cfg.det_limit_type = 'max' @@ -31,7 +31,7 @@ def read_params(): #params for text recognizer cfg.rec_algorithm = "CRNN" - cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v2.0_rec_infer/" + cfg.rec_model_dir = "./inference/ch_PP-OCRv2_rec_infer/" cfg.rec_image_shape = "3, 32, 320" cfg.rec_char_type = 'ch' diff --git a/deploy/hubserving/readme.md b/deploy/hubserving/readme.md index 
11b843fec1052c3ad401ca0b7d1cb602401af8f8..b52e3584c36173e4c607dbbd9679605c98de8a67 100755 --- a/deploy/hubserving/readme.md +++ b/deploy/hubserving/readme.md @@ -34,10 +34,10 @@ pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/sim ``` ### 2. 下载推理模型 -安装服务模块前,需要准备推理模型并放到正确路径。默认使用的是v2.0版的超轻量模型,默认模型路径为: +安装服务模块前,需要准备推理模型并放到正确路径。默认使用的是PP-OCRv2模型,默认模型路径为: ``` -检测模型:./inference/ch_ppocr_mobile_v2.0_det_infer/ -识别模型:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +检测模型:./inference/ch_PP-OCRv2_det_infer/ +识别模型:./inference/ch_PP-OCRv2_rec_infer/ 方向分类器:./inference/ch_ppocr_mobile_v2.0_cls_infer/ ``` diff --git a/deploy/hubserving/readme_en.md b/deploy/hubserving/readme_en.md index 539ad722cae78b8315b87d35f9af6ab81140c5b3..3bbcf98cd8b78407613e6bdfb5d5ab8b0a25a084 100755 --- a/deploy/hubserving/readme_en.md +++ b/deploy/hubserving/readme_en.md @@ -35,10 +35,10 @@ pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/sim ``` ### 2. Download inference model -Before installing the service module, you need to prepare the inference model and put it in the correct path. By default, the ultra lightweight model of v2.0 is used, and the default model path is: +Before installing the service module, you need to prepare the inference model and put it in the correct path. By default, the PP-OCRv2 models are used, and the default model paths are: ``` -detection model: ./inference/ch_ppocr_mobile_v2.0_det_infer/ -recognition model: ./inference/ch_ppocr_mobile_v2.0_rec_infer/ +detection model: ./inference/ch_PP-OCRv2_det_infer/ +recognition model: ./inference/ch_PP-OCRv2_rec_infer/ text direction classifier: ./inference/ch_ppocr_mobile_v2.0_cls_infer/ ```