diff --git a/deploy/hubserving/ocr_cls/params.py b/deploy/hubserving/ocr_cls/params.py
old mode 100644
new mode 100755
index bcdb2d6e3800c0ba7897b71f0b0999cafdc223af..72a7a10249176d86f75b5d3c3adae7f1021a75a8
--- a/deploy/hubserving/ocr_cls/params.py
+++ b/deploy/hubserving/ocr_cls/params.py
@@ -12,7 +12,7 @@ def read_params():
     cfg = Config()
 
     #params for text classifier
-    cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v1.1_cls_infer/"
+    cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v2.0_cls_infer/"
     cfg.cls_image_shape = "3, 48, 192"
     cfg.label_list = ['0', '180']
     cfg.cls_batch_num = 30
diff --git a/deploy/hubserving/ocr_det/params.py b/deploy/hubserving/ocr_det/params.py
old mode 100644
new mode 100755
index 4d4a9fc27b727034d8185c82dad3e542659fd463..e50decbbc8ee604863c5965aa95bf1f79fa71d0a
--- a/deploy/hubserving/ocr_det/params.py
+++ b/deploy/hubserving/ocr_det/params.py
@@ -13,7 +13,7 @@ def read_params():
 
     #params for text detector
     cfg.det_algorithm = "DB"
-    cfg.det_model_dir = "./inference/ch_ppocr_mobile_v1.1_det_infer/"
+    cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/"
     cfg.det_limit_side_len = 960
     cfg.det_limit_type = 'max'
 
@@ -27,16 +27,6 @@ def read_params():
     # cfg.det_east_cover_thresh = 0.1
     # cfg.det_east_nms_thresh = 0.2
 
-    # #params for text recognizer
-    # cfg.rec_algorithm = "CRNN"
-    # cfg.rec_model_dir = "./inference/ch_det_mv3_crnn/"
-
-    # cfg.rec_image_shape = "3, 32, 320"
-    # cfg.rec_char_type = 'ch'
-    # cfg.rec_batch_num = 30
-    # cfg.rec_char_dict_path = "./ppocr/utils/ppocr_keys_v1.txt"
-    # cfg.use_space_char = True
-
     cfg.use_zero_copy_run = False
     cfg.use_pdserving = False
 
diff --git a/deploy/hubserving/ocr_system/params.py b/deploy/hubserving/ocr_system/params.py
old mode 100644
new mode 100755
index 1f6a07bcc0167e90564edab9c4719b9192233b4c..a0e1960b2857630780f6b34773d7760279f862a2
--- a/deploy/hubserving/ocr_system/params.py
+++ b/deploy/hubserving/ocr_system/params.py
@@ -13,7 +13,7 @@ def read_params():
 
     #params for text detector
     cfg.det_algorithm = "DB"
-    cfg.det_model_dir = "./inference/ch_ppocr_mobile_v1.1_det_infer/"
+    cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/"
     cfg.det_limit_side_len = 960
     cfg.det_limit_type = 'max'
 
@@ -29,7 +29,7 @@ def read_params():
 
     #params for text recognizer
     cfg.rec_algorithm = "CRNN"
-    cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v1.1_rec_infer/"
+    cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v2.0_rec_infer/"
 
     cfg.rec_image_shape = "3, 32, 320"
     cfg.rec_char_type = 'ch'
@@ -41,7 +41,7 @@ def read_params():
 
     #params for text classifier
     cfg.use_angle_cls = True
-    cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v1.1_cls_infer/"
+    cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v2.0_cls_infer/"
     cfg.cls_image_shape = "3, 48, 192"
     cfg.label_list = ['0', '180']
     cfg.cls_batch_num = 30
@@ -49,5 +49,6 @@ def read_params():
 
     cfg.use_zero_copy_run = False
     cfg.use_pdserving = False
+    cfg.drop_score = 0.5
 
     return cfg
diff --git a/deploy/hubserving/readme.md b/deploy/hubserving/readme.md
old mode 100644
new mode 100755
index f64bd372569f12ea52214e3e89927df0c859a17f..d86d47041007f614d48c9b0e5adebc8739029aac
--- a/deploy/hubserving/readme.md
+++ b/deploy/hubserving/readme.md
@@ -33,11 +33,11 @@ pip3 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```
 
 ### 2. 下载推理模型
-安装服务模块前,需要准备推理模型并放到正确路径。默认使用的是v1.1版的超轻量模型,默认模型路径为:
+安装服务模块前,需要准备推理模型并放到正确路径。默认使用的是v2.0版的超轻量模型,默认模型路径为:
 ```
-检测模型:./inference/ch_ppocr_mobile_v1.1_det_infer/
-识别模型:./inference/ch_ppocr_mobile_v1.1_rec_infer/
-方向分类器:./inference/ch_ppocr_mobile_v1.1_cls_infer/
+检测模型:./inference/ch_ppocr_mobile_v2.0_det_infer/
+识别模型:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+方向分类器:./inference/ch_ppocr_mobile_v2.0_cls_infer/
 ```
 
 **模型路径可在`params.py`中查看和修改。** 更多模型可以从PaddleOCR提供的[模型库](../../doc/doc_ch/models_list.md)下载,也可以替换成自己训练转换好的模型。
diff --git a/deploy/hubserving/readme_en.md b/deploy/hubserving/readme_en.md
old mode 100644
new mode 100755
index c6cf53413bc3eac45f933fead66356d1491cc60c..b2ffdf0b7af638281933bfd84f9304d9ec9867cf
--- a/deploy/hubserving/readme_en.md
+++ b/deploy/hubserving/readme_en.md
@@ -34,11 +34,11 @@ pip3 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```
 
 ### 2. Download inference model
-Before installing the service module, you need to prepare the inference model and put it in the correct path. By default, the ultra lightweight model of v1.1 is used, and the default model path is:
+Before installing the service module, you need to prepare the inference model and put it in the correct path. By default, the ultra lightweight model of v2.0 is used, and the default model path is:
 ```
-detection model: ./inference/ch_ppocr_mobile_v1.1_det_infer/
-recognition model: ./inference/ch_ppocr_mobile_v1.1_rec_infer/
-text direction classifier: ./inference/ch_ppocr_mobile_v1.1_cls_infer/
+detection model: ./inference/ch_ppocr_mobile_v2.0_det_infer/
+recognition model: ./inference/ch_ppocr_mobile_v2.0_rec_infer/
+text direction classifier: ./inference/ch_ppocr_mobile_v2.0_cls_infer/
 ```
 
 **The model path can be found and modified in `params.py`.** More models provided by PaddleOCR can be obtained from the [model library](../../doc/doc_en/models_list_en.md). You can also use models trained by yourself.
diff --git a/tools/test_hubserving.py b/tools/test_hubserving.py
old mode 100644
new mode 100755
index f28ff39e441e9f0d8a4c6e1081827daf8aff9792..0548726417699855a3905fa1a3fb679d69c85fc8
--- a/tools/test_hubserving.py
+++ b/tools/test_hubserving.py
@@ -17,8 +17,9 @@ __dir__ = os.path.dirname(os.path.abspath(__file__))
 sys.path.append(__dir__)
 sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
 
-from ppocr.utils.utility import initial_logger
-logger = initial_logger()
+from ppocr.utils.logging import get_logger
+logger = get_logger()
+
 import cv2
 import numpy as np
 import time
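
For context, the params.py defaults updated above are consumed by the hubserving modules, and tools/test_hubserving.py sends them test requests over HTTP. Below is a minimal client sketch in the same spirit; the service URL (port 8868), the ocr_system endpoint name, and the sample image path are assumptions about a typical local deployment, not values taken from this patch.

# Minimal sketch of querying a locally running ocr_system hubserving module.
# The URL, port, and image path below are assumptions; adjust them to your setup.
import base64
import json

import requests


def ocr_request(image_path, url="http://127.0.0.1:8868/predict/ocr_system"):
    # PaddleHub serving accepts a JSON body containing base64-encoded images.
    with open(image_path, "rb") as f:
        img_b64 = base64.b64encode(f.read()).decode("utf8")
    payload = {"images": [img_b64]}
    headers = {"Content-type": "application/json"}
    resp = requests.post(url, headers=headers, data=json.dumps(payload))
    resp.raise_for_status()
    # Return the raw JSON response; its "results" field holds the OCR output.
    return resp.json()


if __name__ == "__main__":
    # Placeholder image path; any local image file works.
    print(ocr_request("./doc/imgs/11.jpg"))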