diff --git a/MANIFEST.in b/MANIFEST.in index 1ca129b15787978e5e890498eb4d6609d8a4cba0..d674fabc5d714f7e31ed00b32be2d44d6dd10871 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,4 +5,5 @@ recursive-include ppocr/utils *.txt utility.py logging.py network.py recursive-include ppocr/data *.py recursive-include ppocr/postprocess *.py recursive-include tools/infer *.py -recursive-include ppocr/utils/e2e_utils *.py \ No newline at end of file +recursive-include ppocr/utils/e2e_utils *.py +recursive-include ppstructure *.py \ No newline at end of file diff --git a/__init__.py b/__init__.py index 7d94f66be072067172d56da13d8bb27d9aeac431..504aeca61b734937b97bab18dec8e49237d873d5 100644 --- a/__init__.py +++ b/__init__.py @@ -11,7 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import paddleocr +from .paddleocr import * -__all__ = ['PaddleOCR', 'draw_ocr'] -from .paddleocr import PaddleOCR -from .tools.infer.utility import draw_ocr +__version__ = paddleocr.VERSION +__all__ = ['PaddleOCR', 'PPStructure', 'draw_ocr', 'draw_structure_result', 'save_structure_res'] diff --git a/doc/doc_ch/whl.md b/doc/doc_ch/whl.md index 957c6926b15fad3091265da9295f5ad820fe6a26..0c969988a649ba6cea0115abd1ba0aa5b19ea892 100644 --- a/doc/doc_ch/whl.md +++ b/doc/doc_ch/whl.md @@ -356,3 +356,4 @@ im_show.save('result.jpg') | rec | 前向时是否启动识别 | TRUE | | cls | 前向时是否启动分类 (命令行模式下使用use_angle_cls控制前向是否启动分类) | FALSE | | show_log | 是否打印det和rec等信息 | FALSE | +| type | 执行ocr或者表格结构化, 值可选['ocr','structure'] | ocr | diff --git a/doc/doc_en/whl_en.md b/doc/doc_en/whl_en.md index b9909f498e830309eaad952df9171cd63b6f5e7b..2d68845b18ef8937c626179c2e0a746201341bec 100644 --- a/doc/doc_en/whl_en.md +++ b/doc/doc_en/whl_en.md @@ -362,5 +362,5 @@ im_show.save('result.jpg') | det | Enable detction when `ppocr.ocr` func exec | TRUE | | rec | Enable recognition when `ppocr.ocr` func exec | TRUE | | cls | Enable classification when `ppocr.ocr` func exec((Use use_angle_cls in command line mode to control whether to start classification in the forward direction) | FALSE | -| show_log | Whether to print log in det and rec - | FALSE | \ No newline at end of file +| show_log | Whether to print log in det and rec | FALSE | +| type | Perform ocr or table structuring, the value is selected in ['ocr','structure'] | ocr | \ No newline at end of file diff --git a/doc/table/table.jpg b/doc/table/table.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3daa619e52dc2471df62ea7767be3bff350b623f Binary files /dev/null and b/doc/table/table.jpg differ diff --git a/paddleocr.py b/paddleocr.py index f2a3496897c07f8d969b198441076ef992774c19..dc40bbbf62a83b6be430122a597587321d09566d 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -29,16 +29,19 @@ from ppocr.utils.logging import get_logger logger = get_logger() from ppocr.utils.utility import check_and_read_gif, get_image_file_list from ppocr.utils.network import maybe_download, download_with_progressbar, is_link, confirm_model_dir_url -from tools.infer.utility import draw_ocr, init_args, str2bool +from tools.infer.utility import draw_ocr, str2bool +from ppstructure.utility import init_args, draw_structure_result +from ppstructure.predict_system import OCRSystem, save_structure_res -__all__ = ['PaddleOCR'] +__all__ = ['PaddleOCR', 'PPStructure', 'draw_ocr', 'draw_structure_result', 'save_structure_res'] model_urls = { 'det': { 'ch': 
'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar', 'en': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_ppocr_mobile_v2.0_det_infer.tar' + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_ppocr_mobile_v2.0_det_infer.tar', + 'structure': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar' }, 'rec': { 'ch': { @@ -110,14 +113,21 @@ model_urls = { 'url': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/devanagari_ppocr_mobile_v2.0_rec_infer.tar', 'dict_path': './ppocr/utils/dict/devanagari_dict.txt' + }, + 'structure': { + 'url': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar', + 'dict_path': 'ppocr/utils/dict/table_dict.txt' } }, - 'cls': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar' + 'cls': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar', + 'table': { + 'url': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar', + 'dict_path': 'ppocr/utils/dict/table_structure_dict.txt' + } } SUPPORT_DET_MODEL = ['DB'] -VERSION = '2.1' +VERSION = '2.2' SUPPORT_REC_MODEL = ['CRNN'] BASE_DIR = os.path.expanduser("~/.paddleocr/") @@ -129,9 +139,10 @@ def parse_args(mMain=True): parser.add_argument("--lang", type=str, default='ch') parser.add_argument("--det", type=str2bool, default=True) parser.add_argument("--rec", type=str2bool, default=True) + parser.add_argument("--type", type=str, default='ocr') for action in parser._actions: - if action.dest == 'rec_char_dict_path': + if action.dest in ['rec_char_dict_path', 'table_char_dict_path']: action.default = None if mMain: return parser.parse_args() @@ -142,6 +153,42 @@ def parse_args(mMain=True): return argparse.Namespace(**inference_args_dict) +def parse_lang(lang): + latin_lang = [ + 'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', + 'hr', 'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', + 'mt', 'nl', 'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', + 'sl', 'sq', 'sv', 'sw', 'tl', 'tr', 'uz', 'vi' + ] + arabic_lang = ['ar', 'fa', 'ug', 'ur'] + cyrillic_lang = [ + 'ru', 'rs_cyrillic', 'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', + 'ava', 'dar', 'inh', 'che', 'lbe', 'lez', 'tab' + ] + devanagari_lang = [ + 'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', + 'gom', 'sa', 'bgc' + ] + if lang in latin_lang: + lang = "latin" + elif lang in arabic_lang: + lang = "arabic" + elif lang in cyrillic_lang: + lang = "cyrillic" + elif lang in devanagari_lang: + lang = "devanagari" + assert lang in model_urls[ + 'rec'], 'param lang must in {}, but got {}'.format( + model_urls['rec'].keys(), lang) + if lang == "ch": + det_lang = "ch" + elif lang == 'structure': + det_lang = 'structure' + else: + det_lang = "en" + return lang, det_lang + + class PaddleOCR(predict_system.TextSystem): def __init__(self, **kwargs): """ @@ -154,52 +201,17 @@ class PaddleOCR(predict_system.TextSystem): if not params.show_log: logger.setLevel(logging.INFO) self.use_angle_cls = params.use_angle_cls - lang = params.lang - latin_lang = [ - 'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', - 'hr', 'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', - 'mt', 'nl', 'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', - 'sl', 'sq', 'sv', 'sw', 'tl', 'tr', 'uz', 'vi' - ] - arabic_lang = ['ar', 'fa', 'ug', 'ur'] - cyrillic_lang = [ - 'ru', 'rs_cyrillic', 
'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', - 'ava', 'dar', 'inh', 'che', 'lbe', 'lez', 'tab' - ] - devanagari_lang = [ - 'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', - 'gom', 'sa', 'bgc' - ] - if lang in latin_lang: - lang = "latin" - elif lang in arabic_lang: - lang = "arabic" - elif lang in cyrillic_lang: - lang = "cyrillic" - elif lang in devanagari_lang: - lang = "devanagari" - assert lang in model_urls[ - 'rec'], 'param lang must in {}, but got {}'.format( - model_urls['rec'].keys(), lang) - if lang == "ch": - det_lang = "ch" - else: - det_lang = "en" - use_inner_dict = False - if params.rec_char_dict_path is None: - use_inner_dict = True - params.rec_char_dict_path = model_urls['rec'][lang][ - 'dict_path'] + lang, det_lang = parse_lang(params.lang) # init model dir params.det_model_dir, det_url = confirm_model_dir_url(params.det_model_dir, - os.path.join(BASE_DIR, VERSION, 'det', det_lang), + os.path.join(BASE_DIR, VERSION, 'ocr', 'det', det_lang), model_urls['det'][det_lang]) params.rec_model_dir, rec_url = confirm_model_dir_url(params.rec_model_dir, - os.path.join(BASE_DIR, VERSION, 'rec', lang), + os.path.join(BASE_DIR, VERSION, 'ocr', 'rec', lang), model_urls['rec'][lang]['url']) params.cls_model_dir, cls_url = confirm_model_dir_url(params.cls_model_dir, - os.path.join(BASE_DIR, VERSION, 'cls'), + os.path.join(BASE_DIR, VERSION, 'ocr', 'cls'), model_urls['cls']) # download model maybe_download(params.det_model_dir, det_url) @@ -212,9 +224,9 @@ class PaddleOCR(predict_system.TextSystem): if params.rec_algorithm not in SUPPORT_REC_MODEL: logger.error('rec_algorithm must in {}'.format(SUPPORT_REC_MODEL)) sys.exit(0) - if use_inner_dict: - params.rec_char_dict_path = str( - Path(__file__).parent / params.rec_char_dict_path) + + if params.rec_char_dict_path is None: + params.rec_char_dict_path = str(Path(__file__).parent / model_urls['rec'][lang]['dict_path']) print(params) # init det_model and rec_model @@ -272,6 +284,59 @@ class PaddleOCR(predict_system.TextSystem): return rec_res +class PPStructure(OCRSystem): + def __init__(self, **kwargs): + params = parse_args(mMain=False) + params.__dict__.update(**kwargs) + if not params.show_log: + logger.setLevel(logging.INFO) + lang, det_lang = parse_lang(params.lang) + + # init model dir + params.det_model_dir, det_url = confirm_model_dir_url(params.det_model_dir, + os.path.join(BASE_DIR, VERSION, 'ocr', 'det', det_lang), + model_urls['det'][det_lang]) + params.rec_model_dir, rec_url = confirm_model_dir_url(params.rec_model_dir, + os.path.join(BASE_DIR, VERSION, 'ocr', 'rec', lang), + model_urls['rec'][lang]['url']) + params.table_model_dir, table_url = confirm_model_dir_url(params.table_model_dir, + os.path.join(BASE_DIR, VERSION, 'ocr', 'table'), + model_urls['table']['url']) + # download model + maybe_download(params.det_model_dir, det_url) + maybe_download(params.rec_model_dir, rec_url) + maybe_download(params.table_model_dir, table_url) + + if params.rec_char_dict_path is None: + params.rec_char_dict_path = str(Path(__file__).parent / model_urls['rec'][lang]['dict_path']) + if params.table_char_dict_path is None: + params.table_char_dict_path = str(Path(__file__).parent / model_urls['table']['dict_path']) + + print(params) + super().__init__(params) + + def __call__(self, img): + if isinstance(img, str): + # download net image + if img.startswith('http'): + download_with_progressbar(img, 'tmp.jpg') + img = 'tmp.jpg' + image_file = img + img, flag = check_and_read_gif(image_file) + if not flag: + with 
open(image_file, 'rb') as f: + np_arr = np.frombuffer(f.read(), dtype=np.uint8) + img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + if img is None: + logger.error("error in loading image:{}".format(image_file)) + return None + if isinstance(img, np.ndarray) and len(img.shape) == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + res = super().__call__(img) + return res + + def main(): # for cmd args = parse_args(mMain=True) @@ -284,14 +349,28 @@ def main(): if len(image_file_list) == 0: logger.error('no images find in {}'.format(args.image_dir)) return + if args.type == 'ocr': + engine = PaddleOCR(**(args.__dict__)) + elif args.type == 'structure': + engine = PPStructure(**(args.__dict__)) + else: + raise NotImplementedError - ocr_engine = PaddleOCR(**(args.__dict__)) for img_path in image_file_list: + img_name = os.path.basename(img_path).split('.')[0] logger.info('{}{}{}'.format('*' * 10, img_path, '*' * 10)) - result = ocr_engine.ocr(img_path, + if args.type == 'ocr': + result = engine.ocr(img_path, det=args.det, rec=args.rec, cls=args.use_angle_cls) - if result is not None: - for line in result: - logger.info(line) + if result is not None: + for line in result: + logger.info(line) + elif args.type == 'structure': + result = engine(img_path) + save_structure_res(result, args.output, img_name) + + for item in result: + item.pop('img') + logger.info(item['res']) \ No newline at end of file diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index e25cce79b553f127afc0167f18b6f663ceb617d7..32abe94deffa41c6f51ef62f011d0e4d58811065 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -380,14 +380,14 @@ class TableLabelEncode(object): list_elem = [] with open(character_dict_path, "rb") as fin: lines = fin.readlines() - substr = lines[0].decode('utf-8').strip("\n").split("\t") + substr = lines[0].decode('utf-8').strip("\r\n").split("\t") character_num = int(substr[0]) elem_num = int(substr[1]) for cno in range(1, 1+character_num): - character = lines[cno].decode('utf-8').strip("\n") + character = lines[cno].decode('utf-8').strip("\r\n") list_character.append(character) for eno in range(1+character_num, 1+character_num+elem_num): - elem = lines[eno].decode('utf-8').strip("\n") + elem = lines[eno].decode('utf-8').strip("\r\n") list_elem.append(elem) return list_character, list_elem diff --git a/ppstructure/MANIFEST.in b/ppstructure/MANIFEST.in deleted file mode 100644 index 713e4b06f3ac924070afe53de9c2ec48726185e6..0000000000000000000000000000000000000000 --- a/ppstructure/MANIFEST.in +++ /dev/null @@ -1,9 +0,0 @@ -include LICENSE -include README.md - -recursive-include ppocr/utils *.txt utility.py logging.py network.py -recursive-include ppocr/data *.py -recursive-include ppocr/postprocess *.py -recursive-include tools/infer *.py -recursive-include ppstructure *.py - diff --git a/ppstructure/README.md b/ppstructure/README.md index 90cd412df038a59ab6555b1ff632f99e2d32bb74..1fb52707f498bcd04edb04fbc8ce13fd66d7b9f8 100644 --- a/ppstructure/README.md +++ b/ppstructure/README.md @@ -1,26 +1,53 @@ -# PaddleStructure +# PPStructure -PaddleStructure is an OCR toolkit for complex layout analysis. It can divide document data in the form of pictures into **text, table, title, picture and list** 5 types of areas, and extract the table area as excel +PPStructure is an OCR toolkit for complex layout analysis. 
It can divide document data in the form of pictures into **text, table, title, picture and list** 5 types of areas, and extract the table area as excel ## 1. Quick start ### install +**install PaddlePaddle2.0** -**install layoutparser** -```sh -pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +```bash +pip3 install --upgrade pip + +# If you have cuda9 or cuda10 installed on your machine, please run the following command to install +python3 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple + +# If you only have cpu on your machine, please run the following command to install + +python3 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple + +# For more version requirements, please refer to the instructions in the [installation document](https://www.paddlepaddle.org.cn/install/quick). ``` -**install paddlestructure** -install by pypi +**Clone PaddleOCR repo** ```bash -pip install paddlestructure +# Recommend +git clone https://github.com/PaddlePaddle/PaddleOCR + +# If you cannot pull successfully due to network problems, you can also choose to use the code hosting on the cloud: +git clone https://gitee.com/paddlepaddle/PaddleOCR + +# Note: The cloud-hosting code may not be able to synchronize the update with this GitHub project in real time. There might be a delay of 3-5 days. Please give priority to the recommended method. +``` + +**install paddleocr** + +install by pypi +```bash +cd PaddleOCR +pip install "paddleocr>=2.2" # Recommend to use version 2.2 ``` build own whl package and install + ```bash python3 setup.py bdist_wheel -pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x is the version of paddlestructure +pip3 install dist/paddleocr-x.x.x-py3-none-any.whl # x.x.x is the version of paddleocr +``` +**install layoutparser** +```sh +pip3 install -U premailer https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl ``` ### 1.2 Use @@ -28,7 +55,7 @@ pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x is the version #### 1.2.1 Use by command line ```bash -paddlestructure --image_dir=../doc/table/1.png +paddleocr --image_dir=../doc/table/1.png --type=structure ``` #### 1.2.2 Use by code @@ -36,29 +63,30 @@ paddlestructure --image_dir=../doc/table/1.png ```python import os import cv2 -from paddlestructure import PaddleStructure,draw_result,save_res +from paddleocr import PPStructure,draw_structure_result,save_structure_res -table_engine = PaddleStructure(show_log=True) +table_engine = PPStructure(show_log=True) save_folder = './output/table' img_path = '../doc/table/1.png' img = cv2.imread(img_path) result = table_engine(img) -save_res(result, save_folder,os.path.basename(img_path).split('.')[0]) +save_structure_res(result, save_folder,os.path.basename(img_path).split('.')[0]) for line in result: + line.pop('img') print(line) from PIL import Image -font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包 +font_path = '../doc/fonts/simfang.ttf' image = Image.open(img_path).convert('RGB') -im_show = draw_result(image, result,font_path=font_path) +im_show = draw_structure_result(image, result,font_path=font_path) im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` #### 1.2.3 返回结果说明 -The return result of PaddleStructure is a list composed of a dict, an example is as follows +The return result of PPStructure is a list composed of a dict, an example is as follows ```shell [ @@ -89,20 +117,20 @@ The description of each field in dict 
is as follows Most of the parameters are consistent with the paddleocr whl package, see [doc of whl](../doc/doc_en/whl_en.md) -After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel, and the excel file name will be the coordinates of the table in the image. +After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel and figure area will be cropped and saved, the excel and image file name will be the coordinates of the table in the image. -## 2. PaddleStructure Pipeline +## 2. PPStructure Pipeline the process is as follows ![pipeline](../doc/table/pipeline_en.jpg) -In PaddleStructure, the image will be analyzed by layoutparser first. In the layout analysis, the area in the image will be classified, including **text, title, image, list and table** 5 categories. For the first 4 types of areas, directly use the PP-OCR to complete the text detection and recognition. The table area will be converted to an excel file of the same table style via Table OCR. +In PPStructure, the image will be analyzed by layoutparser first. In the layout analysis, the area in the image will be classified, including **text, title, image, list and table** 5 categories. For the first 4 types of areas, directly use the PP-OCR to complete the text detection and recognition. The table area will be converted to an excel file of the same table style via Table OCR. ### 2.1 LayoutParser Layout analysis divides the document data into regions, including the use of Python scripts for layout analysis tools, extraction of special category detection boxes, performance indicators, and custom training layout analysis models. For details, please refer to [document](layout/README_en.md). -### 2.2 Table OCR +### 2.2 Table Structure Table OCR converts table image into excel documents, which include the detection and recognition of table text and the prediction of table structure and cell coordinates. For detailed, please refer to [document](table/README.md) @@ -111,15 +139,25 @@ Table OCR converts table image into excel documents, which include the detection Use the following commands to complete the inference. 
```python -python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf +cd PaddleOCR/ppstructure + +# download model +mkdir inference && cd inference +# Download the detection model of the ultra-lightweight Chinese OCR model and uncompress it +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +# Download the recognition model of the ultra-lightweight Chinese OCR model and uncompress it +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar +# Download the table structure model of the ultra-lightweight Chinese OCR model and uncompress it +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar +cd .. + +python3 table/predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf ``` -After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel, and the excel file name will be the coordinates of the table in the image. +After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel and figure area will be cropped and saved, the excel and image file name will be the coordinates of the table in the image. 
**Model List** |model name|description|config|model size|download| | --- | --- | --- | --- | --- | -|en_ppocr_mobile_v2.0_table_det|Text detection in English table scene|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) | -|en_ppocr_mobile_v2.0_table_rec|Text recognition in English table scene|[rec_chinese_lite_train_v2.0.yml](..//configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) | |en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenarios|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) | \ No newline at end of file diff --git a/ppstructure/README_ch.md b/ppstructure/README_ch.md index 7ae55534309ab48caecf8de1ae20c0536b49823e..07a06f91622ed82b55b8d16198639c54b11828c4 100644 --- a/ppstructure/README_ch.md +++ b/ppstructure/README_ch.md @@ -1,4 +1,4 @@ -# PaddleStructure +# PPStructure PaddleStructure是一个用于复杂版面分析的OCR工具包,其能够对图片形式的文档数据划分**文字、表格、标题、图片以及列表**5类区域,并将表格区域提取为excel @@ -6,29 +6,59 @@ PaddleStructure是一个用于复杂版面分析的OCR工具包,其能够对 ### 1.1 安装 -**安装 layoutparser** -```sh -pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +**安装PaddlePaddle2.0** + +```bash +pip3 install --upgrade pip + +# 如果您的机器安装的是CUDA9或CUDA10,请运行以下命令安装 +python3 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple + +# 如果您的机器是CPU,请运行以下命令安装 + +python3 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple + +# 更多的版本需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 +``` + +**克隆PaddleOCR repo代码** + +```bash +【推荐】git clone https://github.com/PaddlePaddle/PaddleOCR + +如果因为网络问题无法pull成功,也可选择使用码云上的托管: + +git clone https://gitee.com/paddlepaddle/PaddleOCR + +注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。 + ``` -**安装 paddlestructure** + +**安装 paddleocr** pip安装 ```bash -pip install paddlestructure +cd PaddleOCR +pip install "paddleocr>=2.0.1" # 推荐使用2.0.1+版本 ``` 本地构建并安装 ```bash python3 setup.py bdist_wheel -pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x是 paddlestructure 的版本号 +pip3 install dist/paddleocr-x.x.x-py3-none-any.whl # x.x.x是paddleocr的版本号 +``` + +**安装 layoutparser** +```sh +pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl ``` -### 1.2 PaddleStructure whl包使用 +### 1.2 PPStructure whl包使用 #### 1.2.1 命令行使用 ```bash -paddlestructure --image_dir=../doc/table/1.png +paddleocr --image_dir=../doc/table/1.png --type=structure ``` #### 1.2.2 Python脚本使用 @@ -36,24 +66,25 @@ paddlestructure --image_dir=../doc/table/1.png ```python import os import cv2 -from paddlestructure import PaddleStructure,draw_result,save_res +from paddleocr import PPStructure,draw_structure_result,save_structure_res -table_engine = PaddleStructure(show_log=True) +table_engine = PPStructure(show_log=True) save_folder = './output/table' img_path = '../doc/table/1.png' img = cv2.imread(img_path) result = table_engine(img) -save_res(result, save_folder,os.path.basename(img_path).split('.')[0]) +save_structure_res(result, save_folder,os.path.basename(img_path).split('.')[0]) for line in result: + line.pop('img') print(line) from PIL import Image font_path = 
'../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包 image = Image.open(img_path).convert('RGB') -im_show = draw_result(image, result,font_path=font_path) +im_show = draw_structure_result(image, result,font_path=font_path) im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` @@ -90,21 +121,21 @@ dict 里各个字段说明如下 大部分参数和paddleocr whl包保持一致,见 [whl包文档](../doc/doc_ch/whl.md) -运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,excel文件名为表格在图片里的坐标。 +运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片的文件名为表格在图片里的坐标。 -## 2. PaddleStructure Pipeline +## 2. PPStructure Pipeline 流程如下 ![pipeline](../doc/table/pipeline.jpg) -在PaddleStructure中,图片会先经由layoutparser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过Table OCR处理后,表格图片转换为相同表格样式的Excel文件。 +在PPStructure中,图片会先经由layoutparser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过Table OCR处理后,表格图片转换为相同表格样式的Excel文件。 ### 2.1 版面分析 版面分析对文档数据进行区域分类,其中包括版面分析工具的Python脚本使用、提取指定类别检测框、性能指标以及自定义训练版面分析模型,详细内容可以参考[文档](layout/README.md)。 -### 2.2 表格识别 +### 2.2 表格结构化 Table OCR将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md) @@ -113,14 +144,24 @@ Table OCR将表格图片转换为excel文档,其中包含对于表格文本的 使用如下命令即可完成预测引擎的推理 ```python -python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf +cd ppstructure + +# 下载模型 +mkdir inference && cd inference +# 下载超轻量级中文OCR模型的检测模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +# 下载超轻量级中文OCR模型的识别模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar +# 下载超轻量级英文表格结构模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar +cd .. 
+ +python3 table/predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf ``` -运行完成后,每张图片会output字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,excel文件名为表格在图片里的坐标。 +运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名名为表格在图片里的坐标。 **Model List** |模型名称|模型简介|配置文件|推理模型大小|下载地址| | --- | --- | --- | --- | --- | -|en_ppocr_mobile_v2.0_table_det|英文表格场景的文字检测|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) | -|en_ppocr_mobile_v2.0_table_rec|英文表格场景的文字识别|[rec_chinese_lite_train_v2.0.yml](../configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) | |en_ppocr_mobile_v2.0_table_structure|英文表格场景的表格结构预测|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) | \ No newline at end of file diff --git a/ppstructure/__init__.py b/ppstructure/__init__.py index 3952b5ffb9f443e9aba9ba0a4a041b73d2caa9bc..1d11e265597c7c8e39098a228108da3bb954b892 100644 --- a/ppstructure/__init__.py +++ b/ppstructure/__init__.py @@ -11,7 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from .paddlestructure import PaddleStructure, draw_result, save_res - -__all__ = ['PaddleStructure', 'draw_result', 'save_res'] diff --git a/ppstructure/paddlestructure.py b/ppstructure/paddlestructure.py deleted file mode 100644 index d0009ae8a9be0a133e0d56734b739265034dc314..0000000000000000000000000000000000000000 --- a/ppstructure/paddlestructure.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -import os -import sys - -__dir__ = os.path.dirname(__file__) -sys.path.append(__dir__) -sys.path.append(os.path.join(__dir__, '..')) - -import cv2 -import numpy as np -from pathlib import Path - -from ppocr.utils.logging import get_logger -from ppstructure.predict_system import OCRSystem, save_res -from ppstructure.utility import init_args, draw_result - -logger = get_logger() -from ppocr.utils.utility import check_and_read_gif, get_image_file_list -from ppocr.utils.network import maybe_download, download_with_progressbar, confirm_model_dir_url, is_link - -__all__ = ['PaddleStructure', 'draw_result', 'save_res'] - -VERSION = '2.1' -BASE_DIR = os.path.expanduser("~/.paddlestructure/") - -model_urls = { - 'det': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar', - 'rec': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar', - 'table': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar' - -} - - -def parse_args(mMain=True): - import argparse - parser = init_args() - parser.add_help = mMain - - for action in parser._actions: - if action.dest in ['rec_char_dict_path', 'table_char_dict_path']: - action.default = None - if mMain: - return parser.parse_args() - else: - inference_args_dict = {} - for action in parser._actions: - inference_args_dict[action.dest] = action.default - return argparse.Namespace(**inference_args_dict) - - -class PaddleStructure(OCRSystem): - def __init__(self, **kwargs): - params = parse_args(mMain=False) - params.__dict__.update(**kwargs) - if not params.show_log: - logger.setLevel(logging.INFO) - params.use_angle_cls = False - # init model dir - params.det_model_dir, det_url = confirm_model_dir_url(params.det_model_dir, - os.path.join(BASE_DIR, VERSION, 'det'), - model_urls['det']) - params.rec_model_dir, rec_url = confirm_model_dir_url(params.rec_model_dir, - os.path.join(BASE_DIR, VERSION, 'rec'), - model_urls['rec']) - params.table_model_dir, table_url = confirm_model_dir_url(params.table_model_dir, - os.path.join(BASE_DIR, VERSION, 'table'), - model_urls['table']) - # download model - maybe_download(params.det_model_dir, det_url) - maybe_download(params.rec_model_dir, rec_url) - maybe_download(params.table_model_dir, table_url) - - if params.rec_char_dict_path is None: - params.rec_char_type = 'EN' - if os.path.exists(str(Path(__file__).parent / 'ppocr/utils/dict/table_dict.txt')): - params.rec_char_dict_path = str(Path(__file__).parent / 'ppocr/utils/dict/table_dict.txt') - else: - params.rec_char_dict_path = str(Path(__file__).parent.parent / 'ppocr/utils/dict/table_dict.txt') - if params.table_char_dict_path is None: - if os.path.exists(str(Path(__file__).parent / 'ppocr/utils/dict/table_structure_dict.txt')): - params.table_char_dict_path = str( - Path(__file__).parent / 'ppocr/utils/dict/table_structure_dict.txt') - else: - params.table_char_dict_path = str( - Path(__file__).parent.parent / 'ppocr/utils/dict/table_structure_dict.txt') - - print(params) - super().__init__(params) - - def __call__(self, img): - if isinstance(img, str): - # download net image - if img.startswith('http'): - download_with_progressbar(img, 'tmp.jpg') - img = 'tmp.jpg' - image_file = img - img, flag = check_and_read_gif(image_file) - if not flag: - with open(image_file, 'rb') as f: - np_arr = np.frombuffer(f.read(), dtype=np.uint8) - img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) - if img is None: - logger.error("error in loading 
image:{}".format(image_file)) - return None - if isinstance(img, np.ndarray) and len(img.shape) == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - res = super().__call__(img) - return res - - -def main(): - # for cmd - args = parse_args(mMain=True) - image_dir = args.image_dir - save_folder = args.output - if image_dir.startswith('http'): - download_with_progressbar(image_dir, 'tmp.jpg') - image_file_list = ['tmp.jpg'] - else: - image_file_list = get_image_file_list(args.image_dir) - if len(image_file_list) == 0: - logger.error('no images find in {}'.format(args.image_dir)) - return - - structure_engine = PaddleStructure(**(args.__dict__)) - for img_path in image_file_list: - img_name = os.path.basename(img_path).split('.')[0] - logger.info('{}{}{}'.format('*' * 10, img_path, '*' * 10)) - result = structure_engine(img_path) - for item in result: - logger.info(item['res']) - save_res(result, save_folder, img_name) - logger.info('result save to {}'.format(os.path.join(save_folder, img_name))) \ No newline at end of file diff --git a/ppstructure/predict_system.py b/ppstructure/predict_system.py index 60e2574515aeabaedc4f23d1589677c03543ce40..503ed1269e42678961d9e5ae94b9ae855d2c944d 100644 --- a/ppstructure/predict_system.py +++ b/ppstructure/predict_system.py @@ -26,19 +26,18 @@ import numpy as np import time import logging -import layoutparser as lp - from ppocr.utils.utility import get_image_file_list, check_and_read_gif from ppocr.utils.logging import get_logger from tools.infer.predict_system import TextSystem from ppstructure.table.predict_table import TableSystem, to_excel -from ppstructure.utility import parse_args, draw_result +from ppstructure.utility import parse_args, draw_structure_result logger = get_logger() class OCRSystem(object): def __init__(self, args): + import layoutparser as lp args.det_limit_type = 'resize_long' args.drop_score = 0 if not args.show_log: @@ -66,21 +65,21 @@ class OCRSystem(object): filter_boxes = [x + [x1, y1] for x in filter_boxes] filter_boxes = [x.reshape(-1).tolist() for x in filter_boxes] # remove style char - style_token = ['','','','','','','','', - '','','','','',''] + style_token = ['', '', '', '', '', '', '', '', + '', '', '', '', '', ''] filter_rec_res_tmp = [] for rec_res in filter_rec_res: rec_str, rec_conf = rec_res for token in style_token: if token in rec_str: rec_str = rec_str.replace(token, '') - filter_rec_res_tmp.append((rec_str,rec_conf)) + filter_rec_res_tmp.append((rec_str, rec_conf)) res = (filter_boxes, filter_rec_res_tmp) - res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'res': res}) + res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'img': roi_img, 'res': res}) return res_list -def save_res(res, save_folder, img_name): +def save_structure_res(res, save_folder, img_name): excel_save_folder = os.path.join(save_folder, img_name) os.makedirs(excel_save_folder, exist_ok=True) # save res @@ -89,6 +88,10 @@ def save_res(res, save_folder, img_name): if region['type'] == 'Table': excel_path = os.path.join(excel_save_folder, '{}.xlsx'.format(region['bbox'])) to_excel(region['res'], excel_path) + if region['type'] == 'Figure': + roi_img = region['img'] + img_path = os.path.join(excel_save_folder, '{}.jpg'.format(region['bbox'])) + cv2.imwrite(img_path, roi_img) else: for box, rec_res in zip(region['res'][0], region['res'][1]): f.write('{}\t{}\n'.format(np.array(box).reshape(-1).tolist(), rec_res)) @@ -115,8 +118,8 @@ def main(args): continue starttime = time.time() res = structure_sys(img) - 
save_res(res, save_folder, img_name) - draw_img = draw_result(img, res, args.vis_font_path) + save_structure_res(res, save_folder, img_name) + draw_img = draw_structure_result(img, res, args.vis_font_path) cv2.imwrite(os.path.join(save_folder, img_name, 'show.jpg'), draw_img) logger.info('result save to {}'.format(os.path.join(save_folder, img_name))) elapse = time.time() - starttime diff --git a/ppstructure/setup.py b/ppstructure/setup.py deleted file mode 100644 index c99d71fe37419b50badd3cf910fec6bb5d2cc67f..0000000000000000000000000000000000000000 --- a/ppstructure/setup.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from setuptools import setup -from io import open -import shutil - -with open('../requirements.txt', encoding="utf-8-sig") as f: - requirements = f.readlines() - requirements.append('tqdm') - - -def readme(): - with open('README_ch.md', encoding="utf-8-sig") as f: - README = f.read() - return README - - -shutil.copytree('./table', './ppstructure/table') -shutil.copyfile('./predict_system.py', './ppstructure/predict_system.py') -shutil.copyfile('./utility.py', './ppstructure/utility.py') -shutil.copytree('../ppocr', './ppocr') -shutil.copytree('../tools', './tools') -shutil.copyfile('../LICENSE', './LICENSE') - -setup( - name='paddlestructure', - packages=['paddlestructure'], - package_dir={'paddlestructure': ''}, - include_package_data=True, - entry_points={"console_scripts": ["paddlestructure= paddlestructure.paddlestructure:main"]}, - version='1.0', - install_requires=requirements, - license='Apache License 2.0', - description='Awesome OCR toolkits based on PaddlePaddle (8.6M ultra-lightweight pre-trained model, support training and deployment among server, mobile, embeded and IoT devices', - long_description=readme(), - long_description_content_type='text/markdown', - url='https://github.com/PaddlePaddle/PaddleOCR', - download_url='https://github.com/PaddlePaddle/PaddleOCR.git', - keywords=[ - 'ocr textdetection textrecognition paddleocr crnn east star-net rosetta ocrlite db chineseocr chinesetextdetection chinesetextrecognition' - ], - classifiers=[ - 'Intended Audience :: Developers', 'Operating System :: OS Independent', - 'Natural Language :: Chinese (Simplified)', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', 'Topic :: Utilities' - ], ) - -shutil.rmtree('ppocr') -shutil.rmtree('tools') -shutil.rmtree('ppstructure') -os.remove('LICENSE') diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md index c538db275844e8eb21f405728fe09ed10c070760..7bb3127f253680d219094aa5de85bd4d77015573 100644 --- a/ppstructure/table/README.md +++ b/ppstructure/table/README.md @@ -1,4 +1,4 @@ -# Table structure and 
content prediction +# Table structure ## 1. pipeline The ocr of the table mainly contains three models @@ -17,8 +17,26 @@ The table ocr flow chart is as follows ## 2. How to use +### 2.1 quick start -### 2.1 Train +```python +cd PaddleOCR/ppstructure + +# download model +mkdir inference && cd inference +# Download the detection model of the ultra-lightweight Chinese OCR model and uncompress it +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +# Download the recognition model of the ultra-lightweight Chinese OCR model and uncompress it +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar +# Download the table structure model of the ultra-lightweight Chinese OCR model and uncompress it +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar +cd .. + +python3 table/predict_table.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --det_limit_side_len=736 --det_limit_type=min --output ../output/table +``` +After running, the excel sheet of each picture will be saved in the directory specified by the output field + +### 2.2 Train In this chapter, we only introduce the training of the table structure model, For model training of [text detection](../../doc/doc_en/detection_en.md) and [text recognition](../../doc/doc_en/recognition_en.md), please refer to the corresponding documents @@ -48,7 +66,7 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo **Note**: The priority of `Global.checkpoints` is higher than that of `Global.pretrain_weights`, that is, when two parameters are specified at the same time, the model specified by `Global.checkpoints` will be loaded first. If the model path specified by `Global.checkpoints` is wrong, the one specified by `Global.pretrain_weights` will be loaded. -### 2.2 Eval +### 2.3 Eval The table uses TEDS (Tree-Edit-Distance-based Similarity) as the evaluation metric of the model. Before the model evaluation, the three models in the pipeline need to be exported as inference models (we have provided them), and the gt for evaluation needs to be prepared. Examples of gt are as follows: ```json @@ -70,7 +88,7 @@ python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_di ``` -### 2.3 Inference +### 2.4 Inference ```python cd PaddleOCR/ppstructure diff --git a/ppstructure/table/README_ch.md b/ppstructure/table/README_ch.md index 5981dab4b85d751ad26a9ba08ca4c9056d253961..a1bd2442641dd89653aa83372c77d615cb9997c3 100644 --- a/ppstructure/table/README_ch.md +++ b/ppstructure/table/README_ch.md @@ -1,6 +1,6 @@ -# Table OCR +# 表格结构化 -## 1. Table OCR pineline +## 1. 表格结构化 pipeline 表格的ocr主要包含三个模型 1. 单行文本检测-DB 2. 单行文本识别-CRNN @@ -19,7 +19,26 @@ ## 2. 
使用 -### 2.1 训练 +### 2.1 快速开始 + +```python +cd PaddleOCR/ppstructure + +# 下载模型 +mkdir inference && cd inference +# 下载超轻量级中文OCR模型的检测模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +# 下载超轻量级中文OCR模型的识别模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar +# 下载超轻量级英文表格结构模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar +cd .. +# 执行预测 +python3 table/predict_table.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --det_limit_side_len=736 --det_limit_type=min --output ../output/table +``` +运行完成后,每张图片的excel表格会保存到output字段指定的目录下 + +### 2.2 训练 在这一章节中,我们仅介绍表格结构模型的训练,[文字检测](../../doc/doc_ch/detection.md)和[文字识别](../../doc/doc_ch/recognition.md)的模型训练请参考对应的文档。 #### 数据准备 @@ -46,7 +65,7 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo **注意**:`Global.checkpoints`的优先级高于`Global.pretrain_weights`的优先级,即同时指定两个参数时,优先加载`Global.checkpoints`指定的模型,如果`Global.checkpoints`指定的模型路径有误,会加载`Global.pretrain_weights`指定的模型。 -### 2.2 评估 +### 2.3 评估 表格使用 TEDS(Tree-Edit-Distance-based Similarity) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下: ```json @@ -56,7 +75,7 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo [["", "F", "e", "a", "t", "u", "r", "e", ""], ["", "G", "b", "3", " ", "+", ""], ["", "G", "b", "3", " ", "-", ""], ["", "P", "a", "t", "i", "e", "n", "t", "s", ""], ["6", "2"], ["4", "5"]] ]} ``` -json 中,key为图片名,value为对应的gt,gt是一个由四个item组成的list,每个item分别为 +json 中,key为图片名,value为对应的gt,gt是一个由三个item组成的list,每个item分别为 1. 表格结构的html字符串list 2. 每个cell的坐标 (不包括cell里文字为空的) 3. 
每个cell里的文字信息 (不包括cell里文字为空的) @@ -67,10 +86,9 @@ cd PaddleOCR/ppstructure python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json ``` +### 2.4 预测 -### 2.3 预测 ```python cd PaddleOCR/ppstructure python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table -``` -运行完成后,每张图片的excel表格会保存到output字段指定的目录下 +``` \ No newline at end of file diff --git a/ppstructure/utility.py b/ppstructure/utility.py index 29daeef4347be5b8db0f9fdeda0dd4d8864ef595..bfb11a9c3b8505b3af5237234cb3a391aecf5246 100644 --- a/ppstructure/utility.py +++ b/ppstructure/utility.py @@ -36,7 +36,7 @@ def parse_args(): return parser.parse_args() -def draw_result(image, result, font_path): +def draw_structure_result(image, result, font_path): if isinstance(image, np.ndarray): image = Image.fromarray(image) boxes, txts, scores = [], [], [] diff --git a/setup.py b/setup.py index a1ddbbb6d6d0c2657bb699a72bde75ef07ab3a94..7d4d871d89defcf832910c60f18b094f10ba11db 100644 --- a/setup.py +++ b/setup.py @@ -14,6 +14,7 @@ from setuptools import setup from io import open +from paddleocr import VERSION with open('requirements.txt', encoding="utf-8-sig") as f: requirements = f.readlines() @@ -32,7 +33,7 @@ setup( package_dir={'paddleocr': ''}, include_package_data=True, entry_points={"console_scripts": ["paddleocr= paddleocr.paddleocr:main"]}, - version='2.0.6', + version=VERSION, install_requires=requirements, license='Apache License 2.0', description='Awesome OCR toolkits based on PaddlePaddle (8.6M ultra-lightweight pre-trained model, support training and deployment among server, mobile, embeded and IoT devices',
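For quick reference, the whl-level usage enabled by this patch is sketched below. It simply mirrors the README example added above; `PPStructure`, `draw_structure_result` and `save_structure_res` are now exported from the top-level `paddleocr` package, and the image and font paths are illustrative.

```python
import os
import cv2
from paddleocr import PPStructure, draw_structure_result, save_structure_res

table_engine = PPStructure(show_log=True)

save_folder = './output/table'
img_path = '../doc/table/1.png'   # illustrative input image
img = cv2.imread(img_path)

result = table_engine(img)
# table regions are written as .xlsx files, figure regions as cropped .jpg files
save_structure_res(result, save_folder, os.path.basename(img_path).split('.')[0])

for line in result:
    line.pop('img')   # drop the cropped region image added by this patch before logging
    print(line)

from PIL import Image

font_path = '../doc/fonts/simfang.ttf'   # illustrative font file
image = Image.open(img_path).convert('RGB')
im_show = draw_structure_result(image, result, font_path=font_path)
Image.fromarray(im_show).save('result.jpg')
```

The command-line equivalent is `paddleocr --image_dir=../doc/table/1.png --type=structure`, using the new `--type` switch added in `paddleocr.py`.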