diff --git a/ppstructure/README.md b/ppstructure/README.md index 849c5c5667ff0532dfee35479715880192df0dc5..8994cdd46191a0fd4fb1beba2fcad91542e19b50 100644 --- a/ppstructure/README.md +++ b/ppstructure/README.md @@ -153,7 +153,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_in wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar cd .. -python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf +python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf ``` After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel and figure area will be cropped and saved, the excel and image file name will be the coordinates of the table in the image. diff --git a/ppstructure/README_ch.md b/ppstructure/README_ch.md index 821a6c3e36361abefa4d754537fdbd694e844efe..607efac1bf6bfaa58f0e96ceef1a0ee344189e9c 100644 --- a/ppstructure/README_ch.md +++ b/ppstructure/README_ch.md @@ -1,6 +1,12 @@ [English](README.md) | 简体中文 -# PP-Structure +## 简介 +PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包,旨在帮助开发者更好的完成文档理解相关任务。 + +## 近期更新 +* 2021.12.07 新增VQA任务-SER和RE。 + +## 特性 PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包,主要特性如下: - 支持对图片形式的文档进行版面分析,可以划分**文字、标题、表格、图片以及列表**5类区域(与Layout-Parser联合使用) @@ -8,181 +14,88 @@ PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包 - 支持表格区域进行结构化分析,最终结果输出Excel文件 - 支持python whl包和命令行两种方式,简单易用 - 支持版面分析和表格结构化两类任务自定义训练 +- 支持文档视觉问答(Document Visual Question Answering,DOC-VQA)任务-语义实体识别(Semantic Entity Recognition,SER)和关系抽取(Relation Extraction,RE) -## 1. 效果展示 - - - - - -## 2. 安装 - -### 2.1 安装依赖 - -- **(1) 安装PaddlePaddle** - -```bash -pip3 install --upgrade pip - -# GPU安装 -python3 -m pip install paddlepaddle-gpu==2.1.1 -i https://mirror.baidu.com/pypi/simple - -# CPU安装 - python3 -m pip install paddlepaddle==2.1.1 -i https://mirror.baidu.com/pypi/simple - -``` -更多需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 - -- **(2) 安装 Layout-Parser** - -```bash -pip3 install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl -``` - -### 2.2 安装PaddleOCR(包含PP-OCR和PP-Structure) - -- **(1) PIP快速安装PaddleOCR whl包(仅预测)** -```bash -pip install "paddleocr>=2.2" # 推荐使用2.2+版本 -``` - -- **(2) 完整克隆PaddleOCR源码(预测+训练)** - -```bash -【推荐】git clone https://github.com/PaddlePaddle/PaddleOCR - -#如果因为网络问题无法pull成功,也可选择使用码云上的托管: -git clone https://gitee.com/paddlepaddle/PaddleOCR - -#注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。 -``` - - -## 3. PP-Structure 快速开始 - -### 3.1 命令行使用(默认参数,极简) - -```bash -paddleocr --image_dir=../doc/table/1.png --type=structure -``` - -### 3.2 Python脚本使用(自定义参数,灵活) +## 1. 
效果展示 -```python -import os -import cv2 -from paddleocr import PPStructure,draw_structure_result,save_structure_res +### 1.1 版面分析和表格识别 -table_engine = PPStructure(show_log=True) + -save_folder = './output/table' -img_path = '../doc/table/1.png' -img = cv2.imread(img_path) -result = table_engine(img) -save_structure_res(result, save_folder,os.path.basename(img_path).split('.')[0]) +### 1.2 VQA -for line in result: - line.pop('img') - print(line) +* SER -from PIL import Image +![](./vqa/images/result_ser/zh_val_0_ser.jpg) | ![](./vqa/images/result_ser/zh_val_42_ser.jpg) +---|--- -font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包 -image = Image.open(img_path).convert('RGB') -im_show = draw_structure_result(image, result,font_path=font_path) -im_show = Image.fromarray(im_show) -im_show.save('result.jpg') -``` +图中不同颜色的框表示不同的类别,对于XFUN数据集,有`QUESTION`, `ANSWER`, `HEADER` 3种类别 -### 3.3 返回结果说明 -PP-Structure的返回结果为一个dict组成的list,示例如下 +* 深紫色:HEADER +* 浅紫色:QUESTION +* 军绿色:ANSWER -```shell -[ - { 'type': 'Text', - 'bbox': [34, 432, 345, 462], - 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]], - [('Tigure-6. The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)]) - } -] -``` -dict 里各个字段说明如下 +在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 -| 字段 | 说明 | -| --------------- | -------------| -|type|图片区域的类型| -|bbox|图片区域的在原图的坐标,分别[左上角x,左上角y,右下角x,右下角y]| -|res|图片区域的OCR或表格识别结果。
表格: 表格的HTML字符串; <br>
OCR: 一个包含各个单行文字的检测坐标和识别结果的元组| +* RE +![](./vqa/images/result_re/zh_val_21_re.jpg) | ![](./vqa/images/result_re/zh_val_40_re.jpg) +---|--- -### 3.4 参数说明 -| 字段 | 说明 | 默认值 | -| --------------- | ---------------------------------------- | ------------------------------------------- | -| output | excel和识别结果保存的地址 | ./output/table | -| table_max_len | 表格结构模型预测时,图像的长边resize尺度 | 488 | -| table_model_dir | 表格结构模型 inference 模型地址 | None | -| table_char_type | 表格结构模型所用字典地址 | ../ppocr/utils/dict/table_structure_dict.tx | +图中红色框表示问题,蓝色框表示答案,问题和答案之间使用绿色线连接。在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 -大部分参数和paddleocr whl包保持一致,见 [whl包文档](../doc/doc_ch/whl.md) +## 2. 快速体验 -运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名名为表格在图片里的坐标。 +代码体验:从 [快速安装](./docs/quickstart.md) 开始 +## 3. PP-Structure Pipeline介绍 -## 4. PP-Structure Pipeline介绍 +### 3.1 版面分析+表格识别 ![pipeline](../doc/table/pipeline.jpg) 在PP-Structure中,图片会先经由Layout-Parser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过表格结构化处理后,表格图片转换为相同表格样式的Excel文件。 -### 4.1 版面分析 +#### 3.1.1 版面分析 版面分析对文档数据进行区域分类,其中包括版面分析工具的Python脚本使用、提取指定类别检测框、性能指标以及自定义训练版面分析模型,详细内容可以参考[文档](layout/README_ch.md)。 -### 4.2 表格识别 +#### 3.1.2 表格识别 表格识别将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md) -## 5. 预测引擎推理(与whl包效果相同) -使用如下命令即可完成预测引擎的推理 +### 3.2 VQA -```python -cd ppstructure +coming soon -# 下载模型 -mkdir inference && cd inference -# 下载超轻量级中文OCR模型的检测模型并解压 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar -# 下载超轻量级中文OCR模型的识别模型并解压 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar -# 下载超轻量级英文表格英寸模型并解压 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar -cd .. +## 4. 
模型库 -python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf -``` -运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名名为表格在图片里的坐标。 +PP-Structure系列模型列表(更新中) -**Model List** - -LayoutParser 模型 +* LayoutParser 模型 |模型名称|模型简介|下载地址| | --- | --- | --- | | ppyolov2_r50vd_dcn_365e_publaynet | PubLayNet 数据集训练的版面分析模型,可以划分**文字、标题、表格、图片以及列表**5类区域 | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) | -| ppyolov2_r50vd_dcn_365e_tableBank_word | TableBank Word 数据集训练的版面分析模型,只能检测表格 | [TableBank Word](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar) | -| ppyolov2_r50vd_dcn_365e_tableBank_latex | TableBank Latex 数据集训练的版面分析模型,只能检测表格 | [TableBank Latex](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar) | -OCR和表格识别模型 -|模型名称|模型简介|推理模型大小|下载地址| +* OCR和表格识别模型 + +|模型名称|模型简介|模型大小|下载地址| | --- | --- | --- | --- | |ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | |ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | -|en_ppocr_mobile_v2.0_table_det|PubLayNet数据集训练的英文表格场景的文字检测|4.7M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_det_train.tar) | -|en_ppocr_mobile_v2.0_table_rec|PubLayNet数据集训练的英文表格场景的文字识别|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_rec_train.tar) | |en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | -如需要使用其他模型,可以在 [model_list](../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到`det_model_dir`,`rec_model_dir`,`table_model_dir`三个字段即可。 +* VQA模型 + +|模型名称|模型简介|模型大小|下载地址| +| --- | --- | --- | --- | +|PP-Layout_v1.0_ser_pretrained|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar) | +|PP-Layout_v1.0_re_pretrained|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar) | + + +更多模型下载,可以参考 [模型库](./docs/model_list.md) diff --git a/ppstructure/docs/installation.md b/ppstructure/docs/installation.md new file mode 100644 index 0000000000000000000000000000000000000000..30c25d5dc92f6ccdb0d93dafe9707f30eca0c0a9 --- /dev/null +++ 
b/ppstructure/docs/installation.md @@ -0,0 +1,28 @@ +# 快速安装 + +## 1. PaddlePaddle 和 PaddleOCR + +可参考[PaddleOCR安装文档](../../doc/doc_ch/installation.md) + +## 2. 安装其他依赖 + +### 2.1 版面分析所需 Layout-Parser + +Layout-Parser 可通过如下命令安装 + +```bash +pip3 install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +``` +### 2.2 VQA所需依赖 +* paddleocr + +```bash +pip3 install paddleocr +``` + +* PaddleNLP +```bash +git clone https://github.com/PaddlePaddle/PaddleNLP -b develop +cd PaddleNLP +pip3 install -e . +``` diff --git a/ppstructure/docs/model_list.md b/ppstructure/docs/model_list.md new file mode 100644 index 0000000000000000000000000000000000000000..835d39a735462edb0d9f51493ec0529248aeadbf --- /dev/null +++ b/ppstructure/docs/model_list.md @@ -0,0 +1,28 @@ +# Model List + +## 1. LayoutParser 模型 + +|模型名称|模型简介|下载地址| +| --- | --- | --- | +| ppyolov2_r50vd_dcn_365e_publaynet | PubLayNet 数据集训练的版面分析模型,可以划分**文字、标题、表格、图片以及列表**5类区域 | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) | +| ppyolov2_r50vd_dcn_365e_tableBank_word | TableBank Word 数据集训练的版面分析模型,只能检测表格 | [TableBank Word](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar) | +| ppyolov2_r50vd_dcn_365e_tableBank_latex | TableBank Latex 数据集训练的版面分析模型,只能检测表格 | [TableBank Latex](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar) | + +## 2. OCR和表格识别模型 + +|模型名称|模型简介|推理模型大小|下载地址| +| --- | --- | --- | --- | +|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | +|ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | +|en_ppocr_mobile_v2.0_table_det|PubLayNet数据集训练的英文表格场景的文字检测|4.7M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_det_train.tar) | +|en_ppocr_mobile_v2.0_table_rec|PubLayNet数据集训练的英文表格场景的文字识别|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_rec_train.tar) | +|en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | + +如需要使用其他OCR模型,可以在 [model_list](../../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到`det_model_dir`,`rec_model_dir`两个字段即可。 + +## 3. 
VQA模型
+
+|模型名称|模型简介|推理模型大小|下载地址|
+| --- | --- | --- | --- |
+|PP-Layout_v1.0_ser_pretrained|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar) |
+|PP-Layout_v1.0_re_pretrained|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar) |
diff --git a/ppstructure/docs/quickstart.md b/ppstructure/docs/quickstart.md
new file mode 100644
index 0000000000000000000000000000000000000000..446c577ec39cf24dd4b8699558c633a1308fa444
--- /dev/null
+++ b/ppstructure/docs/quickstart.md
@@ -0,0 +1,171 @@
+# PP-Structure 快速开始
+
+* [1. 安装依赖包](#1)
+* [2. 便捷使用](#2)
+  + [2.1 命令行使用](#21)
+  + [2.2 Python脚本使用](#22)
+  + [2.3 返回结果说明](#23)
+  + [2.4 参数说明](#24)
+* [3. Python脚本使用](#3)
+
+<a name="1"></a>
+
+## 1. 安装依赖包
+
+```bash
+pip3 install "paddleocr>=2.3.0.2" # 推荐使用2.3.0.2+版本
+pip3 install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
+
+# 安装 PaddleNLP
+git clone https://github.com/PaddlePaddle/PaddleNLP -b develop
+cd PaddleNLP
+pip3 install -e .
+```
+
+<a name="2"></a>
+
+## 2. 便捷使用
+
+<a name="21"></a>
+
+### 2.1 命令行使用
+
+* 版面分析+表格识别
+```bash
+paddleocr --image_dir=../doc/table/1.png --type=structure
+```
+
+* VQA
+
+coming soon
+
+<a name="22"></a>
+
+### 2.2 Python脚本使用
+
+* 版面分析+表格识别
+```python
+import os
+import cv2
+from paddleocr import PPStructure, draw_structure_result, save_structure_res
+
+table_engine = PPStructure(show_log=True)
+
+save_folder = './output/table'
+img_path = '../doc/table/1.png'
+img = cv2.imread(img_path)
+result = table_engine(img)
+save_structure_res(result, save_folder, os.path.basename(img_path).split('.')[0])
+
+for line in result:
+    line.pop('img')
+    print(line)
+
+from PIL import Image
+
+font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包
+image = Image.open(img_path).convert('RGB')
+im_show = draw_structure_result(image, result, font_path=font_path)
+im_show = Image.fromarray(im_show)
+im_show.save('result.jpg')
+```
+
+* VQA
+
+coming soon
+
+<a name="23"></a>
+
+### 2.3 返回结果说明
+PP-Structure的返回结果为一个dict组成的list,示例如下
+
+* 版面分析+表格识别
+```shell
+[
+  {   'type': 'Text',
+      'bbox': [34, 432, 345, 462],
+      'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]],
+                [('Tigure-6. The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)])
+  }
+]
+```
+dict 里各个字段说明如下
+
+| 字段 | 说明 |
+| --------------- | -------------|
+|type|图片区域的类型|
+|bbox|图片区域在原图中的坐标,分别为[左上角x,左上角y,右下角x,右下角y]|
+|res|图片区域的OCR或表格识别结果。<br>
表格: 表格的HTML字符串; <br>
OCR: 一个包含各个单行文字的检测坐标和识别结果的元组|
+
+* VQA
+
+coming soon
+
+<a name="24"></a>
+
+### 2.4 参数说明
+
+| 字段 | 说明 | 默认值 |
+| --------------- | ---------------------------------------- | ------------------------------------------- |
+| output | excel和识别结果保存的地址 | ./output |
+| table_max_len | 表格结构模型预测时,图像的长边resize尺度 | 488 |
+| table_model_dir | 表格结构模型 inference 模型地址 | None |
+| table_char_dict_path | 表格结构模型所用字典地址 | ../ppocr/utils/dict/table_structure_dict.txt |
+| model_name_or_path | VQA SER模型地址 | None |
+| max_seq_length | VQA SER模型最大支持token长度 | 512 |
+| label_map_path | VQA SER 标签文件地址 | ./vqa/labels/labels_ser.txt |
+| mode | pipeline预测模式,structure: 版面分析+表格识别; vqa: ser文档信息抽取 | structure |
+
+大部分参数和paddleocr whl包保持一致,见 [whl包文档](../../doc/doc_ch/whl.md)
+
+运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片以表格在图片里的坐标命名。
+
+<a name="3"></a>
+
+## 3. Python脚本使用
+
+* 版面分析+表格识别
+
+```bash
+cd ppstructure
+
+# 下载模型
+mkdir inference && cd inference
+# 下载超轻量级中文OCR模型的检测模型并解压
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
+# 下载超轻量级中文OCR模型的识别模型并解压
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
+# 下载超轻量级英文表格结构模型并解压
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar
+cd ..
+
+python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
+                          --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
+                          --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer \
+                          --image_dir=../doc/table/1.png \
+                          --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt \
+                          --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt \
+                          --output=../output/table \
+                          --vis_font_path=../doc/fonts/simfang.ttf
+```
+运行完成后,每张图片会在`output`字段指定的目录下的`structure`目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片以表格在图片里的坐标命名。
+
+* VQA
+
+```bash
+cd ppstructure
+
+# 下载模型
+mkdir inference && cd inference
+# 下载SER xfun 模型并解压
+wget https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar && tar xf PP-Layout_v1.0_ser_pretrained.tar
+cd ..
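+# 注意:SER模型被解压到 inference/PP-Layout_v1.0_ser_pretrained 目录,
+# 下面命令中的 --model_name_or_path 需指向实际的模型解压路径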
+ +python3 predict_system.py --model_name_or_path=vqa/PP-Layout_v1.0_ser_pretrained/ \ + --mode=vqa \ + --image_dir=vqa/images/input/zh_val_0.jpg \ + --vis_font_path=../doc/fonts/simfang.ttf +``` +运行完成后,每张图片会在`output`字段指定的目录下的`vqa`目录下存放可视化之后的图片,图片名和输入图片名一致。 diff --git a/ppstructure/predict_system.py b/ppstructure/predict_system.py index b2de3d4de80b39f046cf6cbc8a9ebbc52bf69334..e87499ccc410ae67a170f63301e5a99ef948b161 100644 --- a/ppstructure/predict_system.py +++ b/ppstructure/predict_system.py @@ -30,6 +30,7 @@ from ppocr.utils.utility import get_image_file_list, check_and_read_gif from ppocr.utils.logging import get_logger from tools.infer.predict_system import TextSystem from ppstructure.table.predict_table import TableSystem, to_excel +from ppstructure.vqa.infer_ser_e2e import SerPredictor, draw_ser_results from ppstructure.utility import parse_args, draw_structure_result logger = get_logger() @@ -37,53 +38,75 @@ logger = get_logger() class OCRSystem(object): def __init__(self, args): - import layoutparser as lp - # args.det_limit_type = 'resize_long' - args.drop_score = 0 - if not args.show_log: - logger.setLevel(logging.INFO) - self.text_system = TextSystem(args) - self.table_system = TableSystem(args, self.text_system.text_detector, self.text_system.text_recognizer) - - config_path = None - model_path = None - if os.path.isdir(args.layout_path_model): - model_path = args.layout_path_model - else: - config_path = args.layout_path_model - self.table_layout = lp.PaddleDetectionLayoutModel(config_path=config_path, - model_path=model_path, - threshold=0.5, enable_mkldnn=args.enable_mkldnn, - enforce_cpu=not args.use_gpu, thread_num=args.cpu_threads) - self.use_angle_cls = args.use_angle_cls - self.drop_score = args.drop_score + self.mode = args.mode + if self.mode == 'structure': + import layoutparser as lp + # args.det_limit_type = 'resize_long' + args.drop_score = 0 + if not args.show_log: + logger.setLevel(logging.INFO) + self.text_system = TextSystem(args) + self.table_system = TableSystem(args, + self.text_system.text_detector, + self.text_system.text_recognizer) + + config_path = None + model_path = None + if os.path.isdir(args.layout_path_model): + model_path = args.layout_path_model + else: + config_path = args.layout_path_model + self.table_layout = lp.PaddleDetectionLayoutModel( + config_path=config_path, + model_path=model_path, + threshold=0.5, + enable_mkldnn=args.enable_mkldnn, + enforce_cpu=not args.use_gpu, + thread_num=args.cpu_threads) + self.use_angle_cls = args.use_angle_cls + self.drop_score = args.drop_score + elif self.mode == 'vqa': + self.vqa_engine = SerPredictor(args) def __call__(self, img): - ori_im = img.copy() - layout_res = self.table_layout.detect(img[..., ::-1]) - res_list = [] - for region in layout_res: - x1, y1, x2, y2 = region.coordinates - x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) - roi_img = ori_im[y1:y2, x1:x2, :] - if region.type == 'Table': - res = self.table_system(roi_img) - else: - filter_boxes, filter_rec_res = self.text_system(roi_img) - filter_boxes = [x + [x1, y1] for x in filter_boxes] - filter_boxes = [x.reshape(-1).tolist() for x in filter_boxes] - # remove style char - style_token = ['', '', '', '', '', '', '', '', - '', '', '', '', '', ''] - filter_rec_res_tmp = [] - for rec_res in filter_rec_res: - rec_str, rec_conf = rec_res - for token in style_token: - if token in rec_str: - rec_str = rec_str.replace(token, '') - filter_rec_res_tmp.append((rec_str, rec_conf)) - res = (filter_boxes, filter_rec_res_tmp) - 
res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'img': roi_img, 'res': res}) + if self.mode == 'structure': + ori_im = img.copy() + layout_res = self.table_layout.detect(img[..., ::-1]) + res_list = [] + for region in layout_res: + x1, y1, x2, y2 = region.coordinates + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + roi_img = ori_im[y1:y2, x1:x2, :] + if region.type == 'Table': + res = self.table_system(roi_img) + else: + filter_boxes, filter_rec_res = self.text_system(roi_img) + filter_boxes = [x + [x1, y1] for x in filter_boxes] + filter_boxes = [ + x.reshape(-1).tolist() for x in filter_boxes + ] + # remove style char + style_token = [ + '', '', '', '', '', + '', '', '', '', '', + '', '', '', '' + ] + filter_rec_res_tmp = [] + for rec_res in filter_rec_res: + rec_str, rec_conf = rec_res + for token in style_token: + if token in rec_str: + rec_str = rec_str.replace(token, '') + filter_rec_res_tmp.append((rec_str, rec_conf)) + res = (filter_boxes, filter_rec_res_tmp) + res_list.append({ + 'type': region.type, + 'bbox': [x1, y1, x2, y2], + 'img': roi_img, + 'res': res + }) + elif self.mode == 'vqa': + res_list, _ = self.vqa_engine(img) return res_list @@ -91,29 +114,35 @@ def save_structure_res(res, save_folder, img_name): excel_save_folder = os.path.join(save_folder, img_name) os.makedirs(excel_save_folder, exist_ok=True) # save res - with open(os.path.join(excel_save_folder, 'res.txt'), 'w', encoding='utf8') as f: + with open( + os.path.join(excel_save_folder, 'res.txt'), 'w', + encoding='utf8') as f: for region in res: if region['type'] == 'Table': - excel_path = os.path.join(excel_save_folder, '{}.xlsx'.format(region['bbox'])) + excel_path = os.path.join(excel_save_folder, + '{}.xlsx'.format(region['bbox'])) to_excel(region['res'], excel_path) if region['type'] == 'Figure': roi_img = region['img'] - img_path = os.path.join(excel_save_folder, '{}.jpg'.format(region['bbox'])) + img_path = os.path.join(excel_save_folder, + '{}.jpg'.format(region['bbox'])) cv2.imwrite(img_path, roi_img) else: for box, rec_res in zip(region['res'][0], region['res'][1]): - f.write('{}\t{}\n'.format(np.array(box).reshape(-1).tolist(), rec_res)) + f.write('{}\t{}\n'.format( + np.array(box).reshape(-1).tolist(), rec_res)) def main(args): image_file_list = get_image_file_list(args.image_dir) image_file_list = image_file_list image_file_list = image_file_list[args.process_id::args.total_process_num] - save_folder = args.output - os.makedirs(save_folder, exist_ok=True) structure_sys = OCRSystem(args) img_num = len(image_file_list) + save_folder = os.path.join(args.output, structure_sys.mode) + os.makedirs(save_folder, exist_ok=True) + for i, image_file in enumerate(image_file_list): logger.info("[{}/{}] {}".format(i, img_num, image_file)) img, flag = check_and_read_gif(image_file) @@ -126,10 +155,16 @@ def main(args): continue starttime = time.time() res = structure_sys(img) - save_structure_res(res, save_folder, img_name) - draw_img = draw_structure_result(img, res, args.vis_font_path) - cv2.imwrite(os.path.join(save_folder, img_name, 'show.jpg'), draw_img) - logger.info('result save to {}'.format(os.path.join(save_folder, img_name))) + + if structure_sys.mode == 'structure': + save_structure_res(res, save_folder, img_name) + draw_img = draw_structure_result(img, res, args.vis_font_path) + img_save_path = os.path.join(save_folder, img_name, 'show.jpg') + elif structure_sys.mode == 'vqa': + draw_img = draw_ser_results(img, res, args.vis_font_path) + img_save_path = os.path.join(save_folder, 
img_name + '.jpg')
+            cv2.imwrite(img_save_path, draw_img)
+            logger.info('result save to {}'.format(img_save_path))
         elapse = time.time() - starttime
         logger.info("Predict time : {:.3f}s".format(elapse))
diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md
index 67c4d8e26d5c615f4a930752005420ba1abcc834..30a11a20e5de90500d1408f671ba914f336a0b43 100644
--- a/ppstructure/table/README.md
+++ b/ppstructure/table/README.md
@@ -20,9 +20,9 @@ We evaluated the algorithm on the PubTabNet[1] eval dataset, and the

 |Method|[TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src)|
-| --- | --- |
-| EDD[2] | 88.3 |
-| Ours | 93.32 |
+| --- | --- |
+| EDD[2] | 88.3 |
+| Ours | 93.32 |

 ## 3. How to use

@@ -41,7 +41,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_tab
 wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar
 cd ..
 # run
-python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table
+python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --output ../output/table
 ```
 Note: The above model is trained on the PubLayNet dataset and only supports English scanning scenarios. If you need to identify other scenarios, you need to train the model yourself and replace the three fields `det_model_dir`, `rec_model_dir`, `table_model_dir`.

@@ -82,8 +82,8 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo
 The table uses [TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) as the evaluation metric of the model. Before the model evaluation, the three models in the pipeline need to be exported as inference models (we have provided them), and the gt for evaluation needs to be prepared. Examples of gt are as follows:
 ```json
 {"PMC4289340_004_00.png": [
-  ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"],
-  [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]],
+  ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"],
+  [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]],
  [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]]
 ]}
 ```
@@ -95,7 +95,7 @@ In gt json, the key is the image name, the value is the corresponding gt, and gt
 Use the following command to evaluate. After the evaluation is completed, the teds indicator will be output.
 ```python
 cd PaddleOCR/ppstructure
-python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json
+python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json
 ```
 If the PubTabNet eval dataset is used, the following will be output

@@ -113,4 +113,4 @@ After running, the excel sheet of each picture will be saved in the directory sp
 Reference
 1. https://github.com/ibm-aur-nlp/PubTabNet
-2. https://arxiv.org/pdf/1911.10683
\ No newline at end of file
+2. https://arxiv.org/pdf/1911.10683
diff --git a/ppstructure/table/README_ch.md b/ppstructure/table/README_ch.md
index 2e90ad33423da347b5a51444f2be53ed2eb67a7a..33276b36e4973e83d7efa673b90013cf5727dfe2 100644
--- a/ppstructure/table/README_ch.md
+++ b/ppstructure/table/README_ch.md
@@ -34,9 +34,9 @@

 |算法|[TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src)|
-| --- | --- |
-| EDD[2] | 88.3 |
-| Ours | 93.32 |
+| --- | --- |
+| EDD[2] | 88.3 |
+| Ours | 93.32 |

 ## 3. 使用

@@ -56,7 +56,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_tab
 wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar
 cd ..
 # 执行预测
-python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table
+python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --output ../output/table
 ```
 运行完成后,每张图片的excel表格会保存到output字段指定的目录下

@@ -94,8 +94,8 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo
 表格使用 [TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下:
 ```json
 {"PMC4289340_004_00.png": [
-  ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"],
-  [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]],
+  ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"],
+  [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]],
 [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]]
 ]}
 ```
@@ -107,7 +107,7 @@ json 中,key为图片名,value为对应的gt,gt是一个由三个item组
 准备完成后使用如下命令进行评估,评估完成后会输出teds指标。
 ```python
 cd PaddleOCR/ppstructure
-python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json
+python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json
 ```
 如使用PubTabNet评估数据集,将会输出
 ```bash

@@ -123,4 +123,4 @@ python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model
 Reference
 1. https://github.com/ibm-aur-nlp/PubTabNet
-2. https://arxiv.org/pdf/1911.10683
\ No newline at end of file
+2. https://arxiv.org/pdf/1911.10683
diff --git a/ppstructure/utility.py b/ppstructure/utility.py
index 7d9fa76d0ada58e363243c114519d001de3fbf2a..ce7a801b1bb4094d3f4d2ba467332c6763ad6287 100644
--- a/ppstructure/utility.py
+++ b/ppstructure/utility.py
@@ -21,13 +21,31 @@ def init_args():
     parser = infer_args()

     # params for output
-    parser.add_argument("--output", type=str, default='./output/table')
+    parser.add_argument("--output", type=str, default='./output')
     # params for table structure
     parser.add_argument("--table_max_len", type=int, default=488)
     parser.add_argument("--table_model_dir", type=str)
     parser.add_argument("--table_char_type", type=str, default='en')
-    parser.add_argument("--table_char_dict_path", type=str, default="../ppocr/utils/dict/table_structure_dict.txt")
-    parser.add_argument("--layout_path_model", type=str, default="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config")
+    parser.add_argument(
+        "--table_char_dict_path",
+        type=str,
+        default="../ppocr/utils/dict/table_structure_dict.txt")
+    parser.add_argument(
+        "--layout_path_model",
+        type=str,
+        default="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config")
+
+    # params for ser
+    parser.add_argument("--model_name_or_path", type=str)
+    parser.add_argument("--max_seq_length", type=int, default=512)
+    parser.add_argument(
+        "--label_map_path", type=str, default='./vqa/labels/labels_ser.txt')
+
+    parser.add_argument(
+        "--mode",
+        type=str,
+        default='structure',
+        help='structure and vqa are supported')

     return parser

@@ -48,5 +66,6 @@ def draw_structure_result(image, result, font_path):
             boxes.append(np.array(box).reshape(-1, 2))
             txts.append(rec_res[0])
             scores.append(rec_res[1])
-    im_show = draw_ocr_box_txt(image, boxes, txts, scores, font_path=font_path,drop_score=0)
-    return im_show
\ No newline at end of file
+    im_show = draw_ocr_box_txt(
+        image, boxes, txts, scores, font_path=font_path, drop_score=0)
+    return im_show
diff --git a/ppstructure/vqa/infer_ser_e2e.py b/ppstructure/vqa/infer_ser_e2e.py
index
1638e78a11105feb1cb037a545005b2384672eb8..3ebb350fd9ce90fa5a5688c34f041f67105fcf86 100644 --- a/ppstructure/vqa/infer_ser_e2e.py +++ b/ppstructure/vqa/infer_ser_e2e.py @@ -23,12 +23,10 @@ from PIL import Image import paddle from paddlenlp.transformers import LayoutXLMModel, LayoutXLMTokenizer, LayoutXLMForTokenClassification -from paddleocr import PaddleOCR - # relative reference -from utils import parse_args, get_image_file_list, draw_ser_results, get_bio_label_maps +from .utils import parse_args, get_image_file_list, draw_ser_results, get_bio_label_maps -from utils import pad_sentences, split_page, preprocess, postprocess, merge_preds_list_with_ocr_info +from .utils import pad_sentences, split_page, preprocess, postprocess, merge_preds_list_with_ocr_info def trans_poly_to_bbox(poly): @@ -52,6 +50,7 @@ def parse_ocr_info_for_ser(ocr_result): class SerPredictor(object): def __init__(self, args): + self.max_seq_length = args.max_seq_length # init ser token and model @@ -62,9 +61,11 @@ class SerPredictor(object): self.model.eval() # init ocr_engine + from paddleocr import PaddleOCR + self.ocr_engine = PaddleOCR( - rec_model_dir=args.ocr_rec_model_dir, - det_model_dir=args.ocr_det_model_dir, + rec_model_dir=args.rec_model_dir, + det_model_dir=args.det_model_dir, use_angle_cls=False, show_log=False) # init dict diff --git a/ppstructure/vqa/utils.py b/ppstructure/vqa/utils.py index 0af180ada2eae740c042378c73b884239ddbf7b9..f4db20d5cbcb6cf510bb794bb0e7d836da028b2f 100644 --- a/ppstructure/vqa/utils.py +++ b/ppstructure/vqa/utils.py @@ -380,8 +380,8 @@ def parse_args(): parser.add_argument("--seed", type=int, default=2048, help="random seed for initialization",) - parser.add_argument("--ocr_rec_model_dir", default=None, type=str, ) - parser.add_argument("--ocr_det_model_dir", default=None, type=str, ) + parser.add_argument("--rec_model_dir", default=None, type=str, ) + parser.add_argument("--det_model_dir", default=None, type=str, ) parser.add_argument( "--label_map_path", default="./labels/labels_ser.txt", type=str, required=False, ) parser.add_argument("--infer_imgs", default=None, type=str, required=False)
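
With `--ocr_rec_model_dir`/`--ocr_det_model_dir` renamed to `--rec_model_dir`/`--det_model_dir`, `SerPredictor` now reads the same flags as the rest of the pipeline and can also be driven directly from Python instead of through `predict_system.py`. Below is a minimal sketch of that direct use (not part of the patch), assuming it is run from the `ppstructure` directory with the SER model downloaded and unpacked as in the quickstart; the image and output paths are placeholders.

```python
# Minimal sketch: direct SER inference without going through predict_system.py.
# Assumes: run from the ppstructure/ directory, the PaddleOCR repo root on
# sys.path, and PP-Layout_v1.0_ser_pretrained unpacked under inference/
# (placeholder paths - adjust to your layout).
import cv2

from ppstructure.utility import parse_args
from ppstructure.vqa.infer_ser_e2e import SerPredictor, draw_ser_results

args = parse_args()
args.model_name_or_path = 'inference/PP-Layout_v1.0_ser_pretrained/'

ser_engine = SerPredictor(args)  # loads LayoutXLM and an internal PaddleOCR engine

img = cv2.imread('vqa/images/input/zh_val_0.jpg')
ser_res, _ = ser_engine(img)     # same call predict_system.py makes in vqa mode

vis = draw_ser_results(img, ser_res, args.vis_font_path)
cv2.imwrite('zh_val_0_ser.jpg', vis)
```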