diff --git a/doc/table/PaddleDetection_config.png b/doc/table/PaddleDetection_config.png
deleted file mode 100644
index d18932b66cc148b7796fe4b319ad9eb82c2a2868..0000000000000000000000000000000000000000
Binary files a/doc/table/PaddleDetection_config.png and /dev/null differ
diff --git a/doc/table/layout.jpg b/doc/table/layout.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..db7246b314556d73cd49d049b9b480887b6ef994
Binary files /dev/null and b/doc/table/layout.jpg differ
diff --git a/doc/table/result_all.jpg b/doc/table/result_all.jpg
index 3dd9840643989f1049c228c201b43f9ed89a5fcb..3bffd40ed8821bb5259337bc7651cde51c5a7861 100644
Binary files a/doc/table/result_all.jpg and b/doc/table/result_all.jpg differ
diff --git a/doc/table/result_text.jpg b/doc/table/result_text.jpg
index 94c9bce4a73b2764bb9791972f62a3a5b37fed45..5f164a1ab10d3f3e63c761e6e059c2905ba9c878 100644
Binary files a/doc/table/result_text.jpg and b/doc/table/result_text.jpg differ
diff --git a/doc/table/tableocr_pipeline.jpg b/doc/table/tableocr_pipeline.jpg
index bd467b1bd38f89f887ccbf0aa0f060d738e6047b..da868791b16af3b56cb07c86f18e25b45c6f5b47 100644
Binary files a/doc/table/tableocr_pipeline.jpg and b/doc/table/tableocr_pipeline.jpg differ
diff --git a/doc/table/tableocr_pipeline_en.jpg b/doc/table/tableocr_pipeline_en.jpg
index 654366878e8262eede2d4330f311ea0819ff6533..cedc9bd5ca06147d6c4d22e709418fc7081d940e 100644
Binary files a/doc/table/tableocr_pipeline_en.jpg and b/doc/table/tableocr_pipeline_en.jpg differ
diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py
index 8426bcf2b9a71e0293d912e25f1b617fd18c59fc..8ebe5b2741b77537b46b8057d9aa9c36dc99aeec 100644
--- a/ppocr/postprocess/rec_postprocess.py
+++ b/ppocr/postprocess/rec_postprocess.py
@@ -346,14 +346,14 @@ class TableLabelDecode(object):
list_elem = []
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
- substr = lines[0].decode('utf-8').strip("\n").split("\t")
+ substr = lines[0].decode('utf-8').strip("\n").strip("\r\n").split("\t")
character_num = int(substr[0])
elem_num = int(substr[1])
for cno in range(1, 1 + character_num):
- character = lines[cno].decode('utf-8').strip("\n")
+ character = lines[cno].decode('utf-8').strip("\n").strip("\r\n")
list_character.append(character)
for eno in range(1 + character_num, 1 + character_num + elem_num):
- elem = lines[eno].decode('utf-8').strip("\n")
+ elem = lines[eno].decode('utf-8').strip("\n").strip("\r\n")
list_elem.append(elem)
return list_character, list_elem
diff --git a/ppstructure/README.md b/ppstructure/README.md
index edd106a27149c8e10ee898f561132e8477af39ae..90cd412df038a59ab6555b1ff632f99e2d32bb74 100644
--- a/ppstructure/README.md
+++ b/ppstructure/README.md
@@ -7,7 +7,7 @@ PaddleStructure is an OCR toolkit for complex layout analysis. It can divide doc
**install layoutparser**
```sh
-pip3 install https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
+pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
```
**install paddlestructure**
@@ -57,8 +57,28 @@ im_show = draw_result(image, result,font_path=font_path)
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
```
+#### 1.2.3 Description of returned results
+The return result of PaddleStructure is a list of dicts. An example is as follows:
+
+```shell
+[
+ { 'type': 'Text',
+ 'bbox': [34, 432, 345, 462],
+ 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]],
+ [('Tigure-6. The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)])
+ }
+]
+```
+The description of each field in dict is as follows
+
+| Parameter | Description |
+| --------------- | -------------|
+|type|Type of image area|
+|bbox|The coordinates of the image area in the original image, respectively [left upper x, left upper y, right bottom x, right bottom y]|
+|res|OCR or table recognition result of the image area.
Table: HTML string of the table;
OCR: A tuple containing the detection coordinates and recognition results of each single line of text|
+
-#### 1.2.3 Parameter Description:
+#### 1.2.4 Parameter Description:
| Parameter | Description | Default value |
| --------------- | ---------------------------------------- | ------------------------------------------- |
@@ -80,22 +100,22 @@ In PaddleStructure, the image will be analyzed by layoutparser first. In the lay
### 2.1 LayoutParser
-Layout analysis divides the document data into regions, including the use of Python scripts for layout analysis tools, extraction of special category detection boxes, performance indicators, and custom training layout analysis models. For details, please refer to [document](layout/README.md).
+Layout analysis divides the document data into regions, including the use of Python scripts for layout analysis tools, extraction of special category detection boxes, performance indicators, and custom training layout analysis models. For details, please refer to [document](layout/README_en.md).
### 2.2 Table OCR
Table OCR converts table image into excel documents, which include the detection and recognition of table text and the prediction of table structure and cell coordinates. For detailed, please refer to [document](table/README.md)
-### 3. Predictive by inference engine
+## 3. Predictive by inference engine
Use the following commands to complete the inference.
```python
-python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table
+python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf
```
After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel, and the excel file name will be the coordinates of the table in the image.
-# 3. Model List
+**Model List**
|model name|description|config|model size|download|
diff --git a/ppstructure/README_ch.md b/ppstructure/README_ch.md
index f9dc56ab264c377c81ba8328d5103cee801a000c..7ae55534309ab48caecf8de1ae20c0536b49823e 100644
--- a/ppstructure/README_ch.md
+++ b/ppstructure/README_ch.md
@@ -8,7 +8,7 @@ PaddleStructure是一个用于复杂版面分析的OCR工具包,其能够对
**安装 layoutparser**
```sh
-pip3 install https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
+pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
```
**安装 paddlestructure**
@@ -58,8 +58,28 @@ im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
```
+#### 1.2.3 返回结果说明
+PaddleStructure 的返回结果为一个dict组成的list,示例如下
+
+```shell
+[
+ { 'type': 'Text',
+ 'bbox': [34, 432, 345, 462],
+ 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]],
+ [('Tigure-6. The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)])
+ }
+]
+```
+dict 里各个字段说明如下
+
+| 字段 | 说明 |
+| --------------- | -------------|
+|type|图片区域的类型|
+|bbox|图片区域的在原图的坐标,分别[左上角x,左上角y,右下角x,右下角y]|
+|res|图片区域的OCR或表格识别结果。
表格: 表格的HTML字符串;
OCR: 一个包含各个单行文字的检测坐标和识别结果的元组|
-#### 1.2.3 参数说明
+
+#### 1.2.4 参数说明
| 字段 | 说明 | 默认值 |
| --------------- | ---------------------------------------- | ------------------------------------------- |
@@ -80,28 +100,27 @@ im_show.save('result.jpg')
在PaddleStructure中,图片会先经由layoutparser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过Table OCR处理后,表格图片转换为相同表格样式的Excel文件。
-### 2.1 LayoutParser
+### 2.1 版面分析
版面分析对文档数据进行区域分类,其中包括版面分析工具的Python脚本使用、提取指定类别检测框、性能指标以及自定义训练版面分析模型,详细内容可以参考[文档](layout/README.md)。
-### 2.2 Table OCR
+### 2.2 表格识别
Table OCR将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md)
-### 3. 预测引擎推理
+## 3. 预测引擎推理
使用如下命令即可完成预测引擎的推理
```python
-python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table
+python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf
```
运行完成后,每张图片会output字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,excel文件名为表格在图片里的坐标。
-# 3. Model List
-
+**Model List**
|模型名称|模型简介|配置文件|推理模型大小|下载地址|
| --- | --- | --- | --- | --- |
|en_ppocr_mobile_v2.0_table_det|英文表格场景的文字检测|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
-|en_ppocr_mobile_v2.0_table_rec|英文表格场景的文字识别|[rec_chinese_lite_train_v2.0.yml](..//configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
+|en_ppocr_mobile_v2.0_table_rec|英文表格场景的文字识别|[rec_chinese_lite_train_v2.0.yml](../configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
|en_ppocr_mobile_v2.0_table_structure|英文表格场景的表格结构预测|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
\ No newline at end of file
diff --git a/ppstructure/layout/README.md b/ppstructure/layout/README.md
index 274a8c63a58543d3769bbd4b11133496e74f405a..fde6d75a4d278551aba77075c3f9789a24a01b21 100644
--- a/ppstructure/layout/README.md
+++ b/ppstructure/layout/README.md
@@ -1,17 +1,20 @@
# 版面分析使用说明
-* [1. 安装whl包](#安装whl包)
-* [2. 使用](#使用)
-* [3. 后处理](#后处理)
-* [4. 指标](#指标)
-* [5. 训练版面分析模型](#训练版面分析模型)
+[1. 安装whl包](#安装whl包)
+
+[2. 使用](#使用)
+
+[3. 后处理](#后处理)
+
+[4. 指标](#指标)
+
+[5. 训练版面分析模型](#训练版面分析模型)
## 1. 安装whl包
```bash
-wget https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
-pip install -U layoutparser-0.0.0-py3-none-any.whl
+pip install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
```
@@ -21,8 +24,9 @@ pip install -U layoutparser-0.0.0-py3-none-any.whl
使用layoutparser识别给定文档的布局:
```python
+import cv2
import layoutparser as lp
-image = cv2.imread("imags/paper-image.jpg")
+image = cv2.imread("doc/table/layout.jpg")
image = image[..., ::-1]
# 加载模型
@@ -35,7 +39,8 @@ model = lp.PaddleDetectionLayoutModel(config_path="lp://PubLayNet/ppyolov2_r50vd
layout = model.detect(image)
# 显示结果
-lp.draw_box(image, layout, box_width=3, show_element_type=True)
+show_img = lp.draw_box(image, layout, box_width=3, show_element_type=True)
+show_img.show()
```
下图展示了结果,不同颜色的检测框表示不同的类别,并通过`show_element_type`在框的左上角显示具体类别:
@@ -67,7 +72,7 @@ lp.draw_box(image, layout, box_width=3, show_element_type=True)
| [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) | lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config | {0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"} |
* TableBank word和TableBank latex分别在word文档、latex文档数据集训练;
-* 下载TableBank数据集同时包含word和latex。
+* 下载的TableBank数据集里同时包含word和latex。
@@ -76,6 +81,7 @@ lp.draw_box(image, layout, box_width=3, show_element_type=True)
版面分析检测包含多个类别,如果只想获取指定类别(如"Text"类别)的检测框、可以使用下述代码:
```python
+# 接上面代码
# 首先过滤特定文本类型的区域
text_blocks = lp.Layout([b for b in layout if b.type=='Text'])
figure_blocks = lp.Layout([b for b in layout if b.type=='Figure'])
@@ -99,9 +105,10 @@ right_blocks.sort(key = lambda b:b.coordinates[1])
text_blocks = lp.Layout([b.set(id = idx) for idx, b in enumerate(left_blocks + right_blocks)])
# 显示结果
-lp.draw_box(image, text_blocks,
+show_img = lp.draw_box(image, text_blocks,
box_width=3,
show_element_id=True)
+show_img.show()
```
显示只有"Text"类别的结果:
diff --git a/ppstructure/layout/README_en.md b/ppstructure/layout/README_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d885c567b15d305602e9fb00058c4cf281de041
--- /dev/null
+++ b/ppstructure/layout/README_en.md
@@ -0,0 +1,139 @@
+# Getting Started
+
+[1. Install whl package](#Install whl package)
+
+[2. Quick Start](#Quick Start)
+
+[3. PostProcess](#PostProcess)
+
+[4. Results](#Results)
+
+[5. Training](#Training)
+
+
+
+## 1. Install whl package
+```bash
+wget https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
+pip install -U layoutparser-0.0.0-py3-none-any.whl
+```
+
+
+
+## 2. Quick Start
+
+Use LayoutParser to identify the layout of a given document:
+
+```python
+import cv2
+import layoutparser as lp
+image = cv2.imread("doc/table/layout.jpg")
+image = image[..., ::-1]
+
+# load model
+model = lp.PaddleDetectionLayoutModel(config_path="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config",
+ threshold=0.5,
+ label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"},
+ enforce_cpu=False,
+ enable_mkldnn=True)
+# detect
+layout = model.detect(image)
+
+# show result
+show_img = lp.draw_box(image, layout, box_width=3, show_element_type=True)
+show_img.show()
+```
+
+The following figure shows the result, with different colored detection boxes representing different categories and displaying specific categories in the upper left corner of the box with `show_element_type`
+
+
+
+
+`PaddleDetectionLayoutModel`parameters are described as follows:
+
+| parameter | description | default | remark |
+| :------------: | :------------------------------------------------------: | :---------: | :----------------------------------------------------------: |
+| config_path | model config path | None | Specifying config_path will automatically download the model (only for the first time; afterwards the model will exist and will not be downloaded again) |
+| model_path | model path | None | local model path; one of config_path and model_path must be set, they cannot both be None at the same time |
+| threshold | threshold of prediction score | 0.5 | \ |
+| input_shape | picture size of reshape | [3,640,640] | \ |
+| batch_size | testing batch size | 1 | \ |
+| label_map | category mapping table | None | When config_path is set, it can be None; the label_map is automatically obtained according to the dataset name |
+| enforce_cpu | whether to use CPU | False | False to use GPU, and True to force the use of CPU |
+| enable_mkldnn | whether mkldnn acceleration is enabled in CPU prediction | True | \ |
+| thread_num | the number of CPU threads | 10 | \ |
+
+The following model configurations and label maps are currently supported, which you can use by modifying '--config_path' and '--label_map' to detect different types of content:
+
+| dataset | config_path | label_map |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------------------------------------------- |
+| [TableBank](https://doc-analysis.github.io/tablebank-page/index.html) word | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_word/config | {0:"Table"} |
+| TableBank latex | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_latex/config | {0:"Table"} |
+| [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) | lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config | {0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"} |
+
+* TableBank word and TableBank latex are trained on datasets of word documents and latex documents respectively;
+* The downloaded TableBank dataset contains both word and latex.
+
+
+
+## 3. PostProcess
+
+Layout parser contains multiple categories, if you only want to get the detection box for a specific category (such as the "Text" category), you can use the following code:
+
+```python
+# follow the above code
+# filter areas for a specific text type
+text_blocks = lp.Layout([b for b in layout if b.type=='Text'])
+figure_blocks = lp.Layout([b for b in layout if b.type=='Figure'])
+
+# text areas may be detected within the image area, delete these areas
+text_blocks = lp.Layout([b for b in text_blocks \
+ if not any(b.is_in(b_fig) for b_fig in figure_blocks)])
+
+# sort text areas and assign ID
+h, w = image.shape[:2]
+
+left_interval = lp.Interval(0, w/2*1.05, axis='x').put_on_canvas(image)
+
+left_blocks = text_blocks.filter_by(left_interval, center=True)
+left_blocks.sort(key = lambda b:b.coordinates[1])
+
+right_blocks = [b for b in text_blocks if b not in left_blocks]
+right_blocks.sort(key = lambda b:b.coordinates[1])
+
+# the two lists are merged and the indexes are added in order
+text_blocks = lp.Layout([b.set(id = idx) for idx, b in enumerate(left_blocks + right_blocks)])
+
+# display result
+show_img = lp.draw_box(image, text_blocks,
+ box_width=3,
+ show_element_id=True)
+show_img.show()
+```
+
+Displays results with only the "Text" category:
+
+
+
+
+
+
+## 4. Results
+
+| Dataset | mAP | CPU time cost | GPU time cost |
+| --------- | ---- | ------------- | ------------- |
+| PubLayNet | 93.6 | 1713.7ms | 66.6ms |
+| TableBank | 96.2 | 1968.4ms | 65.1ms |
+
+**Environment:**
+
+ **CPU:** Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz,24core
+
+ **GPU:** a single NVIDIA Tesla P40
+
+
+
+## 5. Training
+
+The above model is based on [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection). If you want to train your own layout parser model, please refer to: [train_layoutparser_model](train_layoutparser_model_en.md)
+
diff --git a/ppstructure/layout/train_layoutparser_model.md b/ppstructure/layout/train_layoutparser_model.md
index 0a4554e12d9e565fa8e3de4a83cbd2eb5b515c6e..e1cd2e0773d5edef80f2aba69c491eebb9cfd03e 100644
--- a/ppstructure/layout/train_layoutparser_model.md
+++ b/ppstructure/layout/train_layoutparser_model.md
@@ -1,15 +1,24 @@
# 训练版面分析
-* [1. 安装](#安装)
- * [1.1 环境要求](#环境要求)
- * [1.2 安装PaddleDetection](#安装PaddleDetection)
-* [2. 准备数据](#准备数据)
-* [3. 配置文件改动和说明](#配置文件改动和说明)
-* [4. PaddleDetection训练](#训练)
-* [5. PaddleDetection预测](#预测)
-* [6. 预测部署](#预测部署)
- * [6.1 模型导出](#模型导出)
- * [6.2 layout parser预测](#layout_parser预测)
+[1. 安装](#安装)
+
+ [1.1 环境要求](#环境要求)
+
+ [1.2 安装PaddleDetection](#安装PaddleDetection)
+
+[2. 准备数据](#准备数据)
+
+[3. 配置文件改动和说明](#配置文件改动和说明)
+
+[4. PaddleDetection训练](#训练)
+
+[5. PaddleDetection预测](#预测)
+
+[6. 预测部署](#预测部署)
+
+ [6.1 模型导出](#模型导出)
+
+ [6.2 layout parser预测](#layout_parser预测)
@@ -64,10 +73,10 @@ tar -xvf publaynet.tar.gz
| `train/` | Images in the training subset | 335,703 |
| `val/` | Images in the validation subset | 11,245 |
| `test/` | Images in the testing subset | 11,405 |
-| `train.json` | Annotations for training images | |
-| `val.json` | Annotations for validation images | |
-| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | |
-| `README.txt` | Text file with the file names and description | |
+| `train.json` | Annotations for training images | 1 |
+| `val.json` | Annotations for validation images | 1 |
+| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | 1 |
+| `README.txt` | Text file with the file names and description | 1 |
如果使用其它数据集,请参考[准备训练数据](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md)
@@ -77,23 +86,30 @@ tar -xvf publaynet.tar.gz
我们使用 `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml`配置进行训练,配置文件摘要如下:
-
-
-
+```bash
+_BASE_: [
+ '../datasets/coco_detection.yml',
+ '../runtime.yml',
+ './_base_/ppyolov2_r50vd_dcn.yml',
+ './_base_/optimizer_365e.yml',
+ './_base_/ppyolov2_reader.yml',
+]
+
+snapshot_epoch: 8
+weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final
+```
+从中可以看到 `ppyolov2_r50vd_dcn_365e_coco.yml` 配置需要依赖其他的配置文件,在该例子中需要依赖:
-从上图看到 `ppyolov2_r50vd_dcn_365e_coco.yml` 配置需要依赖其他的配置文件,在该例子中需要依赖:
+- coco_detection.yml:主要说明了训练数据和验证数据的路径
-```
-coco_detection.yml:主要说明了训练数据和验证数据的路径
+- runtime.yml:主要说明了公共的运行参数,比如是否使用GPU、每多少个epoch存储checkpoint等
-runtime.yml:主要说明了公共的运行参数,比如是否使用GPU、每多少个epoch存储checkpoint等
+- optimizer_365e.yml:主要说明了学习率和优化器的配置
-optimizer_365e.yml:主要说明了学习率和优化器的配置
+- ppyolov2_r50vd_dcn.yml:主要说明模型和主干网络的情况
-ppyolov2_r50vd_dcn.yml:主要说明模型和主干网络的情况
+- ppyolov2_reader.yml:主要说明数据读取器配置,如batch size,并发加载子进程数等,同时包含读取后预处理操作,如resize、数据增强等等
-ppyolov2_reader.yml:主要说明数据读取器配置,如batch size,并发加载子进程数等,同时包含读取后预处理操作,如resize、数据增强等等
-```
根据实际情况,修改上述文件,比如数据集路径、batch size等。
@@ -147,7 +163,7 @@ python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer
## 6. 预测部署
-在layout parser中使用自己训练好的模型,
+在layout parser中使用自己训练好的模型。
diff --git a/ppstructure/layout/train_layoutparser_model_en.md b/ppstructure/layout/train_layoutparser_model_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..ffe1026278704dc9f7994ee8cc514823c1515163
--- /dev/null
+++ b/ppstructure/layout/train_layoutparser_model_en.md
@@ -0,0 +1,204 @@
+# Training layout-parser
+
+[1. Installation](#Installation)
+
+ [1.1 Requirements](#Requirements)
+
+ [1.2 Install PaddleDetection](#Install PaddleDetection)
+
+[2. Data preparation](#Data preparation)
+
+[3. Configuration](#Configuration)
+
+[4. Training](#Training)
+
+[5. Prediction](#Prediction)
+
+[6. Deployment](#Deployment)
+
+ [6.1 Export model](#Export model)
+
+ [6.2 Inference](#Inference)
+
+
+
+## 1. Installation
+
+
+
+### 1.1 Requirements
+
+- PaddlePaddle 2.1
+- OS 64 bit
+- Python 3(3.5.1+/3.6/3.7/3.8/3.9),64 bit
+- pip/pip3(9.0.1+), 64 bit
+- CUDA >= 10.1
+- cuDNN >= 7.6
+
+
+
+### 1.2 Install PaddleDetection
+
+```bash
+# Clone PaddleDetection repository
+cd
+git clone https://github.com/PaddlePaddle/PaddleDetection.git
+
+cd PaddleDetection
+# Install other dependencies
+pip install -r requirements.txt
+```
+
+For more installation tutorials, please refer to: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md)
+
+
+
+## 2. Data preparation
+
+Download the [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) dataset
+
+```bash
+cd PaddleDetection/dataset/
+mkdir publaynet
+# execute the command,download PubLayNet
+wget -O publaynet.tar.gz https://dax-cdn.cdn.appdomain.cloud/dax-publaynet/1.0.0/publaynet.tar.gz?_ga=2.104193024.1076900768.1622560733-649911202.1622560733
+# unpack
+tar -xvf publaynet.tar.gz
+```
+
+PubLayNet directory structure after decompressing :
+
+| File or Folder | Description | num |
+| :------------- | :----------------------------------------------- | ------- |
+| `train/` | Images in the training subset | 335,703 |
+| `val/` | Images in the validation subset | 11,245 |
+| `test/` | Images in the testing subset | 11,405 |
+| `train.json` | Annotations for training images | 1 |
+| `val.json` | Annotations for validation images | 1 |
+| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | 1 |
+| `README.txt` | Text file with the file names and description | 1 |
+
+For other datasets, please refer to [PrepareDataSet](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md)
+
+
+
+## 3. Configuration
+
+We use the `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml` configuration for training,the configuration file is as follows
+
+```bash
+_BASE_: [
+ '../datasets/coco_detection.yml',
+ '../runtime.yml',
+ './_base_/ppyolov2_r50vd_dcn.yml',
+ './_base_/optimizer_365e.yml',
+ './_base_/ppyolov2_reader.yml',
+]
+
+snapshot_epoch: 8
+weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final
+```
+The `ppyolov2_r50vd_dcn_365e_coco.yml` configuration depends on other configuration files, in this case:
+
+- coco_detection.yml:mainly explains the path of training data and verification data
+
+- runtime.yml:mainly describes the common parameters, such as whether to use the GPU and how many epoch to save model etc.
+
+- optimizer_365e.yml:mainly explains the learning rate and optimizer configuration
+
+- ppyolov2_r50vd_dcn.yml:mainly describes the model and the network
+
+- ppyolov2_reader.yml:mainly describes the configuration of data readers, such as batch size and number of concurrent loading child processes, and also includes post preprocessing, such as resize and data augmention etc.
+
+
+Modify the preceding files, such as the dataset path and batch size etc.
+
+
+
+## 4. Training
+
+PaddleDetection provides single-card/multi-card training mode to meet various training needs of users:
+
+* GPU single card training
+
+```bash
+export CUDA_VISIBLE_DEVICES=0 #Don't need to run this command on Windows and Mac
+python tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml
+```
+
+* GPU multi-card training
+
+```bash
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval
+```
+
+--eval: training while verifying
+
+* Model recovery training
+
+During the daily training, if training is interrupted due to some reasons, you can use the -r command to resume the training:
+
+```bash
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval -r output/ppyolov2_r50vd_dcn_365e_coco/10000
+```
+
+Note: If you encounter "`Out of memory error`" , try reducing `batch_size` in the `ppyolov2_reader.yml` file
+
+prediction
+
+## 5. Prediction
+
+Set parameters and use PaddleDetection to predict:
+
+```bash
+export CUDA_VISIBLE_DEVICES=0
+python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer_img=images/paper-image.jpg --output_dir=infer_output/ --draw_threshold=0.5 -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final --use_vdl=True
+```
+
+`--draw_threshold` is an optional parameter. According to the calculation of [NMS](https://ieeexplore.ieee.org/document/1699659), different thresholds will produce different results. `keep_top_k` represents the maximum number of output targets; the default value is 10. You can set a different value according to your actual situation.
+
+
+
+## 6. Deployment
+
+Use your trained model in Layout Parser
+
+
+
+### 6.1 Export model
+
+In the process of model training, the saved model file contains both the forward prediction and the back-propagation process. In actual industrial deployment, back propagation is not needed, so the model should be exported into the model format required by the deployment. The `tools/export_model.py` script is provided in PaddleDetection to export the model.
+
+The exported model name defaults to `model.*`, while Layout Parser expects a model named `inference.*`. So edit [PaddleDetection/ppdet/engine/trainer.py](https://github.com/PaddlePaddle/PaddleDetection/blob/b87a1ea86fa18ce69e44a17ad1b49c1326f19ff9/ppdet/engine/trainer.py#L512) (click on the link to see the detailed line of code), changing 'model' to 'inference'.
+
+Execute the script to export model:
+
+```bash
+python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --output_dir=./inference -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final.pdparams
+```
+
+The prediction model is exported to `inference/ppyolov2_r50vd_dcn_365e_coco` ,including:`infer_cfg.yml`(prediction not required), `inference.pdiparams`, `inference.pdiparams.info`,`inference.pdmodel`
+
+More model export tutorials, please refer to:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md)
+
+
+
+### 6.2 Inference
+
+`model_path` represents the trained model path, and layoutparser is used to predict:
+
+```python
+import layoutparser as lp
+model = lp.PaddleDetectionLayoutModel(model_path="inference/ppyolov2_r50vd_dcn_365e_coco", threshold=0.5,label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"},enforce_cpu=True,enable_mkldnn=True)
+```
+
+
+
+***
+
+More PaddleDetection training tutorials,please reference:[PaddleDetection Training](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/GETTING_STARTED_cn.md)
+
+***
+
diff --git a/ppstructure/predict_system.py b/ppstructure/predict_system.py
index 009c20521fc833d1da39699d6b39ba290cda81d0..60e2574515aeabaedc4f23d1589677c03543ce40 100644
--- a/ppstructure/predict_system.py
+++ b/ppstructure/predict_system.py
@@ -65,8 +65,17 @@ class OCRSystem(object):
filter_boxes, filter_rec_res = self.text_system(roi_img)
filter_boxes = [x + [x1, y1] for x in filter_boxes]
filter_boxes = [x.reshape(-1).tolist() for x in filter_boxes]
-
- res = (filter_boxes, filter_rec_res)
+ # remove style char
+ style_token = ['','','','','','','','',
+ '','','','','','']
+ filter_rec_res_tmp = []
+ for rec_res in filter_rec_res:
+ rec_str, rec_conf = rec_res
+ for token in style_token:
+ if token in rec_str:
+ rec_str = rec_str.replace(token, '')
+ filter_rec_res_tmp.append((rec_str,rec_conf))
+ res = (filter_boxes, filter_rec_res_tmp)
res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'res': res})
return res_list
@@ -75,14 +84,12 @@ def save_res(res, save_folder, img_name):
excel_save_folder = os.path.join(save_folder, img_name)
os.makedirs(excel_save_folder, exist_ok=True)
# save res
- for region in res:
- if region['type'] == 'Table':
- excel_path = os.path.join(excel_save_folder, '{}.xlsx'.format(region['bbox']))
- to_excel(region['res'], excel_path)
- elif region['type'] == 'Figure':
- pass
- else:
- with open(os.path.join(excel_save_folder, 'res.txt'), 'a', encoding='utf8') as f:
+ with open(os.path.join(excel_save_folder, 'res.txt'), 'w', encoding='utf8') as f:
+ for region in res:
+ if region['type'] == 'Table':
+ excel_path = os.path.join(excel_save_folder, '{}.xlsx'.format(region['bbox']))
+ to_excel(region['res'], excel_path)
+ else:
for box, rec_res in zip(region['res'][0], region['res'][1]):
f.write('{}\t{}\n'.format(np.array(box).reshape(-1).tolist(), rec_res))
diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md
index afcbe1696bb52154129b89f9a0c18d93ac11fbbe..c538db275844e8eb21f405728fe09ed10c070760 100644
--- a/ppstructure/table/README.md
+++ b/ppstructure/table/README.md
@@ -49,28 +49,31 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo
**Note**: The priority of `Global.checkpoints` is higher than that of `Global.pretrain_weights`, that is, when two parameters are specified at the same time, the model specified by `Global.checkpoints` will be loaded first. If the model path specified by `Global.checkpoints` is wrong, the one specified by `Global.pretrain_weights` will be loaded.
### 2.2 Eval
-First cd to the PaddleOCR/ppstructure directory
The table uses TEDS (Tree-Edit-Distance-based Similarity) as the evaluation metric of the model. Before the model evaluation, the three models in the pipeline need to be exported as inference models (we have provided them), and the gt for evaluation needs to be prepared. Examples of gt are as follows:
```json
-{"PMC4289340_004_00.png": [["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"], [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]], [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]]]}
+{"PMC4289340_004_00.png": [
+    ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"],
+    [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]],
+    [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]]
+]}
```
-In gt json, the key is the image name, the value is the corresponding gt, and gt is a list composed of four items, and each item is
+In gt json, the key is the image name, the value is the corresponding gt, and gt is a list composed of three items, and each item is
1. HTML string list of table structure
2. The coordinates of each cell (not including the empty text in the cell)
3. The text information in each cell (not including the empty text in the cell)
-4. The text information in each cell (including the empty text in the cell)
Use the following command to evaluate. After the evaluation is completed, the teds indicator will be output.
```python
+cd PaddleOCR/ppstructure
python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json
```
### 2.3 Inference
-First cd to the PaddleOCR/ppstructure directory
```python
+cd PaddleOCR/ppstructure
python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table
```
After running, the excel sheet of each picture will be saved in the directory specified by the output field
\ No newline at end of file
diff --git a/ppstructure/table/README_ch.md b/ppstructure/table/README_ch.md
index 4b912f3eb8d2b65898ab4eabe008bf50d9c07f50..5981dab4b85d751ad26a9ba08ca4c9056d253961 100644
--- a/ppstructure/table/README_ch.md
+++ b/ppstructure/table/README_ch.md
@@ -1,6 +1,6 @@
-# 表格结构和内容预测
+# Table OCR
-## 1. pipeline
+## 1. Table OCR pipeline
表格的ocr主要包含三个模型
1. 单行文本检测-DB
2. 单行文本识别-CRNN
@@ -10,7 +10,9 @@
![tableocr_pipeline](../../doc/table/tableocr_pipeline.jpg)
-1. 图片由单行文字检测检测模型到单行文字的坐标,然后送入识别模型拿到识别结果。
+流程说明:
+
+1. 图片由单行文字检测模型检测到单行文字的坐标,然后送入识别模型拿到识别结果。
2. 图片由表格结构和cell坐标预测模型拿到表格的结构信息和单元格的坐标信息。
3. 由单行文字的坐标、识别结果和单元格的坐标一起组合出单元格的识别结果。
4. 单元格的识别结果和表格结构一起构造表格的html字符串。
@@ -21,7 +23,7 @@
在这一章节中,我们仅介绍表格结构模型的训练,[文字检测](../../doc/doc_ch/detection.md)和[文字识别](../../doc/doc_ch/recognition.md)的模型训练请参考对应的文档。
#### 数据准备
-训练数据使用公开数据集[PubTabNet](https://arxiv.org/abs/1911.10683),可以从[官网](https://github.com/ibm-aur-nlp/PubTabNet)下载。PubTabNet数据集包含约50万张表格数据的图像,以及图像对应的html格式的注释。
+训练数据使用公开数据集PubTabNet ([论文](https://arxiv.org/abs/1911.10683),[下载地址](https://github.com/ibm-aur-nlp/PubTabNet))。PubTabNet数据集包含约50万张表格数据的图像,以及图像对应的html格式的注释。
#### 启动训练
*如果您安装的是cpu版本,请将配置文件中的 `use_gpu` 字段修改为false*
@@ -45,28 +47,30 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo
### 2.2 评估
-先cd到PaddleOCR/ppstructure目录下
表格使用 TEDS(Tree-Edit-Distance-based Similarity) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下:
```json
-{"PMC4289340_004_00.png": [["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"], [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]], [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]]]}
+{"PMC4289340_004_00.png": [
+    ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"],
+    [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]],
+    [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]]
+]}
```
-json 中,key为图片名,value为对于的gt,gt是一个由四个item组成的list,每个item分别为
+json 中,key为图片名,value为对应的gt,gt是一个由三个item组成的list,每个item分别为
1. 表格结构的html字符串list
2. 每个cell的坐标 (不包括cell里文字为空的)
3. 每个cell里的文字信息 (不包括cell里文字为空的)
-4. 每个cell里的文字信息 (包括cell里文字为空的)
准备完成后使用如下命令进行评估,评估完成后会输出teds指标。
```python
+cd PaddleOCR/ppstructure
python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json
```
### 2.3 预测
-先cd到PaddleOCR/ppstructure目录下
-
```python
+cd PaddleOCR/ppstructure
python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table
```
运行完成后,每张图片的excel表格会保存到output字段指定的目录下
diff --git a/ppstructure/table/eval_table.py b/ppstructure/table/eval_table.py
index 15f549376566811813aac40bd88ffbcbdbddbf5b..87b44d3d9792356ec1cdc65693392c288bf67448 100755
--- a/ppstructure/table/eval_table.py
+++ b/ppstructure/table/eval_table.py
@@ -46,20 +46,20 @@ def main(gt_path, img_root, args):
pred_html = text_sys(img)
pred_htmls.append(pred_html)
- gt_structures, gt_bboxes, gt_contents, contents_with_block = jsons_gt[img_name]
- gt_html, gt = get_gt_html(gt_structures, contents_with_block)
+ gt_structures, gt_bboxes, gt_contents = jsons_gt[img_name]
+ gt_html, gt = get_gt_html(gt_structures, gt_contents)
gt_htmls.append(gt_html)
scores = teds.batch_evaluate_html(gt_htmls, pred_htmls)
logger.info('teds:', sum(scores) / len(scores))
-def get_gt_html(gt_structures, contents_with_block):
+def get_gt_html(gt_structures, gt_contents):
end_html = []
td_index = 0
for tag in gt_structures:
if '' in tag:
- if contents_with_block[td_index] != []:
- end_html.extend(contents_with_block[td_index])
+ if gt_contents[td_index] != []:
+ end_html.extend(gt_contents[td_index])
end_html.append(tag)
td_index += 1
else:
diff --git a/ppstructure/utility.py b/ppstructure/utility.py
index f21b287f7d4a044838d6949fae588547ee93ec3e..29daeef4347be5b8db0f9fdeda0dd4d8864ef595 100644
--- a/ppstructure/utility.py
+++ b/ppstructure/utility.py
@@ -43,8 +43,6 @@ def draw_result(image, result, font_path):
for region in result:
if region['type'] == 'Table':
pass
- elif region['type'] == 'Figure':
- pass
else:
for box, rec_res in zip(region['res'][0], region['res'][1]):
boxes.append(np.array(box).reshape(-1, 2))