diff --git a/configs/table/table_master.yml b/configs/table/table_master.yml
index 9dfc0e274686398836de892dbcec318432e1155c..cfd98346c4e71b11f9ec6b84abe9fb4ed0932704 100755
--- a/configs/table/table_master.yml
+++ b/configs/table/table_master.yml
@@ -2,21 +2,19 @@ Global:
use_gpu: true
epoch_num: 17
log_smooth_window: 20
- print_batch_step: 5
+ print_batch_step: 100
save_model_dir: ./output/table_master/
save_epoch_step: 17
- # evaluation is run every 400 iterations after the 0th iteration
- eval_batch_step: [0, 400]
- cal_metric_during_train: True
- pretrained_model:
- checkpoints:
- save_inference_dir:
- use_visualdl: False
+ eval_batch_step: [0, 6259]
+ cal_metric_during_train: true
+ pretrained_model: null
+ checkpoints:
+ save_inference_dir: output/table_master/infer
+ use_visualdl: false
infer_img: ppstructure/docs/table/table.jpg
- save_res_path: output/table_master
- # for data or label process
+ save_res_path: ./output/table_master
character_dict_path: ppocr/utils/dict/table_master_structure_dict.txt
- infer_mode: False
+ infer_mode: false
max_text_length: 500
process_total_num: 0
process_cut_num: 0
@@ -33,8 +31,8 @@ Optimizer:
gamma: 0.1
warmup_epoch: 0.02
regularizer:
- name: 'L2'
- factor: 0.00000
+ name: L2
+ factor: 0.0
Architecture:
model_type: table
@@ -67,15 +65,15 @@ PostProcess:
Metric:
name: TableMetric
main_indicator: acc
- compute_bbox_metric: true # cost many time, set False for training
+ compute_bbox_metric: False
Train:
dataset:
name: PubTabDataSet
- data_dir: /home/zhoujun20/table/PubTabNe/pubtabnet/train/
- label_file_list: [/home/zhoujun20/table/PubTabNe/pubtabnet/PubTabNet_2.0.0_train.jsonl]
+ data_dir: train_data/table/pubtabnet/train/
+ label_file_list: [train_data/table/pubtabnet/PubTabNet_2.0.0_train.jsonl]
transforms:
- - DecodeImage: # load image
+ - DecodeImage:
img_mode: BGR
channel_first: False
- TableMasterLabelEncode:
@@ -88,20 +86,20 @@ Train:
- PaddingTableImage:
size: [480, 480]
- TableBoxEncode:
- use_xywh: true
+ use_xywh: True
- NormalizeImage:
scale: 1./255.
mean: [0.5, 0.5, 0.5]
std: [0.5, 0.5, 0.5]
- order: 'hwc'
- - ToCHWImage:
+ order: hwc
+ - ToCHWImage: null
- KeepKeys:
- keep_keys: ['image', 'structure', 'bboxes', 'bbox_masks','shape']
+ keep_keys: [image, structure, bboxes, bbox_masks, shape]
loader:
shuffle: True
- batch_size_per_card: 8
+ batch_size_per_card: 10
drop_last: True
- num_workers: 1
+ num_workers: 8
Eval:
dataset:
@@ -109,7 +107,7 @@ Eval:
data_dir: /home/zhoujun20/table/PubTabNe/pubtabnet/val/
label_file_list: [/home/zhoujun20/table/PubTabNe/pubtabnet/val_500.jsonl]
transforms:
- - DecodeImage: # load image
+ - DecodeImage:
img_mode: BGR
channel_first: False
- TableMasterLabelEncode:
@@ -120,19 +118,19 @@ Eval:
max_len: 480
resize_bboxes: True
- PaddingTableImage:
- size: [ 480, 480 ]
+ size: [480, 480]
- TableBoxEncode:
- use_xywh: true
+ use_xywh: True
- NormalizeImage:
scale: 1./255.
- mean: [ 0.5, 0.5, 0.5 ]
- std: [ 0.5, 0.5, 0.5 ]
- order: 'hwc'
- - ToCHWImage:
+ mean: [0.5, 0.5, 0.5]
+ std: [0.5, 0.5, 0.5]
+ order: hwc
+ - ToCHWImage: null
- KeepKeys:
- keep_keys: [ 'image', 'structure', 'bboxes', 'bbox_masks','shape' ]
+ keep_keys: [image, structure, bboxes, bbox_masks, shape]
loader:
shuffle: False
drop_last: False
- batch_size_per_card: 2
- num_workers: 8
+ batch_size_per_card: 10
+ num_workers: 8
\ No newline at end of file
diff --git a/doc/doc_ch/algorithm_det_fcenet.md b/doc/doc_ch/algorithm_det_fcenet.md
index bd2e734204d32bbf575ddea9f889953a72582c59..a70caa29fb590c7b7bf8d587a40676757a2ba4ce 100644
--- a/doc/doc_ch/algorithm_det_fcenet.md
+++ b/doc/doc_ch/algorithm_det_fcenet.md
@@ -1,17 +1,15 @@
# FCENet
-- [1. Introduction](#1)
-- [2. Environment](#2)
-- [3. Model Training / Evaluation / Prediction](#3)
- - [3.1 Training](#3-1)
- - [3.2 Evaluation](#3-2)
- - [3.3 Prediction](#3-3)
-- [4. Inference and Deployment](#4)
- - [4.1 Python Inference](#4-1)
- - [4.2 C++ Inference](#4-2)
- - [4.3 Serving](#4-3)
- - [4.4 More](#4-4)
-- [5. FAQ](#5)
+- [1. Introduction](#1-introduction)
+- [2. Environment](#2-environment)
+- [3. Model Training / Evaluation / Prediction](#3-model-training--evaluation--prediction)
+- [4. Inference and Deployment](#4-inference-and-deployment)
+ - [4.1 Python Inference](#41-python-inference)
+ - [4.2 C++ Inference](#42-c-inference)
+ - [4.3 Serving](#43-serving)
+ - [4.4 More](#44-more)
+- [5. FAQ](#5-faq)
+- [Citation](#citation)
## 1. Introduction
diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md
index ef96f6ec122594afd115b333ffc18fb836253b79..84af5fdcce6ac332af8014b1a5a7d98206489607 100755
--- a/doc/doc_ch/algorithm_overview.md
+++ b/doc/doc_ch/algorithm_overview.md
@@ -1,9 +1,10 @@
# OCR Algorithms
- [1. Two-stage Algorithms](#1-two-stage-algorithms)
- - [1.1 Text Detection Algorithms](#11-text-detection-algorithms)
- - [1.2 Text Recognition Algorithms](#12-text-recognition-algorithms)
+ - [1.1 Text Detection Algorithms](#11-text-detection-algorithms)
+ - [1.2 Text Recognition Algorithms](#12-text-recognition-algorithms)
- [2. End-to-end Algorithms](#2-end-to-end-algorithms)
+- [3. Table Recognition Algorithms](#3-table-recognition-algorithms)
This document lists the OCR algorithms supported by PaddleOCR, as well as the models and metrics of each algorithm on **English public datasets**. It is mainly used for algorithm introduction and performance comparison. For more models on other datasets, including Chinese, please refer to [PP-OCR v2.0 series model list](./models_list.md).
@@ -96,3 +97,14 @@
Supported end-to-end OCR algorithms (click the link for the tutorial):
- [x] [PGNet](./algorithm_e2e_pgnet.md)
+
+## 3. Table Recognition Algorithms
+
+Supported table recognition algorithms (click the link for the tutorial):
+- [x] [TableMaster](./algorithm_table_master.md)
+
+On the PubTabNet table recognition public dataset, the results are as follows:
+
+|Model|Backbone|Config|Acc|Download link|
+|---|---|---|---|---|
+|TableMaster|TableResNetExtra|[configs/table/table_master.yml](../../configs/table/table_master.yml)|77.47%|[trained model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_train.tar)/[inference model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_infer.tar)|
diff --git a/doc/doc_ch/algorithm_table_master.md b/doc/doc_ch/algorithm_table_master.md
new file mode 100644
index 0000000000000000000000000000000000000000..6bfd0f36fc05a4adf81d6bb2d306a74d1525650f
--- /dev/null
+++ b/doc/doc_ch/algorithm_table_master.md
@@ -0,0 +1,114 @@
+# Table Recognition Algorithm-TableMASTER
+
+- [1. Introduction](#1-introduction)
+- [2. Environment](#2-environment)
+- [3. Model Training / Evaluation / Prediction](#3-model-training--evaluation--prediction)
+- [4. Inference and Deployment](#4-inference-and-deployment)
+ - [4.1 Python Inference](#41-python-inference)
+ - [4.2 C++ Inference](#42-c-inference)
+ - [4.3 Serving](#43-serving)
+ - [4.4 More](#44-more)
+- [5. FAQ](#5-faq)
+- [Citation](#citation)
+
+
+## 1. Introduction
+
+Paper:
+> [TableMaster: PINGAN-VCGROUP’S SOLUTION FOR ICDAR 2021 COMPETITION ON SCIENTIFIC LITERATURE PARSING TASK B: TABLE RECOGNITION TO HTML](https://arxiv.org/pdf/2105.01848.pdf)
+> Ye, Jiaquan and Qi, Xianbiao and He, Yelin and Chen, Yihao and Gu, Dengyi and Gao, Peng and Xiao, Rong
+> 2021
+
+On the PubTabNet table recognition public dataset, the reproduced accuracy of the algorithm is as follows:
+
+|Model|Backbone|Config|Acc|Download link|
+| --- | --- | --- | --- | --- |
+|TableMaster|TableResNetExtra|[configs/table/table_master.yml](../../configs/table/table_master.yml)|77.47%|[trained model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_train.tar)/[inference model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_infer.tar)|
+
+
+
+## 2. Environment
+Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code.
+
+
+
+## 3. Model Training / Evaluation / Prediction
+
+The above TableMaster model is trained on the PubTabNet table recognition public dataset. For the dataset download, please refer to [table_datasets](./dataset/table_datasets.md).
+
+After the data is downloaded, please refer to the [text recognition training tutorial](./recognition.md) for training. PaddleOCR has modularized the code, so training a different model only requires **switching the configuration file**; a minimal command sketch is given below.
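+
+A minimal sketch of the workflow, assuming the PubTabNet paths configured in `configs/table/table_master.yml` match your local data layout:
+
+```shell
+# single-GPU training
+python3 tools/train.py -c configs/table/table_master.yml
+
+# multi-GPU training, e.g. on GPUs 0-3
+python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/table/table_master.yml
+
+# evaluate a trained checkpoint
+python3 tools/eval.py -c configs/table/table_master.yml -o Global.checkpoints=./output/table_master/best_accuracy
+```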
+
+
+## 4. Inference and Deployment
+
+
+### 4.1 Python Inference
+First, convert the best model obtained in training into an inference model. Taking the model trained on the PubTabNet dataset with the TableResNetExtra backbone as an example ([model download link](https://paddleocr.bj.bcebos.com/contribution/table_master.tar)), you can use the following command to convert it:
+
+```shell
+# Note: set the pretrained_model path to a local path.
+python3 tools/export_model.py -c configs/table/table_master.yml -o Global.pretrained_model=output/table_master/best_accuracy Global.save_inference_dir=./inference/table_master
+```
+
+**Note:**
+- If you trained the model on your own dataset and adjusted the dictionary file, please check that `character_dict_path` in the configuration file points to the correct dictionary file; it can also be overridden on the command line, as sketched below.
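+
+A hedged example of such an override via `-o` (the dictionary path below is a placeholder for your own file, not a file shipped with PaddleOCR):
+
+```shell
+# hypothetical custom dictionary path; replace it with your own file
+python3 tools/export_model.py -c configs/table/table_master.yml -o Global.pretrained_model=output/table_master/best_accuracy Global.character_dict_path=./your_structure_dict.txt Global.save_inference_dir=./inference/table_master
+```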
+
+After the conversion succeeds, there are three files in the directory:
+```
+/inference/table_master/
+    ├── inference.pdiparams         # parameter file of the inference model
+    ├── inference.pdiparams.info    # parameter information of the inference model, can be ignored
+    └── inference.pdmodel           # program file of the inference model
+```
+
+
+Run the following command for model inference:
+
+```shell
+cd ppstructure/
+python3.7 table/predict_structure.py --table_model_dir=../output/table_master/table_structure_tablemaster_infer/ --table_algorithm=TableMaster --table_char_dict_path=../ppocr/utils/dict/table_master_structure_dict.txt --table_max_len=480 --image_dir=docs/table/table.jpg
+# To predict all images in a folder, set image_dir to the folder path, e.g. --image_dir='docs/table'.
+```
+
+After running the command, the prediction result of the image above (the table structure tokens and the coordinates of every cell) is printed to the screen, and a visualization of the cell coordinates is saved. The result is as follows:
+```shell
+[2022/06/16 13:06:54] ppocr INFO: result: ['<html>', '<body>', '<table>', '<tr>', '<td></td>', '<td></td>', '<td></td>', '<td></td>', '<td></td>', '</tr>', ..., '</table>', '</body>', '</html>'], [[72.17591094970703, 10.759100914001465, 60.29658508300781, 16.6805362701416], [161.85562133789062, 10.884308815002441, 14.9495210647583, 16.727018356323242], [277.79876708984375, 29.54340362548828, 31.490320205688477, 18.143272399902344],
+...
+[336.11724853515625, 280.3601989746094, 39.456939697265625, 18.121286392211914]]
+[2022/06/16 13:06:54] ppocr INFO: save vis result to ./output/table.jpg
+[2022/06/16 13:06:54] ppocr INFO: Predict time of docs/table/table.jpg: 17.36806297302246
+```
+
+**Note**:
+
+- TableMaster inference is relatively slow; using a GPU is recommended (see the sketch below).
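+
+For example, a hedged variant of the command above that requests GPU execution (assuming a GPU build of PaddlePaddle is installed and the shared `--use_gpu` inference flag is available in your version):
+
+```shell
+python3.7 table/predict_structure.py --table_model_dir=../output/table_master/table_structure_tablemaster_infer/ --table_algorithm=TableMaster --table_char_dict_path=../ppocr/utils/dict/table_master_structure_dict.txt --table_max_len=480 --image_dir=docs/table/table.jpg --use_gpu=True
+```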
+
+
+### 4.2 C++ Inference
+
+Not supported yet, because the C++ pre- and post-processing for TableMaster has not been implemented.
+
+
+### 4.3 Serving
+
+Not supported yet
+
+
+### 4.4 More
+
+Not supported yet
+
+
+## 5. FAQ
+
+## Citation
+
+```bibtex
+@article{ye2021pingan,
+ title={PingAn-VCGroup's Solution for ICDAR 2021 Competition on Scientific Literature Parsing Task B: Table Recognition to HTML},
+ author={Ye, Jiaquan and Qi, Xianbiao and He, Yelin and Chen, Yihao and Gu, Dengyi and Gao, Peng and Xiao, Rong},
+ journal={arXiv preprint arXiv:2105.01848},
+ year={2021}
+}
+```
diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md
index bc96cdf2351f10454441e20d319e485019bbec00..cd277c74a38adb11121f353f7bfd07d7c255f61e 100755
--- a/doc/doc_en/algorithm_overview_en.md
+++ b/doc/doc_en/algorithm_overview_en.md
@@ -4,6 +4,7 @@
* [1.1 Text Detection Algorithms](#11)
* [1.2 Text Recognition Algorithms](#12)
- [2. End-to-end Algorithms](#2)
+- [3. Table Recognition Algorithms](#3)
This tutorial lists the OCR algorithms supported by PaddleOCR, as well as the models and metrics of each algorithm on **English public datasets**. It is mainly used for algorithm introduction and algorithm performance comparison. For more models on other datasets including Chinese, please refer to [PP-OCR v2.0 models list](./models_list_en.md).
@@ -95,3 +96,15 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r
Supported end-to-end algorithms (Click the link to get the tutorial):
- [x] [PGNet](./algorithm_e2e_pgnet_en.md)
+
+
+## 3. Table Recognition Algorithms
+
+Supported table recognition algorithms (Click the link to get the tutorial):
+- [x] [TableMaster](./algorithm_table_master_en.md)
+
+On the PubTabNet dataset, the results of the algorithm are as follows:
+
+|Model|Backbone|Config|Acc|Download link|
+|---|---|---|---|---|
+|TableMaster|TableResNetExtra|[configs/table/table_master.yml](../../configs/table/table_master.yml)|77.47%|[trained model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_train.tar)/[inference model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_infer.tar)|
diff --git a/doc/doc_en/algorithm_table_master_en.md b/doc/doc_en/algorithm_table_master_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..a557a609017baf69da8777d062f4b34e6e2c5973
--- /dev/null
+++ b/doc/doc_en/algorithm_table_master_en.md
@@ -0,0 +1,112 @@
+# Table Recognition Algorithm-TableMASTER
+
+- [1. Introduction](#1-introduction)
+- [2. Environment](#2-environment)
+- [3. Model Training / Evaluation / Prediction](#3-model-training--evaluation--prediction)
+- [4. Inference and Deployment](#4-inference-and-deployment)
+ - [4.1 Python Inference](#41-python-inference)
+ - [4.2 C++ Inference](#42-c-inference)
+ - [4.3 Serving](#43-serving)
+ - [4.4 More](#44-more)
+- [5. FAQ](#5-faq)
+- [Citation](#citation)
+
+
+## 1. Introduction
+
+Paper:
+> [TableMaster: PINGAN-VCGROUP’S SOLUTION FOR ICDAR 2021 COMPETITION ON SCIENTIFIC LITERATURE PARSING TASK B: TABLE RECOGNITION TO HTML](https://arxiv.org/pdf/2105.01848.pdf)
+> Ye, Jiaquan and Qi, Xianbiao and He, Yelin and Chen, Yihao and Gu, Dengyi and Gao, Peng and Xiao, Rong
+> 2021
+
+
+On the PubTabNet table recognition public dataset, the reproduced accuracy of the algorithm is as follows:
+
+|Model|Backbone|Config|Acc|Download link|
+| --- | --- | --- | --- | --- |
+|TableMaster|TableResNetExtra|[configs/table/table_master.yml](../../configs/table/table_master.yml)|77.47%|[train model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_train.tar)/[inference model](https://paddleocr.bj.bcebos.com/ppstructure/models/tablemaster/table_structure_tablemaster_infer.tar)|
+
+
+
+## 2. Environment
+Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code.
+
+
+
+## 3. Model Training / Evaluation / Prediction
+
+The above TableMaster model is trained using the PubTabNet table recognition public dataset. For the download of the dataset, please refer to [table_datasets](./dataset/table_datasets_en.md).
+
+After the data download is complete, please refer to the [text recognition training tutorial](./recognition_en.md) for training. PaddleOCR has modularized the code, so you only need to **replace the configuration file** to train a different model; a minimal command sketch is given below.
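+
+A minimal sketch of the workflow, assuming the PubTabNet paths configured in `configs/table/table_master.yml` match your local data layout:
+
+```shell
+# single-GPU training
+python3 tools/train.py -c configs/table/table_master.yml
+
+# multi-GPU training, e.g. on GPUs 0-3
+python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/table/table_master.yml
+
+# evaluate a trained checkpoint
+python3 tools/eval.py -c configs/table/table_master.yml -o Global.checkpoints=./output/table_master/best_accuracy
+```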
+
+
+
+## 4. Inference and Deployment
+
+
+### 4.1 Python Inference
+
+First, convert the model saved during TableMaster training into an inference model. Taking the model trained on the PubTabNet dataset with the TableResNetExtra backbone as an example ([model download link](https://paddleocr.bj.bcebos.com/contribution/table_master.tar)), you can use the following command to convert it:
+
+
+```shell
+python3 tools/export_model.py -c configs/table/table_master.yml -o Global.pretrained_model=output/table_master/best_accuracy Global.save_inference_dir=./inference/table_master
+```
+
+**Note:**
+- If you trained the model on your own dataset and adjusted the dictionary file, please make sure that `character_dict_path` in the configuration file points to the correct dictionary file; it can also be overridden on the command line, as sketched below.
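+
+A hedged example of such an override via `-o` (the dictionary path below is a placeholder for your own file, not a file shipped with PaddleOCR):
+
+```shell
+# hypothetical custom dictionary path; replace it with your own file
+python3 tools/export_model.py -c configs/table/table_master.yml -o Global.pretrained_model=output/table_master/best_accuracy Global.character_dict_path=./your_structure_dict.txt Global.save_inference_dir=./inference/table_master
+```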
+
+
+Execute the following command for model inference:
+
+```shell
+cd ppstructure/
+# When predicting all images in a folder, you can modify image_dir to a folder, such as --image_dir='docs/table'.
+python3.7 table/predict_structure.py --table_model_dir=../output/table_master/table_structure_tablemaster_infer/ --table_algorithm=TableMaster --table_char_dict_path=../ppocr/utils/dict/table_master_structure_dict.txt --table_max_len=480 --image_dir=docs/table/table.jpg
+
+```
+
+After executing the command, the prediction result of the image above (the table structure tokens and the coordinates of each cell) is printed to the screen, and a visualization of the cell coordinates is saved. The result is as follows:
+
+```shell
+[2022/06/16 13:06:54] ppocr INFO: result: ['<html>', '<body>', '<table>', '<tr>', '<td></td>', '<td></td>', '<td></td>', '<td></td>', '<td></td>', '</tr>', ..., '</table>', '</body>', '</html>'], [[72.17591094970703, 10.759100914001465, 60.29658508300781, 16.6805362701416], [161.85562133789062, 10.884308815002441, 14.9495210647583, 16.727018356323242], [277.79876708984375, 29.54340362548828, 31.490320205688477, 18.143272399902344],
+...
+[336.11724853515625, 280.3601989746094, 39.456939697265625, 18.121286392211914]]
+[2022/06/16 13:06:54] ppocr INFO: save vis result to ./output/table.jpg
+[2022/06/16 13:06:54] ppocr INFO: Predict time of docs/table/table.jpg: 17.36806297302246
+```
+
+**Note**:
+
+- TableMaster inference is relatively slow; using a GPU is recommended (see the sketch below).
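+
+For example, a hedged variant of the command above that requests GPU execution (assuming a GPU build of PaddlePaddle is installed and the shared `--use_gpu` inference flag is available in your version):
+
+```shell
+python3.7 table/predict_structure.py --table_model_dir=../output/table_master/table_structure_tablemaster_infer/ --table_algorithm=TableMaster --table_char_dict_path=../ppocr/utils/dict/table_master_structure_dict.txt --table_max_len=480 --image_dir=docs/table/table.jpg --use_gpu=True
+```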
+
+
+### 4.2 C++ Inference
+
+Since the pre- and post-processing is not implemented in C++, TableMaster does not support C++ inference yet.
+
+
+
+### 4.3 Serving
+
+Not supported
+
+
+### 4.4 More
+
+Not supported
+
+
+## 5. FAQ
+
+## Citation
+
+```bibtex
+@article{ye2021pingan,
+ title={PingAn-VCGroup's Solution for ICDAR 2021 Competition on Scientific Literature Parsing Task B: Table Recognition to HTML},
+ author={Ye, Jiaquan and Qi, Xianbiao and He, Yelin and Chen, Yihao and Gu, Dengyi and Gao, Peng and Xiao, Rong},
+ journal={arXiv preprint arXiv:2105.01848},
+ year={2021}
+}
+```
diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py
index 007927011a5134129b322940418df8f124746c99..96f3c98674ce3e3be973b0e2989ba555ba1a6389 100644
--- a/ppocr/data/imaug/label_ops.py
+++ b/ppocr/data/imaug/label_ops.py
@@ -670,6 +670,10 @@ class TableLabelEncode(AttnLabelEncode):
return data
def _merge_no_span_structure(self, structure):
+ """
+    The code in this function is adapted from:
+ https://github.com/JiaquanYe/TableMASTER-mmocr/blob/master/table_recognition/data_preprocess.py
+ """
new_structure = []
i = 0
while i < len(structure):
@@ -682,6 +686,11 @@ class TableLabelEncode(AttnLabelEncode):
return new_structure
def _replace_empty_cell_token(self, token_list, cells):
+ """
+    The code in this function is adapted from:
+ https://github.com/JiaquanYe/TableMASTER-mmocr/blob/master/table_recognition/data_preprocess.py
+ """
+
bbox_idx = 0
add_empty_bbox_token_list = []
for token in token_list:
diff --git a/ppocr/losses/table_master_loss.py b/ppocr/losses/table_master_loss.py
index 50a773dd9a9c39d6e7c323011847f8362414a43a..216c4e521a9580e0fc8d897f918aedbb679fbc1b 100644
--- a/ppocr/losses/table_master_loss.py
+++ b/ppocr/losses/table_master_loss.py
@@ -11,6 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+This code is adapted from:
+https://github.com/JiaquanYe/TableMASTER-mmocr/tree/master/mmocr/models/textrecog/losses
+"""
+
import paddle
from paddle import nn
diff --git a/ppocr/modeling/backbones/table_master_resnet.py b/ppocr/modeling/backbones/table_master_resnet.py
index 82b4f37a7420982415f21fe50c6200aa16e58314..f1c506ca45d3f73cda91f48ac1ba6f3ebbe87bda 100644
--- a/ppocr/modeling/backbones/table_master_resnet.py
+++ b/ppocr/modeling/backbones/table_master_resnet.py
@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+This code is adapted from:
+https://github.com/JiaquanYe/TableMASTER-mmocr/blob/master/mmocr/models/textrecog/backbones/table_resnet_extra.py
+"""
import paddle
import paddle.nn as nn
diff --git a/ppocr/modeling/heads/table_master_head.py b/ppocr/modeling/heads/table_master_head.py
index 4da6e9b59f78db5bfe1557317e11204f97544aa6..887630a87e6d04b6b993b9afd068461f70df158f 100644
--- a/ppocr/modeling/heads/table_master_head.py
+++ b/ppocr/modeling/heads/table_master_head.py
@@ -11,6 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+This code is adapted from:
+https://github.com/JiaquanYe/TableMASTER-mmocr/blob/master/mmocr/models/textrecog/decoders/master_decoder.py
+"""
+
import copy
import math
import paddle
diff --git a/ppocr/postprocess/__init__.py b/ppocr/postprocess/__init__.py
index 26a23f1ea476c81a092fcbdd11ff79e4e38ec2e8..1d414eb2e8562925f461b0c6f6ce15774b81bb8f 100644
--- a/ppocr/postprocess/__init__.py
+++ b/ppocr/postprocess/__init__.py
@@ -26,7 +26,7 @@ from .east_postprocess import EASTPostProcess
from .sast_postprocess import SASTPostProcess
from .fce_postprocess import FCEPostProcess
from .rec_postprocess import CTCLabelDecode, AttnLabelDecode, SRNLabelDecode, \
- DistillationCTCLabelDecode, TableLabelDecode, NRTRLabelDecode, SARLabelDecode, \
+ DistillationCTCLabelDecode, NRTRLabelDecode, SARLabelDecode, \
SEEDLabelDecode, PRENLabelDecode, ViTSTRLabelDecode, ABINetLabelDecode
from .cls_postprocess import ClsPostProcess
from .pg_postprocess import PGPostProcess
diff --git a/ppstructure/docs/models_list.md b/ppstructure/docs/models_list.md
index dabce3a5149a88833d38a4395e31ac1f82306c4f..42d44009dad1ba1b07bb410c199993c6f79f3d5d 100644
--- a/ppstructure/docs/models_list.md
+++ b/ppstructure/docs/models_list.md
@@ -35,7 +35,7 @@
|Model name|Description|Inference model size|Download|
| --- | --- | --- | --- |
-|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenes, trained on the PubLayNet dataset|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) |
+|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenes, trained on the PubTabNet dataset|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) |
## 3. VQA Models