diff --git a/deploy/pdserving/config.yml b/deploy/pdserving/config.yml
index 19cd9325ee8b241fd591678b9ba6452de9bec025..8014cbd362461ead5d065f96a50eb3031a60fa67 100644
--- a/deploy/pdserving/config.yml
+++ b/deploy/pdserving/config.yml
@@ -36,8 +36,8 @@ op:
#det模型路径
model_config: ./ppocr_det_v3_serving
- #Fetch结果列表,以client_config中fetch_var的alias_name为准
- fetch_list: ["sigmoid_0.tmp_0"]
+ #Fetch结果列表,以client_config中fetch_var的alias_name为准,不设置默认取全部输出变量
+ #fetch_list: ["sigmoid_0.tmp_0"]
#计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡
devices: "0"
@@ -62,8 +62,8 @@ op:
#rec模型路径
model_config: ./ppocr_rec_v3_serving
- #Fetch结果列表,以client_config中fetch_var的alias_name为准
- fetch_list: ["softmax_5.tmp_0"]
+ #Fetch结果列表,以client_config中fetch_var的alias_name为准,不设置默认取全部输出变量
+ #fetch_list:
#计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡
devices: "0"
diff --git a/deploy/pdserving/ocr_reader.py b/deploy/pdserving/ocr_reader.py
index 6a2d57b679d69ab11ac6f0fd74c47a342b391545..75f0f3d5c3aea488f82ec01a72e20310663d565b 100644
--- a/deploy/pdserving/ocr_reader.py
+++ b/deploy/pdserving/ocr_reader.py
@@ -393,7 +393,7 @@ class OCRReader(object):
return norm_img_batch[0]
def postprocess(self, outputs, with_score=False):
- preds = outputs["softmax_5.tmp_0"]
+ preds = list(outputs.values())[0]
try:
preds = preds.numpy()
except:
@@ -404,8 +404,11 @@ class OCRReader(object):
preds_idx, preds_prob, is_remove_duplicate=True)
return text
-from argparse import ArgumentParser,RawDescriptionHelpFormatter
+
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
import yaml
+
+
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
@@ -441,16 +444,16 @@ class ArgsParser(ArgumentParser):
s = s.strip()
k, v = s.split('=')
v = self._parse_helper(v)
- print(k,v, type(v))
+ print(k, v, type(v))
cur = config
parent = cur
for kk in k.split("."):
if kk not in cur:
- cur[kk] = {}
- parent = cur
- cur = cur[kk]
+ cur[kk] = {}
+ parent = cur
+ cur = cur[kk]
else:
- parent = cur
- cur = cur[kk]
+ parent = cur
+ cur = cur[kk]
parent[k.split(".")[-1]] = v
- return config
\ No newline at end of file
+ return config
diff --git a/deploy/pdserving/web_service.py b/deploy/pdserving/web_service.py
index 98e2dfba2f5abd3fc36bf3743b23f7eb7be3b9c4..f05806ce030238144568a3ca137798a9132027e4 100644
--- a/deploy/pdserving/web_service.py
+++ b/deploy/pdserving/web_service.py
@@ -56,7 +56,7 @@ class DetOp(Op):
return {"x": det_img[np.newaxis, :].copy()}, False, None, ""
def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
- det_out = fetch_dict["sigmoid_0.tmp_0"]
+ det_out = list(fetch_dict.values())[0]
ratio_list = [
float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
]
diff --git a/deploy/pdserving/web_service_det.py b/deploy/pdserving/web_service_det.py
index 7584608a9fed4bea93caa5c814c0450566696d56..4a62ab861d8338194da826cdcea2d42de189c994 100644
--- a/deploy/pdserving/web_service_det.py
+++ b/deploy/pdserving/web_service_det.py
@@ -55,7 +55,7 @@ class DetOp(Op):
return {"x": det_img[np.newaxis, :].copy()}, False, None, ""
def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
- det_out = fetch_dict["sigmoid_0.tmp_0"]
+ det_out = list(fetch_dict.values())[0]
ratio_list = [
float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
]
diff --git a/doc/doc_ch/PP-OCRv3_introduction.md b/doc/doc_ch/PP-OCRv3_introduction.md
index dc0271f294cf43a26477dbc974b77297e04122ac..c7ef4f01569437959e310f2301d250c1660113a7 100644
--- a/doc/doc_ch/PP-OCRv3_introduction.md
+++ b/doc/doc_ch/PP-OCRv3_introduction.md
@@ -8,123 +8,214 @@
- [4. 端到端评估](#4)
-
## 1. 简介
-PP-OCRv3在PP-OCRv2的基础上进一步升级。检测模型仍然基于DB算法,优化策略采用了带残差注意力机制的FPN结构RSEFPN、增大感受野的PAN结构LKPAN、基于DML训练的更优的教师模型;识别模型将base模型从CRNN替换成了IJCAI 2022论文[SVTR](),并采用SVTR轻量化、带指导训练CTC、数据增广策略RecConAug、自监督训练的更好的预训练模型、无标签数据的使用进行模型加速和效果提升。更多细节请参考PP-OCRv3[技术报告](./PP-OCRv3_introduction.md)。
-
-PP-OCRv3系统pipeline如下:
+PP-OCRv3在PP-OCRv2的基础上进一步升级。整体的框架图保持了与PP-OCRv2相同的pipeline,针对检测模型和识别模型进行了优化。其中,检测模型仍基于DB模型优化,而识别模型不再采用CRNN,换成了会议IJCAI 2022中的最新方法[SVTR](https://arxiv.org/abs/2205.00159),PP-OCRv3系统框图如下所示(粉色框中为PP-OCRv3新增策略):
+
+从算法改进思路上看,分别针对检测和识别模型,进行了共八个方面的改进:
+
+
+- 检测模型优化:
+ - LK-PAN:增大感受野的PAN模块;
+ - DML:教师模型互学习策略;
+ - RSE-FPN:带残差注意力机制的FPN模块;
+
+
+- 识别模型优化:
+ - SVTR_LCNet:轻量级文本识别网络;
+ - GTC:Attention指导CTC训练策略;
+ - TextConAug:丰富图像上下文信息的数据增广策略;
+ - TextRotNet:自监督的预训练模型;
+ - UIM:无标签数据挖掘方案。
+
+从效果上看,速度可比情况下,多种场景精度均有大幅提升:
+- 中文场景,相对于PP-OCRv2中文模型提升超5%;
+- 英文数字场景,相比于PP-OCRv2英文模型提升11%;
+- 多语言场景,优化80+语种识别效果,平均准确率提升超5%。
+
+
## 2. 检测优化
-PP-OCRv3采用PP-OCRv2的[CML](https://arxiv.org/pdf/2109.03144.pdf)蒸馏策略,在蒸馏的student模型、teacher模型精度提升,CML蒸馏策略上分别做了优化。
+PP-OCRv3检测模型整体训练方案仍采用PP-OCRv2的[CML](https://arxiv.org/pdf/2109.03144.pdf)蒸馏策略,CML蒸馏包含一个教师模型和两个学生模型,在训练过程中,教师模型不参与训练,学生模型受到来自标签和教师模型的监督,同时两个学生模型互相学习。PP-OCRv3分别针对教师模型、学生模型进一步优化。其中,在对教师模型优化时,采用了增大感受野的PAN模块LK-PAN和DML蒸馏策略;在对学生模型优化时,采用了带残差注意力机制的FPN模块RSE-FPN。
-- 在蒸馏student模型精度提升方面,提出了基于残差结构的通道注意力模块RSEFPN(Residual Squeeze-and-Excitation FPN),用于提升student模型精度和召回。
-
-RSEFPN的网络结构如下图所示,RSEFPN在PP-OCRv2的FPN基础上,将FPN中的卷积层更换为了通道注意力结构的RSEConv层。
+PP-OCRv3 CML蒸馏训练框架图如下:
-
+
-RSEFPN将PP-OCR检测模型的精度hmean从81.3%提升到84.5%。模型大小从3M变为3.6M。
+消融实验如下:
-*注:PP-OCRv2的FPN通道数仅为96和24,如果直接用SE模块代替FPN的卷积会导致精度下降,RSEConv引入残差结构可以防止训练中包含重要特征的通道被抑制。*
+|序号|策略|模型大小|hmean|速度(cpu + mkldnn)|
+|-|-|-|-|-|
+|baseline teacher|PP-OCR server|49M|83.2%|171ms|
+|teacher1|DB-R50-LK-PAN|124M|85.0%|396ms|
+|teacher2|DB-R50-LK-PAN-DML|124M|86.0%|396ms|
+|baseline student|PP-OCRv2|3M|83.2%|117ms|
+|student0|DB-MV3-RSE-FPN|3.6M|84.5%|124ms|
+|student1|DB-MV3-CML(teacher2)|3M|84.3%|117ms|
+|student2|DB-MV3-RSE-FPN-CML(teacher2)|3.6M|85.4%|124ms|
-- 在蒸馏的teacher模型精度提升方面,提出了LKPAN结构替换PP-OCRv2的FPN结构,并且使用ResNet50作为Backbone,更大的模型带来更多的精度提升。另外,对teacher模型使用[DML](https://arxiv.org/abs/1706.00384)蒸馏策略进一步提升teacher模型的精度。最终teacher的模型指标相比ppocr_server_v2.0从83.2%提升到了86.0%。
+测试环境: Intel Gold 6148 CPU,预测时开启MKLDNN加速。
-*注:[PP-OCRv2的FPN结构](https://github.com/PaddlePaddle/PaddleOCR/blob/77acb3bfe51c8a46c684527f73cd218cefedb4a3/ppocr/modeling/necks/db_fpn.py#L107)对DB算法FPN结构做了轻量级设计*
+**(1)增大感受野的PAN模块LK-PAN(Large Kernel PAN)**
-LKPAN的网络结构如下图所示:
+LK-PAN(Large Kernel PAN)是一个具有更大感受野的轻量级[PAN](https://arxiv.org/pdf/1803.01534.pdf)结构。在LK-PAN的path augmentation中,使用卷积核为`9*9`的卷积;更大的卷积核意味着更大的感受野,更容易检测大字体的文字以及极端长宽比的文字。LK-PAN将PP-OCR server检测模型的hmean从83.2%提升到85.0%。
-
+
-LKPAN(Large Kernel PAN)是一个具有更大感受野的轻量级[PAN](https://arxiv.org/pdf/1803.01534.pdf)结构。在LKPAN的path augmentation中,使用kernel size为`9*9`的卷积;更大的kernel size意味着更大的感受野,更容易检测大字体的文字以及极端长宽比的文字。LKPAN将PP-OCR检测模型的精度hmean从81.3%提升到84.9%。
+**(2)DML(Deep Mutual Learning)蒸馏进一步提升teacher模型精度。**
-*注:LKPAN相比RSEFPN有更多的精度提升,但是考虑到模型大小和预测速度等因素,在student模型中使用RSEFPN。*
+[DML](https://arxiv.org/abs/1706.00384) 互学习蒸馏方法,通过两个结构相同的模型互相学习,相比于传统的教师模型监督学生模型的蒸馏方法,DML 摆脱了对大的教师模型的依赖,蒸馏训练的流程更加简单。在PP-OCRv3的检测模型训练中,使用DML蒸馏策略进一步提升教师模型的精度,并使用ResNet50作为Backbone。DML策略将教师模型的Hmean从85%进一步提升至86%。
-采用上述策略,PP-OCRv3相比PP-OCRv2,hmean指标从83.3%提升到85.4%;预测速度从平均117ms/image变为124ms/image。
+教师模型DML训练流程图如下:
-3. PP-OCRv3检测模型消融实验
+
+
+
-|序号|策略|模型大小|hmean|Intel Gold 6148CPU+mkldnn预测耗时|
-|-|-|-|-|-|
-|0|PP-OCR|3M|81.3%|117ms|
-|1|PP-OCRV2|3M|83.3%|117ms|
-|2|0 + RESFPN|3.6M|84.5%|124ms|
-|3|0 + LKPAN|4.6M|84.9%|156ms|
-|4|ppocr_server_v2.0 |124M|83.2%||171ms|
-|5|teacher + DML + LKPAN|124M|86.0%|396ms|
-|6|0 + 2 + 5 + CML|3.6M|85.4%|124ms|
+**(3)带残差注意力机制的FPN模块RSE-FPN(Residual SE-FPN)。**
+
+残差结构的通道注意力模块RSE-FPN结构如下图所示,RSE-FPN在PP-OCRv2的FPN基础上,将FPN中的卷积层更换为通道注意力结构的RSEConv层。考虑到PP-OCRv2的FPN通道数仅为96和24,如果直接用SEblock代替FPN中卷积会导致某些通道的特征被抑制,进而导致精度下降,RSEConv引入残差结构防止训练中包含重要特征的通道被抑制。直接添加RSE-FPN模块,可将PP-OCR检测模型的精度Hmean从81.3%提升到84.5%。在学生模型中加入RSE-FPN后进行CML蒸馏,比不加时,Hmean指标从83.2%提升到84.3%。
+
+
+
## 3. 识别优化
-[SVTR](https://arxiv.org/abs/2205.00159) 证明了强大的单视觉模型(无需序列模型)即可高效准确完成文本识别任务,在中英文数据上均有优秀的表现。经过实验验证,SVTR_Tiny在自建的 [中文数据集上](https://arxiv.org/abs/2109.03144) ,识别精度可以提升10.7%,网络结构如下所示:
+PP-OCRv3识别模型从网络结构、训练策略、数据增广等多个方面进行了优化,PP-OCRv3系统流程图如下所示:
-
+
+
+
-由于 MKLDNN 加速库支持的模型结构有限,SVTR 在CPU+MKLDNN上相比PP-OCRv2慢了10倍。
+上图中,蓝色方块中列举了PP-OCRv3识别模型的6个主要模块。首先在模块①,将base模型从CRNN替换为精度更高的单一视觉模型[SVTR](https://arxiv.org/abs/2205.00159),并进行一系列的结构优化进行加速,得到全新的轻量级文本识别网络SVTR_LCNet(如图中红色虚线框所示);在模块②,借鉴[GTC](https://arxiv.org/pdf/2002.01276.pdf)策略,引入Attention指导CTC训练,进一步提升模型精度;在模块③,使用基于上下文信息的数据增广策略TextConAug,丰富训练数据上下文信息,提升训练数据多样性;在模块④,使用TextRotNet训练自监督的预训练模型,充分利用无标注识别数据的信息;模块⑤基于PP-OCRv2中提出的UDML蒸馏策略进行蒸馏学习,除计算2个模型的CTC分支的DMLLoss外,也计算2个模型的Attention分支之间的DMLLoss,从而得到更优模型;在模块⑥中,基于UIM无标注数据挖掘方法,使用效果好但速度相对较慢的SVTR_tiny模型进行无标签数据挖掘,为模型训练增加更多真实数据。
-PP-OCRv3 期望在提升模型精度的同时,不带来额外的推理耗时。通过分析发现,SVTR_Tiny结构的主要耗时模块为Mixing Block,因此我们对 SVTR_Tiny 的结构进行了一系列优化(详细速度数据请参考下方消融实验表格):
-1. 将SVTR网络前半部分替换为PP-LCNet的前三个stage,保留4个 Global Mixing Block ,精度为76%,加速69%,网络结构如下所示:
-
-2. 将4个 Global Attenntion Block 减小到2个,精度为72.9%,加速69%,网络结构如下所示:
-
-3. 实验发现 Global Attention 的预测速度与输入其特征的shape有关,因此后移Global Mixing Block的位置到池化层之后,精度下降为71.9%,速度超越 CNN-base 的PP-OCRv2 22%,网络结构如下所示:
-
+基于上述策略,PP-OCRv3识别模型相比PP-OCRv2,在速度可比的情况下,精度进一步提升4.6%。 具体消融实验如下所示:
-为了提升模型精度同时不引入额外推理成本,PP-OCRv3参考GTC策略,使用Attention监督CTC训练,预测时完全去除Attention模块,在推理阶段不增加任何耗时, 精度提升3.8%,训练流程如下所示:
-
+| ID | 策略 | 模型大小 | 精度 | 预测耗时(CPU + MKLDNN)|
+|-----|-----|--------|----| --- |
+| 01 | PP-OCRv2 | 8M | 74.8% | 8.54ms |
+| 02 | SVTR_Tiny | 21M | 80.1% | 97ms |
+| 03 | SVTR_LCNet | 12M | 71.9% | 6.6ms |
+| 04 | + GTC | 12M | 75.8% | 7.6ms |
+| 05 | + TextConAug | 12M | 76.3% | 7.6ms |
+| 06 | + TextRotNet | 12M | 76.9% | 7.6ms |
+| 07 | + UDML | 12M | 78.4% | 7.6ms |
+| 08 | + UIM | 12M | 79.4% | 7.6ms |
-在训练策略方面,PP-OCRv3参考 [SSL](https://github.com/ku21fan/STR-Fewer-Labels) 设计了文本方向任务,训练了适用于文本识别的预训练模型,加速模型收敛过程,精度提升了0.6%; 使用UDML蒸馏策略,进一步提升精度1.5%,训练流程所示:
+注: 测试速度时,实验01-03输入图片尺寸均为(3,32,320),04-08输入图片尺寸均为(3,48,320)。在实际预测时,图像为变长输入,速度会有所变化。
-
+**(1)轻量级文本识别网络SVTR_LCNet。**
-数据增强方面:
+PP-OCRv3将base模型从CRNN替换成了[SVTR](https://arxiv.org/abs/2205.00159),SVTR证明了强大的单视觉模型(无需序列模型)即可高效准确完成文本识别任务,在中英文数据上均有优秀的表现。经过实验验证,SVTR_Tiny 在自建的[中文数据集](https://arxiv.org/abs/2109.03144)上 ,识别精度可以提升至80.1%,SVTR_Tiny 网络结构如下所示:
-1. 基于 [ConCLR](https://www.cse.cuhk.edu.hk/~byu/papers/C139-AAAI2022-ConCLR.pdf) 中的ConAug方法,设计了 RecConAug 数据增强方法,增强数据多样性,精度提升0.5%,增强可视化效果如下所示:
-
+
+
+
-2. 使用训练好的 SVTR_large 预测 120W 的 lsvt 无标注数据,取出其中得分大于0.95的数据,共得到81W识别数据加入到PP-OCRv3的训练数据中,精度提升1%。
-总体来讲PP-OCRv3识别从网络结构、训练策略、数据增强三个方向做了进一步优化:
+由于 MKLDNN 加速库支持的模型结构有限,SVTR 在 CPU+MKLDNN 上相比 PP-OCRv2 慢了10倍。PP-OCRv3 期望在提升模型精度的同时,不带来额外的推理耗时。通过分析发现,SVTR_Tiny 结构的主要耗时模块为 Mixing Block,因此我们对 SVTR_Tiny 的结构进行了一系列优化(详细速度数据请参考下方消融实验表格):
-- 网络结构上:考虑[SVTR](https://arxiv.org/abs/2205.00159) 在中英文效果上的优越性,采用SVTR_Tiny作为base,选取Global Mixing Block和卷积组合提取特征,并将Global Mixing Block位置后移进行加速; 参考 [GTC](https://arxiv.org/pdf/2002.01276.pdf) 策略,使用注意力机制模块指导CTC训练,定位和识别字符,提升不规则文本的识别精度。
-- 训练策略上:参考 [SSL](https://github.com/ku21fan/STR-Fewer-Labels) 设计了方向分类前序任务,获取更优预训练模型,加速模型收敛过程,提升精度; 使用UDML蒸馏策略、监督attention、ctc两个分支得到更优模型。
-- 数据增强上:基于 [ConCLR](https://www.cse.cuhk.edu.hk/~byu/papers/C139-AAAI2022-ConCLR.pdf) 中的ConAug方法,改进得到 RecConAug 数据增广方法,支持随机结合任意多张图片,提升训练数据的上下文信息丰富度,增强模型鲁棒性;使用 SVTR_large 预测无标签数据,向训练集中补充81w高质量真实数据。
-基于上述策略,PP-OCRv3识别模型相比PP-OCRv2,在速度可比的情况下,精度进一步提升4.5%。 具体消融实验如下所示:
+1. 将 SVTR 网络前半部分替换为 PP-LCNet 的前三个stage,保留4个 Global Mixing Block ,精度为76%,加速69%,网络结构如下所示:
+
+
+
+2. 将4个 Global Mixing Block 减小到2个,精度为72.9%,加速69%,网络结构如下所示:
+
+
+
+3. 实验发现 Global Mixing Block 的预测速度与输入其特征的shape有关,因此后移 Global Mixing Block 的位置到池化层之后,精度下降为71.9%,速度超越基于CNN结构的PP-OCRv2-baseline 22%,网络结构如下所示:
+
+
+
-实验细节:
+具体消融实验如下所示:
-| id | 策略 | 模型大小 | 精度 | 速度(cpu + mkldnn)|
+| ID | 策略 | 模型大小 | 精度 | 速度(CPU + MKLDNN)|
|-----|-----|--------|----| --- |
-| 01 | PP-OCRv2 | 8M | 69.3% | 8.54ms |
+| 01 | PP-OCRv2-baseline | 8M | 69.3% | 8.54ms |
| 02 | SVTR_Tiny | 21M | 80.1% | 97ms |
-| 03 | LCNet_SVTR_G4 | 9.2M | 76% | 30ms |
-| 04 | LCNet_SVTR_G2 | 13M | 72.98% | 9.37ms |
-| 05 | PP-OCRv3 | 12M | 71.9% | 6.6ms |
-| 06 | + large input_shape | 12M | 73.98% | 7.6ms |
-| 06 | + GTC | 12M | 75.8% | 7.6ms |
-| 07 | + RecConAug | 12M | 76.3% | 7.6ms |
-| 08 | + SSL pretrain | 12M | 76.9% | 7.6ms |
-| 09 | + UDML | 12M | 78.4% | 7.6ms |
-| 10 | + unlabeled data | 12M | 79.4% | 7.6ms |
-
-注: 测试速度时,实验01-05输入图片尺寸均为(3,32,320),06-10输入图片尺寸均为(3,48,320)
+| 03 | SVTR_LCNet(G4) | 9.2M | 76% | 30ms |
+| 04 | SVTR_LCNet(G2) | 13M | 72.98% | 9.37ms |
+| 05 | SVTR_LCNet | 12M | 71.9% | 6.6ms |
+
+注: 测试速度时,输入图片尺寸均为(3,32,320); PP-OCRv2-baseline 代表没有借助蒸馏方法训练得到的模型
+
+**(2)采用Attention指导CTC训练。**
+
+为了提升模型精度同时不引入额外推理成本,PP-OCRv3 参考 GTC(Guided Training of CTC) 策略,使用 Attention 监督 CTC 训练,预测时完全去除 Attention 模块,在推理阶段不增加任何耗时, 精度提升3.8%,训练流程如下所示:
+
+
+
+
+**(3)TextConAug数据增广策略。**
+
+在论文[ConCLR](https://www.cse.cuhk.edu.hk/~byu/papers/C139-AAAI2022-ConCLR.pdf)中,作者提出ConAug数据增广,在一个batch内对2张不同的图像进行联结,组成新的图像并进行自监督对比学习。PP-OCRv3将此方法应用到有监督的学习任务中,设计了TextConAug数据增强方法,支持更多图像的联结,从而进一步丰富了图像的上下文信息。最终将识别模型精度进一步提升0.5%。TextConAug示意图如下所示:
+
+
+
+
+
+
+**(4)TextRotNet自监督训练优化预训练模型。**
+
+为了充分利用自然场景中的大量无标注文本数据,PP-OCRv3参考论文[STR-Fewer-Labels](https://github.com/ku21fan/STR-Fewer-Labels),设计TextRotNet自监督任务,对识别图像进行旋转并预测其旋转角度,同时结合中文场景文字识别任务的特点,在训练时适当调整图像的尺寸,添加文本识别数据增广,最终产出针对文本识别任务的PP-LCNet预训练模型,帮助识别模型精度进一步提升0.6%。TextRotNet训练流程如下图所示:
+
+
+
+
+
+
+**(5)UIM(Unlabeled Images Mining)无标注数据挖掘策略。**
+
+为了更直接地利用自然场景中包含的大量无标注数据,使用PP-OCRv2检测模型以及SVTR_tiny识别模型对百度开源的40W [LSVT弱标注数据集](https://ai.baidu.com/broad/introduction?dataset=lsvt)进行检测与识别,并筛选出识别得分大于0.95的文本,共81W文本行数据,将其补充到训练数据中,最终进一步提升模型精度1.0%。
+
+
+
+
+
## 4. 端到端评估
+
+经过以上优化,最终PP-OCRv3在速度可比情况下,中文场景端到端Hmean指标相比于PP-OCRv2提升5%,效果大幅提升。具体指标如下表所示:
+
+| Model | Hmean | Model Size (M) | Time Cost (CPU, ms) | Time Cost (T4 GPU, ms) |
+|-----|-----|--------|----| --- |
+| PP-OCR mobile | 50.3% | 8.1 | 356 | 116 |
+| PP-OCR server | 57.0% | 155.1 | 1056 | 200 |
+| PP-OCRv2 | 57.6% | 11.6 | 330 | 111 |
+| PP-OCRv3 | 62.9% | 15.6 | 331 | 86.64 |
+
+测试环境:CPU型号为Intel Gold 6148,CPU预测时开启MKLDNN加速。
+
+
+除了更新中文模型,本次升级也同步优化了英文数字模型,端到端效果提升11%,如下表所示:
+
+| Model | Recall | Precision | Hmean |
+|-----|-----|--------|----|
+| PP-OCR_en | 38.99% | 45.91% | 42.17% |
+| PP-OCRv3_en | 50.95% | 55.53% | 53.14% |
+
+同时,也对已支持的80余种语言识别模型进行了升级更新,在有评估集的四种语系识别准确率平均提升5%以上,如下表所示:
+
+| Model | 拉丁语系 | 阿拉伯语系 | 日语 | 韩语 |
+|-----|-----|--------|----| --- |
+| PP-OCR_mul | 69.6% | 40.5% | 38.5% | 55.4% |
+| PP-OCRv3_mul | 75.2%| 45.37% | 45.8% | 60.1% |
diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md
index b7a93af5d9cc50c89d626e557d74b6d848125ccc..08f31f6fb13a682202d75761f5a5bcad0888a2a8 100644
--- a/doc/doc_ch/models_list.md
+++ b/doc/doc_ch/models_list.md
@@ -41,7 +41,7 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训
|模型名称|模型简介|配置文件|推理模型大小|下载地址|
| --- | --- | --- | --- | --- |
-|ch_PP-OCRv3_det_slim|【最新】slim量化+蒸馏版超轻量模型,支持中英文、多语种文本检测|[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 1.1M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.tar) / [训练模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_distill_train.tar) / [slim模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.nb)|
+|ch_PP-OCRv3_det_slim|【最新】slim量化+蒸馏版超轻量模型,支持中英文、多语种文本检测|[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 1.1M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_distill_train.tar) / [slim模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.nb)|
|ch_PP-OCRv3_det| 【最新】原始超轻量模型,支持中英文、多语种文本检测 |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar)|
|ch_PP-OCRv2_det_slim| slim量化+蒸馏版超轻量模型,支持中英文、多语种文本检测|[ch_PP-OCRv2_det_cml.yml](../../configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml)| 3M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_slim_quant_infer.tar)|
|ch_PP-OCRv2_det| 原始超轻量模型,支持中英文、多语种文本检测|[ch_PP-OCRv2_det_cml.yml](../../configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml)|3M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar)|
@@ -55,7 +55,7 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训
|模型名称|模型简介|配置文件|推理模型大小|下载地址|
| --- | --- | --- | --- | --- |
-|en_PP-OCRv3_det_slim |【最新】slim量化版超轻量模型,支持英文、数字检测 | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[推理模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.tar) / [训练模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_distill_train.tar) / [slim模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.nb) |
+|en_PP-OCRv3_det_slim |【最新】slim量化版超轻量模型,支持英文、数字检测 | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_distill_train.tar) / [slim模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.nb) |
|ch_PP-OCRv3_det |【最新】原始超轻量模型,支持英文、数字检测|[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_distill_train.tar) |
* 注:英文检测模型与中文检测模型结构完全相同,只有训练数据不同,在此仅提供相同的配置文件。
@@ -66,7 +66,7 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训
|模型名称|模型简介|配置文件|推理模型大小|下载地址|
| --- | --- | --- | --- | --- |
-| ml_PP-OCRv3_det_slim |【最新】slim量化版超轻量模型,支持多语言检测 | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[推理模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.tar) / [训练模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_distill_train.tar) / [slim模型(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.nb) |
+| ml_PP-OCRv3_det_slim |【最新】slim量化版超轻量模型,支持多语言检测 | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_distill_train.tar) / [slim模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.nb) |
| ml_PP-OCRv3_det |【最新】原始超轻量模型,支持多语言检测 | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_distill_train.tar) |
* 注:多语言检测模型与中文检测模型结构完全相同,只有训练数据不同,在此仅提供相同的配置文件。
@@ -113,11 +113,10 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训
| te_PP-OCRv3_rec | ppocr/utils/dict/te_dict.txt | 泰卢固文识别|[te_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/te_PP-OCRv3_rec.yml)|9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/te_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/te_PP-OCRv3_rec_train.tar) |
| ka_PP-OCRv3_rec | ppocr/utils/dict/ka_dict.txt |卡纳达文识别|[ka_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml)|9.9M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ka_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ka_PP-OCRv3_rec_train.tar) |
| ta_PP-OCRv3_rec | ppocr/utils/dict/ta_dict.txt |泰米尔文识别|[ta_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml)|9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ta_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ta_PP-OCRv3_rec_train.tar) |
-| latin_PP-OCRv3_rec | ppocr/utils/dict/latin_dict.txt | 拉丁文识别 | [latin_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_ppocr_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_ppocr_PP-OCRv3_rec_train.tar) |
-| arabic_PP-OCRv3_rec | ppocr/utils/dict/arabic_dict.txt | 阿拉伯字母 | [arabic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/rec_arabic_lite_train.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_ppocr_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_ppocr_PP-OCRv3_rec_train.tar) |
-| cyrillic_PP-OCRv3_rec | ppocr/utils/dict/cyrillic_dict.txt | 斯拉夫字母 | [cyrillic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_ppocr_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_ppocr_PP-OCRv3_rec_train.tar) |
-| devanagari_PP-OCRv3_rec | ppocr/utils/dict/devanagari_dict.txt |梵文字母 | [devanagari_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_ppocr_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_ppocr_PP-OCRv3_rec_train.tar) |
-
+| latin_PP-OCRv3_rec | ppocr/utils/dict/latin_dict.txt | 拉丁文识别 | [latin_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml) |9.7M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_PP-OCRv3_rec_train.tar) |
+| arabic_PP-OCRv3_rec | ppocr/utils/dict/arabic_dict.txt | 阿拉伯字母 | [arabic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/rec_arabic_lite_train.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_train.tar) |
+| cyrillic_PP-OCRv3_rec | ppocr/utils/dict/cyrillic_dict.txt | 斯拉夫字母 | [cyrillic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_train.tar) |
+| devanagari_PP-OCRv3_rec | ppocr/utils/dict/devanagari_dict.txt |梵文字母 | [devanagari_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml) |9.9M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_train.tar) |
更多支持语种请参考: [多语言模型](./multi_languages.md)
diff --git a/doc/doc_en/models_list_en.md b/doc/doc_en/models_list_en.md
index a61667b8d66a72d265c5ea9d3dbb9a2bff51de61..6e61e8d0a78d8dc1e4312fe11f5e9feee88a483d 100644
--- a/doc/doc_en/models_list_en.md
+++ b/doc/doc_en/models_list_en.md
@@ -37,7 +37,7 @@ Relationship of the above models is as follows.
|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
-|ch_PP-OCRv3_det_slim| [New] slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 1.1M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.tar) / [trained model (coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/ch/ch_PP-OCRv3_det_slim_distill_train.tar) / [slim model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.nb)|
+|ch_PP-OCRv3_det_slim| [New] slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 1.1M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/ch/ch_PP-OCRv3_det_slim_distill_train.tar) / [slim model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.nb)|
|ch_PP-OCRv3_det| [New] Original lightweight model, supporting Chinese, English, multilingual text detection |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar)|
|ch_PP-OCRv2_det_slim| [New] slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection|[ch_PP-OCRv2_det_cml.yml](../../configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml)| 3M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_slim_quant_infer.tar)|
|ch_PP-OCRv2_det| [New] Original lightweight model, supporting Chinese, English, multilingual text detection|[ch_PP-OCRv2_det_cml.yml](../../configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml)|3M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar)|
@@ -51,7 +51,7 @@ Relationship of the above models is as follows.
|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
-|en_PP-OCRv3_det_slim | [New] Slim qunatization with distillation lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[inference model(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.tar) / [trained model (coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_distill_train.tar) / [slim model(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.nb) |
+|en_PP-OCRv3_det_slim | [New] Slim quantization with distillation lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_distill_train.tar) / [slim model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_slim_infer.nb) |
|ch_PP-OCRv3_det | [New] Original lightweight detection model, supporting English |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_distill_train.tar) |
* Note: English configuration file is same as Chinese except training data, here we only provide one configuration file.
@@ -62,7 +62,7 @@ Relationship of the above models is as follows.
|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
-| ml_PP-OCRv3_det_slim | [New] Slim qunatization with distillation lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M | [inference model(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.tar) / [trained model (coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_distill_train.tar) / [slim model(coming soon)](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.nb) |
+| ml_PP-OCRv3_det_slim | [New] Slim quantization with distillation lightweight detection model, supporting multilingual text detection | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) | 1.1M | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_distill_train.tar) / [slim model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_slim_infer.nb) |
| ml_PP-OCRv3_det |[New] Original lightweight detection model, supporting English | [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M | [inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/Multilingual_PP-OCRv3_det_distill_train.tar) |
* Note: English configuration file is same as Chinese except training data, here we only provide one configuration file.
diff --git a/doc/doc_en/quickstart_en.md b/doc/doc_en/quickstart_en.md
index 7243e2db927a1cc89f8ac4d63c2a5a722de393d5..bf1ce05cf444937ae1565ed71f11ca82ee4f4f33 100644
--- a/doc/doc_en/quickstart_en.md
+++ b/doc/doc_en/quickstart_en.md
@@ -1,8 +1,11 @@
-- [PaddleOCR Quick Start](#paddleocr-quick-start)
- - [1. Installation](#1-installation)
+# PaddleOCR Quick Start
+
+**Note:** this tutorial mainly introduces the usage of PP-OCR series models, please refer to [PP-Structure Quick Start](../../ppstructure/docs/quickstart_en.md) for the quick use of document analysis related functions.
+
+- [1. Installation](#1-installation)
- [1.1 Install PaddlePaddle](#11-install-paddlepaddle)
- [1.2 Install PaddleOCR Whl Package](#12-install-paddleocr-whl-package)
- - [2. Easy-to-Use](#2-easy-to-use)
+- [2. Easy-to-Use](#2-easy-to-use)
- [2.1 Use by Command Line](#21-use-by-command-line)
- [2.1.1 Chinese and English Model](#211-chinese-and-english-model)
- [2.1.2 Multi-language Model](#212-multi-language-model)
@@ -10,9 +13,8 @@
- [2.2 Use by Code](#22-use-by-code)
- [2.2.1 Chinese & English Model and Multilingual Model](#221-chinese--english-model-and-multilingual-model)
- [2.2.2 Layout Analysis](#222-layout-analysis)
- - [3. Summary](#3-summary)
+- [3. Summary](#3-summary)
-# PaddleOCR Quick Start
diff --git a/doc/ppocr_v3/GTC.png b/doc/ppocr_v3/GTC.png
index 2af2261d51d2279f171727a5a0b5a8d974763d80..30a9cdd146283e2e64fc0965cb06309b64707819 100644
Binary files a/doc/ppocr_v3/GTC.png and b/doc/ppocr_v3/GTC.png differ
diff --git a/doc/ppocr_v3/LCNet_SVTR.png b/doc/ppocr_v3/LCNet_SVTR.png
new file mode 100644
index 0000000000000000000000000000000000000000..7f0d701d27502999fcee6d0872d02b9fe1554e3c
Binary files /dev/null and b/doc/ppocr_v3/LCNet_SVTR.png differ
diff --git a/doc/ppocr_v3/LKPAN.png b/doc/ppocr_v3/LKPAN.png
index ff0578f6901603185809e10c85793c212c40dc48..6b1605362317da48110b64a1a774b6f1e017eaa1 100644
Binary files a/doc/ppocr_v3/LKPAN.png and b/doc/ppocr_v3/LKPAN.png differ
diff --git a/doc/ppocr_v3/RSEFPN.png b/doc/ppocr_v3/RSEFPN.png
index 87f7f69fb516d496c9357d81b97e5bdb750f808a..ddf7c52fb5b01874bd931d23bd4d41bf979dcf31 100644
Binary files a/doc/ppocr_v3/RSEFPN.png and b/doc/ppocr_v3/RSEFPN.png differ
diff --git a/doc/ppocr_v3/UIM.png b/doc/ppocr_v3/UIM.png
new file mode 100644
index 0000000000000000000000000000000000000000..7479bdf4a9174be6b431aaee29093df92e008684
Binary files /dev/null and b/doc/ppocr_v3/UIM.png differ
diff --git a/doc/ppocr_v3/ppocr_v3.png b/doc/ppocr_v3/ppocr_v3.png
deleted file mode 100644
index 123c125acdcbc9e2ef6e4d6a0a1c92d01136ffde..0000000000000000000000000000000000000000
Binary files a/doc/ppocr_v3/ppocr_v3.png and /dev/null differ
diff --git a/doc/ppocr_v3/ppocrv3_det_cml.png b/doc/ppocr_v3/ppocrv3_det_cml.png
new file mode 100644
index 0000000000000000000000000000000000000000..ccb5c8b21faeab75027690e520b072186972f796
Binary files /dev/null and b/doc/ppocr_v3/ppocrv3_det_cml.png differ
diff --git a/doc/ppocr_v3/svtr_g2.png b/doc/ppocr_v3/svtr_g2.png
index d589891d5897533243845a993bd56d8f75726cfc..2573afafbbb6f5ad270320e45c7c3bdb47d8adc2 100644
Binary files a/doc/ppocr_v3/svtr_g2.png and b/doc/ppocr_v3/svtr_g2.png differ
diff --git a/doc/ppocr_v3/svtr_g4.png b/doc/ppocr_v3/svtr_g4.png
index 234a85c44b2cc3d968942480a596b2be5e45f53d..f85d66d97f619d57edb4223a0996901050ea7959 100644
Binary files a/doc/ppocr_v3/svtr_g4.png and b/doc/ppocr_v3/svtr_g4.png differ
diff --git a/doc/ppocr_v3/svtr_tiny.png b/doc/ppocr_v3/svtr_tiny.png
index 91b3eacb9f1242806ad3520cc36252351fc7baf1..01e22e74b539b12072a677bc5081df92f81ef963 100644
Binary files a/doc/ppocr_v3/svtr_tiny.png and b/doc/ppocr_v3/svtr_tiny.png differ
diff --git a/doc/ppocr_v3/teacher_dml.png b/doc/ppocr_v3/teacher_dml.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea09cacda87ae4c0d44cb0f1c18ee1f10c50b957
Binary files /dev/null and b/doc/ppocr_v3/teacher_dml.png differ
diff --git a/doc/ppocr_v3/v3_rec_pipeline.png b/doc/ppocr_v3/v3_rec_pipeline.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa61cc4f1652f958977fdab8d2dca56c57f8f816
Binary files /dev/null and b/doc/ppocr_v3/v3_rec_pipeline.png differ
diff --git a/doc/ppocrv3_framework.png b/doc/ppocrv3_framework.png
index c05398248fa7273382e9691a26d932bddc3cf84f..e05279f7f57301c480c0cc11d940af0b5bf69668 100644
Binary files a/doc/ppocrv3_framework.png and b/doc/ppocrv3_framework.png differ