diff --git a/PPOCRLabel/PPOCRLabel.py b/PPOCRLabel/PPOCRLabel.py index f6e6abe36516b263e187436a72826187823a92e1..ce3d66f07f89cedab463cf38bc0cdc56f3a61237 100644 --- a/PPOCRLabel/PPOCRLabel.py +++ b/PPOCRLabel/PPOCRLabel.py @@ -1149,7 +1149,10 @@ class MainWindow(QMainWindow): for box in self.result_dic: trans_dic = {"label": box[1][0], "points": box[0], "difficult": False} if self.kie_mode: - trans_dic.update({"key_cls": "None"}) + if len(box) == 3: + trans_dic.update({"key_cls": box[2]}) + else: + trans_dic.update({"key_cls": "None"}) if trans_dic["label"] == "" and mode == 'Auto': continue shapes.append(trans_dic) @@ -2047,6 +2050,7 @@ class MainWindow(QMainWindow): rec_flag = 0 for shape in self.canvas.shapes: box = [[int(p.x()), int(p.y())] for p in shape.points] + kie_cls = shape.key_cls if len(box) > 4: box = self.gen_quad_from_poly(np.array(box)) @@ -2062,17 +2066,27 @@ class MainWindow(QMainWindow): if shape.line_color == DEFAULT_LOCK_COLOR: shape.label = result[0][0] result.insert(0, box) + if self.kie_mode: + result.append(kie_cls) self.result_dic_locked.append(result) else: result.insert(0, box) + if self.kie_mode: + result.append(kie_cls) self.result_dic.append(result) else: print('Can not recognise the box') if shape.line_color == DEFAULT_LOCK_COLOR: shape.label = result[0][0] - self.result_dic_locked.append([box, (self.noLabelText, 0)]) + if self.kie_mode: + self.result_dic_locked.append([box, (self.noLabelText, 0), kie_cls]) + else: + self.result_dic_locked.append([box, (self.noLabelText, 0)]) else: - self.result_dic.append([box, (self.noLabelText, 0)]) + if self.kie_mode: + self.result_dic.append([box, (self.noLabelText, 0), kie_cls]) + else: + self.result_dic.append([box, (self.noLabelText, 0)]) try: if self.noLabelText == shape.label or result[1][0] == shape.label: print('label no change') diff --git a/PPOCRLabel/libs/shape.py b/PPOCRLabel/libs/shape.py index 18cc4a8e7692c2a88becca2e9ca19dfebbd61779..97e2eb72380be5c1fd1e06785be846b596763986 100644 --- a/PPOCRLabel/libs/shape.py +++ b/PPOCRLabel/libs/shape.py @@ -48,6 +48,7 @@ class Shape(object): def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False): self.label = label + self.idx = 0 self.points = [] self.fill = False self.selected = False diff --git "a/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/PCB\345\255\227\347\254\246\350\257\206\345\210\253.md" "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/PCB\345\255\227\347\254\246\350\257\206\345\210\253.md" new file mode 100644 index 0000000000000000000000000000000000000000..ee13bacffdb65e6300a034531a527fdca4ed29f9 --- /dev/null +++ "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/PCB\345\255\227\347\254\246\350\257\206\345\210\253.md" @@ -0,0 +1,652 @@ +# 基于PP-OCRv3的PCB字符识别 + +- [1. 项目介绍](#1-项目介绍) +- [2. 安装说明](#2-安装说明) +- [3. 数据准备](#3-数据准备) +- [4. 文本检测](#4-文本检测) + - [4.1 预训练模型直接评估](#41-预训练模型直接评估) + - [4.2 预训练模型+验证集padding直接评估](#42-预训练模型验证集padding直接评估) + - [4.3 预训练模型+fine-tune](#43-预训练模型fine-tune) +- [5. 文本识别](#5-文本识别) + - [5.1 预训练模型直接评估](#51-预训练模型直接评估) + - [5.2 三种fine-tune方案](#52-三种fine-tune方案) +- [6. 模型导出](#6-模型导出) +- [7. 端对端评测](#7-端对端评测) +- [8. Jetson部署](#8-Jetson部署) +- [9. 总结](#9-总结) +- [更多资源](#更多资源) + +# 1. 项目介绍 + +印刷电路板(PCB)是电子产品中的核心器件,对于板件质量的测试与监控是生产中必不可少的环节。在一些场景中,通过PCB中信号灯颜色和文字组合可以定位PCB局部模块质量问题,PCB文字识别中存在如下难点: + +- 裁剪出的PCB图片宽高比例较小 +- 文字区域整体面积也较小 +- 包含垂直、水平多种方向文本 + +针对本场景,PaddleOCR基于全新的PP-OCRv3通过合成数据、微调以及其他场景适配方法完成小字符文本识别任务,满足企业上线要求。PCB检测、识别效果如 **图1** 所示: + +
+
图1 PCB检测识别效果
+ +注:欢迎在AIStudio领取免费算力体验线上实训,项目链接: [基于PP-OCRv3实现PCB字符识别](https://aistudio.baidu.com/aistudio/projectdetail/4008973) + +# 2. 安装说明 + + +下载PaddleOCR源码,安装依赖环境。 + + +```python +# 如仍需安装or安装更新,可以执行以下步骤 +git clone https://github.com/PaddlePaddle/PaddleOCR.git +# git clone https://gitee.com/PaddlePaddle/PaddleOCR +``` + + +```python +# 安装依赖包 +pip install -r /home/aistudio/PaddleOCR/requirements.txt +``` + +# 3. 数据准备 + +我们通过图片合成工具生成 **图2** 所示的PCB图片,整图只有高25、宽150左右、文字区域高9、宽45左右,包含垂直和水平2种方向的文本: + +
+
图2 数据集示例
+ +暂时不开源生成的PCB数据集,但是通过更换背景,通过如下代码生成数据即可: + +``` +cd gen_data +python3 gen.py --num_img=10 +``` + +生成图片参数解释: + +``` +num_img:生成图片数量 +font_min_size、font_max_size:字体最大、最小尺寸 +bg_path:文字区域背景存放路径 +det_bg_path:整图背景存放路径 +fonts_path:字体路径 +corpus_path:语料路径 +output_dir:生成图片存储路径 +``` + +这里生成 **100张** 相同尺寸和文本的图片,如 **图3** 所示,方便大家跑通实验。通过如下代码解压数据集: + +
+
图3 案例提供数据集示例
+ + +```python +tar xf ./data/data148165/dataset.tar -C ./ +``` + +在生成数据集的时需要生成检测和识别训练需求的格式: + + +- **文本检测** + +标注文件格式如下,中间用'\t'分隔: + +``` +" 图像文件名 json.dumps编码的图像标注信息" +ch4_test_images/img_61.jpg [{"transcription": "MASA", "points": [[310, 104], [416, 141], [418, 216], [312, 179]]}, {...}] +``` + +json.dumps编码前的图像标注信息是包含多个字典的list,字典中的 `points` 表示文本框的四个点的坐标(x, y),从左上角的点开始顺时针排列。 `transcription` 表示当前文本框的文字,***当其内容为“###”时,表示该文本框无效,在训练时会跳过。*** + +- **文本识别** + +标注文件的格式如下, txt文件中默认请将图片路径和图片标签用'\t'分割,如用其他方式分割将造成训练报错。 + +``` +" 图像文件名 图像标注信息 " + +train_data/rec/train/word_001.jpg 简单可依赖 +train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 +... +``` + + +# 4. 文本检测 + +选用飞桨OCR开发套件[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)中的PP-OCRv3模型进行文本检测和识别。针对检测模型和识别模型,进行了共计9个方面的升级: + +- PP-OCRv3检测模型对PP-OCRv2中的CML协同互学习文本检测蒸馏策略进行了升级,分别针对教师模型和学生模型进行进一步效果优化。其中,在对教师模型优化时,提出了大感受野的PAN结构LK-PAN和引入了DML蒸馏策略;在对学生模型优化时,提出了残差注意力机制的FPN结构RSE-FPN。 + +- PP-OCRv3的识别模块是基于文本识别算法SVTR优化。SVTR不再采用RNN结构,通过引入Transformers结构更加有效地挖掘文本行图像的上下文信息,从而提升文本识别能力。PP-OCRv3通过轻量级文本识别网络SVTR_LCNet、Attention损失指导CTC损失训练策略、挖掘文字上下文信息的数据增广策略TextConAug、TextRotNet自监督预训练模型、UDML联合互学习策略、UIM无标注数据挖掘方案,6个方面进行模型加速和效果提升。 + +更多细节请参考PP-OCRv3[技术报告](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/doc/doc_ch/PP-OCRv3_introduction.md)。 + + +我们使用 **3种方案** 进行检测模型的训练、评估: +- **PP-OCRv3英文超轻量检测预训练模型直接评估** +- PP-OCRv3英文超轻量检测预训练模型 + **验证集padding**直接评估 +- PP-OCRv3英文超轻量检测预训练模型 + **fine-tune** + +## **4.1 预训练模型直接评估** + +我们首先通过PaddleOCR提供的预训练模型在验证集上进行评估,如果评估指标能满足效果,可以直接使用预训练模型,不再需要训练。 + +使用预训练模型直接评估步骤如下: + +**1)下载预训练模型** + + +PaddleOCR已经提供了PP-OCR系列模型,部分模型展示如下表所示: + +| 模型简介 | 模型名称 | 推荐场景 | 检测模型 | 方向分类器 | 识别模型 | +| ------------------------------------- | ----------------------- | --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| 中英文超轻量PP-OCRv3模型(16.2M) | ch_PP-OCRv3_xx | 移动端&服务器端 | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar) | +| 英文超轻量PP-OCRv3模型(13.4M) | en_PP-OCRv3_xx | 移动端&服务器端 | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_distill_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_train.tar) | +| 中英文超轻量PP-OCRv2模型(13.0M) | ch_PP-OCRv2_xx | 移动端&服务器端 | [推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_train.tar) | 
[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar) | +| 中英文超轻量PP-OCR mobile模型(9.4M) | ch_ppocr_mobile_v2.0_xx | 移动端&服务器端 | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_pre.tar) | +| 中英文通用PP-OCR server模型(143.4M) | ch_ppocr_server_v2.0_xx | 服务器端 | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_train.tar) | [推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) / [预训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_pre.tar) | + +更多模型下载(包括多语言),可以参[考PP-OCR系列模型下载](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/doc/doc_ch/models_list.md) + +这里我们使用PP-OCRv3英文超轻量检测模型,下载并解压预训练模型: + + + + +```python +# 如果更换其他模型,更新下载链接和解压指令就可以 +cd /home/aistudio/PaddleOCR +mkdir pretrain_models +cd pretrain_models +# 下载英文预训练模型 +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_distill_train.tar +tar xf en_PP-OCRv3_det_distill_train.tar && rm -rf en_PP-OCRv3_det_distill_train.tar +%cd .. +``` + +**模型评估** + + +首先修改配置文件`configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml`中的以下字段: +``` +Eval.dataset.data_dir:指向验证集图片存放目录,'/home/aistudio/dataset' +Eval.dataset.label_file_list:指向验证集标注文件,'/home/aistudio/dataset/det_gt_val.txt' +Eval.dataset.transforms.DetResizeForTest: 尺寸 + limit_side_len: 48 + limit_type: 'min' +``` + +然后在验证集上进行评估,具体代码如下: + + + +```python +cd /home/aistudio/PaddleOCR +python tools/eval.py \ + -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \ + -o Global.checkpoints="./pretrain_models/en_PP-OCRv3_det_distill_train/best_accuracy" +``` + +## **4.2 预训练模型+验证集padding直接评估** + +考虑到PCB图片比较小,宽度只有25左右、高度只有140-170左右,我们在原图的基础上进行padding,再进行检测评估,padding前后效果对比如 **图4** 所示: + +
+
图4 padding前后对比图
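+
+padding的过程可以参考如下示意代码(假设把原图居中粘贴到300*300的白色画布上,画布大小、颜色均为示例值,实际请以提供的padding数据和标注为准):
+
+```python
+from PIL import Image
+
+def pad_image_and_label(img_path, label, size=300):
+    # 将原图居中粘贴到 size*size 的白色画布上
+    img = Image.open(img_path)
+    w, h = img.size
+    canvas = Image.new('RGB', (size, size), (255, 255, 255))
+    dx, dy = (size - w) // 2, (size - h) // 2
+    canvas.paste(img, (dx, dy))
+    # 标注点坐标随padding同步平移,label为json.loads解析后的标注list
+    for item in label:
+        item["points"] = [[x + dx, y + dy] for x, y in item["points"]]
+    return canvas, label
+```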
+ +将图片都padding到300*300大小,因为坐标信息发生了变化,我们同时要修改标注文件,在`/home/aistudio/dataset`目录里也提供了padding之后的图片,大家也可以尝试训练和评估: + +同上,我们需要修改配置文件`configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml`中的以下字段: +``` +Eval.dataset.data_dir:指向验证集图片存放目录,'/home/aistudio/dataset' +Eval.dataset.label_file_list:指向验证集标注文件,/home/aistudio/dataset/det_gt_padding_val.txt +Eval.dataset.transforms.DetResizeForTest: 尺寸 + limit_side_len: 1100 + limit_type: 'min' +``` + +如需获取已训练模型,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁 +
+ +
+将下载或训练完成的模型放置在对应目录下即可完成模型评估。 + + +```python +cd /home/aistudio/PaddleOCR +python tools/eval.py \ + -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \ + -o Global.checkpoints="./pretrain_models/en_PP-OCRv3_det_distill_train/best_accuracy" +``` + +## **4.3 预训练模型+fine-tune** + + +基于预训练模型,在生成的1500图片上进行fine-tune训练和评估,其中train数据1200张,val数据300张,修改配置文件`configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml`中的以下字段: +``` +Global.epoch_num: 这里设置为1,方便快速跑通,实际中根据数据量调整该值 +Global.save_model_dir:模型保存路径 +Global.pretrained_model:指向预训练模型路径,'./pretrain_models/en_PP-OCRv3_det_distill_train/student.pdparams' +Optimizer.lr.learning_rate:调整学习率,本实验设置为0.0005 +Train.dataset.data_dir:指向训练集图片存放目录,'/home/aistudio/dataset' +Train.dataset.label_file_list:指向训练集标注文件,'/home/aistudio/dataset/det_gt_train.txt' +Train.dataset.transforms.EastRandomCropData.size:训练尺寸改为[480,64] +Eval.dataset.data_dir:指向验证集图片存放目录,'/home/aistudio/dataset/' +Eval.dataset.label_file_list:指向验证集标注文件,'/home/aistudio/dataset/det_gt_val.txt' +Eval.dataset.transforms.DetResizeForTest:评估尺寸,添加如下参数 + limit_side_len: 64 + limit_type:'min' +``` +执行下面命令启动训练: + + +```python +cd /home/aistudio/PaddleOCR/ +python tools/train.py \ + -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml +``` + +**模型评估** + + +使用训练好的模型进行评估,更新模型路径`Global.checkpoints`: + + +```python +cd /home/aistudio/PaddleOCR/ +python3 tools/eval.py \ + -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml \ + -o Global.checkpoints="./output/ch_PP-OCR_V3_det/latest" +``` + +使用训练好的模型进行评估,指标如下所示: + + +| 序号 | 方案 | hmean | 效果提升 | 实验分析 | +| -------- | -------- | -------- | -------- | -------- | +| 1 | PP-OCRv3英文超轻量检测预训练模型 | 64.64% | - | 提供的预训练模型具有泛化能力 | +| 2 | PP-OCRv3英文超轻量检测预训练模型 + 验证集padding | 72.13% |+7.5% | padding可以提升尺寸较小图片的检测效果| +| 3 | PP-OCRv3英文超轻量检测预训练模型 + fine-tune | 100% | +27.9% | fine-tune会提升垂类场景效果 | + + +``` +注:上述实验结果均是在1500张图片(1200张训练集,300张测试集)上训练、评估的得到,AIstudio只提供了100张数据,所以指标有所差异属于正常,只要策略有效、规律相同即可。 +``` + +# 5. 文本识别 + +我们分别使用如下4种方案进行训练、评估: + +- **方案1**:**PP-OCRv3中英文超轻量识别预训练模型直接评估** +- **方案2**:PP-OCRv3中英文超轻量检测预训练模型 + **fine-tune** +- **方案3**:PP-OCRv3中英文超轻量检测预训练模型 + fine-tune + **公开通用识别数据集** +- **方案4**:PP-OCRv3中英文超轻量检测预训练模型 + fine-tune + **增加PCB图像数量** + + +## **5.1 预训练模型直接评估** + +同检测模型,我们首先使用PaddleOCR提供的识别预训练模型在PCB验证集上进行评估。 + +使用预训练模型直接评估步骤如下: + +**1)下载预训练模型** + + +我们使用PP-OCRv3中英文超轻量文本识别模型,下载并解压预训练模型: + + +```python +# 如果更换其他模型,更新下载链接和解压指令就可以 +cd /home/aistudio/PaddleOCR/pretrain_models/ +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar +tar xf ch_PP-OCRv3_rec_train.tar && rm -rf ch_PP-OCRv3_rec_train.tar +cd .. 
+``` + +**模型评估** + + +首先修改配置文件`configs/det/ch_PP-OCRv3/ch_PP-OCRv2_rec_distillation.yml`中的以下字段: + +``` +Metric.ignore_space: True:忽略空格 +Eval.dataset.data_dir:指向验证集图片存放目录,'/home/aistudio/dataset' +Eval.dataset.label_file_list:指向验证集标注文件,'/home/aistudio/dataset/rec_gt_val.txt' +``` + +我们使用下载的预训练模型进行评估: + + +```python +cd /home/aistudio/PaddleOCR +python3 tools/eval.py \ + -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml \ + -o Global.checkpoints=pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy + +``` + +## **5.2 三种fine-tune方案** + +方案2、3、4训练和评估方式是相同的,因此在我们了解每个技术方案之后,再具体看修改哪些参数是相同,哪些是不同的。 + +**方案介绍:** + +1) **方案2**:预训练模型 + **fine-tune** + +- 在预训练模型的基础上进行fine-tune,使用1500张PCB进行训练和评估,其中训练集1200张,验证集300张。 + + +2) **方案3**:预训练模型 + fine-tune + **公开通用识别数据集** + +- 当识别数据比较少的情况,可以考虑添加公开通用识别数据集。在方案2的基础上,添加公开通用识别数据集,如lsvt、rctw等。 + +3)**方案4**:预训练模型 + fine-tune + **增加PCB图像数量** + +- 如果能够获取足够多真实场景,我们可以通过增加数据量提升模型效果。在方案2的基础上,增加PCB的数量到2W张左右。 + + +**参数修改:** + +接着我们看需要修改的参数,以上方案均需要修改配置文件`configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml`的参数,**修改一次即可**: + +``` +Global.pretrained_model:指向预训练模型路径,'pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy' +Optimizer.lr.values:学习率,本实验设置为0.0005 +Train.loader.batch_size_per_card: batch size,默认128,因为数据量小于128,因此我们设置为8,数据量大可以按默认的训练 +Eval.loader.batch_size_per_card: batch size,默认128,设置为4 +Metric.ignore_space: 忽略空格,本实验设置为True +``` + +**更换不同的方案**每次需要修改的参数: +``` +Global.epoch_num: 这里设置为1,方便快速跑通,实际中根据数据量调整该值 +Global.save_model_dir:指向模型保存路径 +Train.dataset.data_dir:指向训练集图片存放目录 +Train.dataset.label_file_list:指向训练集标注文件 +Eval.dataset.data_dir:指向验证集图片存放目录 +Eval.dataset.label_file_list:指向验证集标注文件 +``` + +同时**方案3**修改以下参数 +``` +Eval.dataset.label_file_list:添加公开通用识别数据标注文件 +Eval.dataset.ratio_list:数据和公开通用识别数据每次采样比例,按实际修改即可 +``` +如 **图5** 所示: +
+
图5 添加公开通用识别数据配置文件示例
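+
+下面是`label_file_list`与`ratio_list`搭配使用的一个示意配置(以Train.dataset为例,公开数据路径为假设值,请按实际情况修改):
+
+```
+Train:
+  dataset:
+    label_file_list:
+      - /home/aistudio/dataset/rec_gt_train.txt  # PCB识别训练数据
+      - /path/to/public_rec_gt.txt               # 公开通用识别数据(假设路径)
+    ratio_list:
+      - 1.0   # PCB数据每个epoch全部采样
+      - 0.5   # 公开数据每个epoch采样50%
+```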
+ + +我们提取Student模型的参数,在PCB数据集上进行fine-tune,可以参考如下代码: + + +```python +import paddle +# 加载预训练模型 +all_params = paddle.load("./pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy.pdparams") +# 查看权重参数的keys +print(all_params.keys()) +# 学生模型的权重提取 +s_params = {key[len("student_model."):]: all_params[key] for key in all_params if "student_model." in key} +# 查看学生模型权重参数的keys +print(s_params.keys()) +# 保存 +paddle.save(s_params, "./pretrain_models/ch_PP-OCRv3_rec_train/student.pdparams") +``` + +修改参数后,**每个方案**都执行如下命令启动训练: + + + +```python +cd /home/aistudio/PaddleOCR/ +python3 tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml +``` + + +使用训练好的模型进行评估,更新模型路径`Global.checkpoints`: + + +```python +cd /home/aistudio/PaddleOCR/ +python3 tools/eval.py \ + -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml \ + -o Global.checkpoints=./output/rec_ppocr_v3/latest +``` + +所有方案评估指标如下: + +| 序号 | 方案 | acc | 效果提升 | 实验分析 | +| -------- | -------- | -------- | -------- | -------- | +| 1 | PP-OCRv3中英文超轻量识别预训练模型直接评估 | 46.67% | - | 提供的预训练模型具有泛化能力 | +| 2 | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune | 42.02% |-4.6% | 在数据量不足的情况,反而比预训练模型效果低(也可以通过调整超参数再试试)| +| 3 | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune + 公开通用识别数据集 | 77% | +30% | 在数据量不足的情况下,可以考虑补充公开数据训练 | +| 4 | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune + 增加PCB图像数量 | 99.99% | +23% | 如果能获取更多数据量的情况,可以通过增加数据量提升效果 | + +``` +注:上述实验结果均是在1500张图片(1200张训练集,300张测试集)、2W张图片、添加公开通用识别数据集上训练、评估的得到,AIstudio只提供了100张数据,所以指标有所差异属于正常,只要策略有效、规律相同即可。 +``` + +# 6. 模型导出 + +inference 模型(paddle.jit.save保存的模型) 一般是模型训练,把模型结构和模型参数保存在文件中的固化模型,多用于预测部署场景。 训练过程中保存的模型是checkpoints模型,保存的只有模型的参数,多用于恢复训练等。 与checkpoints模型相比,inference 模型会额外保存模型的结构信息,在预测部署、加速推理上性能优越,灵活方便,适合于实际系统集成。 + + +```python +# 导出检测模型 +python3 tools/export_model.py \ + -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml \ + -o Global.pretrained_model="./output/ch_PP-OCR_V3_det/latest" \ + Global.save_inference_dir="./inference_model/ch_PP-OCR_V3_det/" +``` + +因为上述模型只训练了1个epoch,因此我们使用训练最优的模型进行预测,存储在`/home/aistudio/best_models/`目录下,解压即可 + + +```python +cd /home/aistudio/best_models/ +wget https://paddleocr.bj.bcebos.com/fanliku/PCB/det_ppocr_v3_en_infer_PCB.tar +tar xf /home/aistudio/best_models/det_ppocr_v3_en_infer_PCB.tar -C /home/aistudio/PaddleOCR/pretrain_models/ +``` + + +```python +# 检测模型inference模型预测 +cd /home/aistudio/PaddleOCR/ +python3 tools/infer/predict_det.py \ + --image_dir="/home/aistudio/dataset/imgs/0000.jpg" \ + --det_algorithm="DB" \ + --det_model_dir="./pretrain_models/det_ppocr_v3_en_infer_PCB/" \ + --det_limit_side_len=48 \ + --det_limit_type='min' \ + --det_db_unclip_ratio=2.5 \ + --use_gpu=True +``` + +结果存储在`inference_results`目录下,检测如下图所示: +
+
图6 检测结果
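+
+除命令行脚本外,也可以尝试用paddleocr whl包加载导出的检测模型进行预测,示意如下(参数含义与上面命令行一致,具体参数是否可用请以所安装的paddleocr版本为准):
+
+```python
+from paddleocr import PaddleOCR
+
+ocr = PaddleOCR(det_model_dir='./pretrain_models/det_ppocr_v3_en_infer_PCB/',
+                det_limit_side_len=48,
+                det_limit_type='min',
+                det_db_unclip_ratio=2.5)
+# rec=False表示只做检测,返回每个文本框的四点坐标
+boxes = ocr.ocr('/home/aistudio/dataset/imgs/0000.jpg', rec=False, cls=False)
+```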
+
+
+同理,导出识别模型并进行推理。
+
+```python
+# 导出识别模型
+python3 tools/export_model.py \
+    -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml \
+    -o Global.pretrained_model="./output/rec_ppocr_v3/latest" \
+    Global.save_inference_dir="./inference_model/rec_ppocr_v3/"
+
+```
+
+同检测模型,识别模型也只训练了1个epoch,因此我们使用训练最优的模型进行预测,存储在`/home/aistudio/best_models/`目录下,解压即可。
+
+
+```python
+cd /home/aistudio/best_models/
+wget https://paddleocr.bj.bcebos.com/fanliku/PCB/rec_ppocr_v3_ch_infer_PCB.tar
+tar xf /home/aistudio/best_models/rec_ppocr_v3_ch_infer_PCB.tar -C /home/aistudio/PaddleOCR/pretrain_models/
+```
+
+
+```python
+# 识别模型inference模型预测
+cd /home/aistudio/PaddleOCR/
+python3 tools/infer/predict_rec.py \
+    --image_dir="../test_imgs/0000_rec.jpg" \
+    --rec_model_dir="./pretrain_models/rec_ppocr_v3_ch_infer_PCB" \
+    --rec_image_shape="3, 48, 320" \
+    --use_space_char=False \
+    --use_gpu=True
+```
+
+```python
+# 检测+识别模型inference模型预测
+cd /home/aistudio/PaddleOCR/
+python3 tools/infer/predict_system.py \
+    --image_dir="../test_imgs/0000.jpg" \
+    --det_model_dir="./pretrain_models/det_ppocr_v3_en_infer_PCB" \
+    --det_limit_side_len=48 \
+    --det_limit_type='min' \
+    --det_db_unclip_ratio=2.5 \
+    --rec_model_dir="./pretrain_models/rec_ppocr_v3_ch_infer_PCB" \
+    --rec_image_shape="3, 48, 320" \
+    --draw_img_save_dir=./det_rec_infer/ \
+    --use_space_char=False \
+    --use_angle_cls=False \
+    --use_gpu=True
+
+```
+
+端到端预测结果存储在`det_rec_infer`文件夹内,结果如下图所示:
+<div align=center>
+
图7 检测+识别结果
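+
+`system_results.txt`中每行保存一张图片的预测结果,图片名与json编码的结果之间用'\t'分隔(字段格式见下文端对端评测部分),可以用如下示意代码解析:
+
+```python
+import json
+
+with open('./det_rec_infer/system_results.txt', 'r', encoding='utf-8') as f:
+    for line in f:
+        img_name, annos = line.strip().split('\t')
+        for anno in json.loads(annos):
+            # 每个结果包含识别文本transcription和文本框四点坐标points
+            print(img_name, anno['transcription'], anno['points'])
+```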
+
+# 7. 端对端评测
+
+接下来介绍文本检测+文本识别的端对端指标评估方式。主要分为三步:
+
+1)首先运行`tools/infer/predict_system.py`,将`image_dir`改为需要评估的数据文件夹,得到保存的结果:
+
+
+```python
+# 检测+识别模型inference模型预测
+python3 tools/infer/predict_system.py \
+    --image_dir="../dataset/imgs/" \
+    --det_model_dir="./pretrain_models/det_ppocr_v3_en_infer_PCB" \
+    --det_limit_side_len=48 \
+    --det_limit_type='min' \
+    --det_db_unclip_ratio=2.5 \
+    --rec_model_dir="./pretrain_models/rec_ppocr_v3_ch_infer_PCB" \
+    --rec_image_shape="3, 48, 320" \
+    --draw_img_save_dir=./det_rec_infer/ \
+    --use_space_char=False \
+    --use_angle_cls=False \
+    --use_gpu=True
+```
+
+得到保存结果,文本检测识别可视化图保存在`det_rec_infer/`目录下,预测结果保存在`det_rec_infer/system_results.txt`中,格式如下:`0018.jpg [{"transcription": "E295", "points": [[88, 33], [137, 33], [137, 40], [88, 40]]}]`
+
+2)然后将步骤一保存的数据转换为端对端评测需要的数据格式: 修改 `tools/end2end/convert_ppocr_label.py`中的代码,在convert_label函数中设置输入标签路径、Mode、保存标签路径等,对预测数据的GT label和预测结果的label格式进行转换。
+```
+ppocr_label_gt = "/home/aistudio/dataset/det_gt_val.txt"
+convert_label(ppocr_label_gt, "gt", "./save_gt_label/")
+
+ppocr_label_pred = "/home/aistudio/PaddleOCR/PCB_result/det_rec_infer/system_results.txt"
+convert_label(ppocr_label_pred, "pred", "./save_PPOCRV2_infer/")
+```
+
+运行`convert_ppocr_label.py`:
+
+
+```python
+python3 tools/end2end/convert_ppocr_label.py
+```
+
+得到如下结果:
+```
+├── ./save_gt_label/
+├── ./save_PPOCRV2_infer/
+```
+
+3) 最后,执行端对端评测,运行`tools/end2end/eval_end2end.py`计算端对端指标,运行方式如下:
+
+
+```python
+pip install editdistance
+python3 tools/end2end/eval_end2end.py ./save_gt_label/ ./save_PPOCRV2_infer/
+```
+
+使用`预训练模型+fine-tune`的检测模型、`预训练模型+fine-tune+增加2W张PCB图片`的识别模型,在300张PCB图片上评估得到如下结果,fmeasure为主要关注的指标:
+<div align=center>
+
图8 端到端评估指标
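+
+其中fmeasure由端对端的precision和recall计算得到,计算方式示意如下:
+
+```
+precision = 正确匹配的文本框数 / 预测文本框总数
+recall    = 正确匹配的文本框数 / 标注文本框总数
+fmeasure  = 2 * precision * recall / (precision + recall)
+```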
+ +``` +注: 使用上述命令不能跑出该结果,因为数据集不相同,可以更换为自己训练好的模型,按上述流程运行 +``` + +# 8. Jetson部署 + +我们只需要以下步骤就可以完成Jetson nano部署模型,简单易操作: + +**1、在Jetson nano开发版上环境准备:** + +* 安装PaddlePaddle + +* 下载PaddleOCR并安装依赖 + +**2、执行预测** + +* 将推理模型下载到jetson + +* 执行检测、识别、串联预测即可 + +详细[参考流程](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/deploy/Jetson/readme_ch.md)。 + +# 9. 总结 + +检测实验分别使用PP-OCRv3预训练模型在PCB数据集上进行了直接评估、验证集padding、 fine-tune 3种方案,识别实验分别使用PP-OCRv3预训练模型在PCB数据集上进行了直接评估、 fine-tune、添加公开通用识别数据集、增加PCB图片数量4种方案,指标对比如下: + +* 检测 + + +| 序号 | 方案 | hmean | 效果提升 | 实验分析 | +| ---- | -------------------------------------------------------- | ------ | -------- | ------------------------------------- | +| 1 | PP-OCRv3英文超轻量检测预训练模型直接评估 | 64.64% | - | 提供的预训练模型具有泛化能力 | +| 2 | PP-OCRv3英文超轻量检测预训练模型 + 验证集padding直接评估 | 72.13% | +7.5% | padding可以提升尺寸较小图片的检测效果 | +| 3 | PP-OCRv3英文超轻量检测预训练模型 + fine-tune | 100% | +27.9% | fine-tune会提升垂类场景效果 | + +* 识别 + +| 序号 | 方案 | acc | 效果提升 | 实验分析 | +| ---- | ------------------------------------------------------------ | ------ | -------- | ------------------------------------------------------------ | +| 1 | PP-OCRv3中英文超轻量识别预训练模型直接评估 | 46.67% | - | 提供的预训练模型具有泛化能力 | +| 2 | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune | 42.02% | -4.6% | 在数据量不足的情况,反而比预训练模型效果低(也可以通过调整超参数再试试) | +| 3 | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune + 公开通用识别数据集 | 77% | +30% | 在数据量不足的情况下,可以考虑补充公开数据训练 | +| 4 | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune + 增加PCB图像数量 | 99.99% | +23% | 如果能获取更多数据量的情况,可以通过增加数据量提升效果 | + +* 端到端 + +| det | rec | fmeasure | +| --------------------------------------------- | ------------------------------------------------------------ | -------- | +| PP-OCRv3英文超轻量检测预训练模型 + fine-tune | PP-OCRv3中英文超轻量识别预训练模型 + fine-tune + 增加PCB图像数量 | 93.3% | + +*结论* + +PP-OCRv3的检测模型在未经过fine-tune的情况下,在PCB数据集上也有64.64%的精度,说明具有泛化能力。验证集padding之后,精度提升7.5%,在图片尺寸较小的情况,我们可以通过padding的方式提升检测效果。经过 fine-tune 后能够极大的提升检测效果,精度达到100%。 + +PP-OCRv3的识别模型方案1和方案2对比可以发现,当数据量不足的情况,预训练模型精度可能比fine-tune效果还要高,所以我们可以先尝试预训练模型直接评估。如果在数据量不足的情况下想进一步提升模型效果,可以通过添加公开通用识别数据集,识别效果提升30%,非常有效。最后如果我们能够采集足够多的真实场景数据集,可以通过增加数据量提升模型效果,精度达到99.99%。 + +# 更多资源 + +- 更多深度学习知识、产业案例、面试宝典等,请参考:[awesome-DeepLearning](https://github.com/paddlepaddle/awesome-DeepLearning) + +- 更多PaddleOCR使用教程,请参考:[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph) + + +- 飞桨框架相关资料,请参考:[飞桨深度学习平台](https://www.paddlepaddle.org.cn/?fr=paddleEdu_aistudio) + +# 参考 + +* 数据生成代码库:https://github.com/zcswdt/Color_OCR_image_generator diff --git "a/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/background/bg.jpg" "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/background/bg.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..3cb6eab819c3b7d4f68590d2cdc9d36351590197 Binary files /dev/null and "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/background/bg.jpg" differ diff --git "a/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/corpus/text.txt" "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/corpus/text.txt" new file mode 100644 index 0000000000000000000000000000000000000000..8b8cb793ef6755bf00ed8c154e24638b07a519c2 --- /dev/null +++ "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/corpus/text.txt" @@ -0,0 +1,30 @@ +5ZQ +I4UL +PWL +SNOG +ZL02 +1C30 +O3H +YHRS +N03S +1U5Y +JTK +EN4F +YKJ +DWNH +R42W +X0V +4OF5 +08AM +Y93S +GWE2 +0KR +9U2A +DBQ +Y6J +ROZ +K06 +KIEY +NZQJ +UN1B +6X4 \ No newline at end of file diff --git 
"a/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/det_background/1.png" "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/det_background/1.png" new file mode 100644 index 0000000000000000000000000000000000000000..8a49eaa6862113044e05d17e32941a0a20911426 Binary files /dev/null and "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/det_background/1.png" differ diff --git "a/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/det_background/2.png" "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/det_background/2.png" new file mode 100644 index 0000000000000000000000000000000000000000..c3fcc0c92826b97b5f6abd970f1a0580eede0f5d Binary files /dev/null and "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/det_background/2.png" differ diff --git "a/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/gen.py" "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/gen.py" new file mode 100644 index 0000000000000000000000000000000000000000..4c768067f998b6b4bbe0b2f5982f46a3f01fc872 --- /dev/null +++ "b/applications/PCB\345\255\227\347\254\246\350\257\206\345\210\253/gen_data/gen.py" @@ -0,0 +1,261 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +This code is refer from: +https://github.com/zcswdt/Color_OCR_image_generator +""" +import os +import random +from PIL import Image, ImageDraw, ImageFont +import json +import argparse + + +def get_char_lines(txt_root_path): + """ + desc:get corpus line + """ + txt_files = os.listdir(txt_root_path) + char_lines = [] + for txt in txt_files: + f = open(os.path.join(txt_root_path, txt), mode='r', encoding='utf-8') + lines = f.readlines() + f.close() + for line in lines: + char_lines.append(line.strip()) + return char_lines + + +def get_horizontal_text_picture(image_file, chars, fonts_list, cf): + """ + desc:gen horizontal text picture + """ + img = Image.open(image_file) + if img.mode != 'RGB': + img = img.convert('RGB') + img_w, img_h = img.size + + # random choice font + font_path = random.choice(fonts_list) + # random choice font size + font_size = random.randint(cf.font_min_size, cf.font_max_size) + font = ImageFont.truetype(font_path, font_size) + + ch_w = [] + ch_h = [] + for ch in chars: + wt, ht = font.getsize(ch) + ch_w.append(wt) + ch_h.append(ht) + f_w = sum(ch_w) + f_h = max(ch_h) + + # add space + char_space_width = max(ch_w) + f_w += (char_space_width * (len(chars) - 1)) + + x1 = random.randint(0, img_w - f_w) + y1 = random.randint(0, img_h - f_h) + x2 = x1 + f_w + y2 = y1 + f_h + + crop_y1 = y1 + crop_x1 = x1 + crop_y2 = y2 + crop_x2 = x2 + + best_color = (0, 0, 0) + draw = ImageDraw.Draw(img) + for i, ch in enumerate(chars): + draw.text((x1, y1), ch, best_color, font=font) + x1 += (ch_w[i] + char_space_width) + crop_img = img.crop((crop_x1, crop_y1, crop_x2, crop_y2)) + return crop_img, chars + + +def get_vertical_text_picture(image_file, chars, fonts_list, cf): + """ + desc:gen vertical text picture + """ + img = Image.open(image_file) + if img.mode != 'RGB': + img = img.convert('RGB') + img_w, img_h = img.size + # random choice font + font_path = random.choice(fonts_list) + # random choice font size + font_size = random.randint(cf.font_min_size, cf.font_max_size) + font = ImageFont.truetype(font_path, font_size) + + ch_w = [] + ch_h = [] + for ch in chars: + wt, ht = font.getsize(ch) + ch_w.append(wt) + ch_h.append(ht) + f_w = max(ch_w) + f_h = sum(ch_h) + + x1 = random.randint(0, img_w - f_w) + y1 = random.randint(0, img_h - f_h) + x2 = x1 + f_w + y2 = y1 + f_h + + crop_y1 = y1 + crop_x1 = x1 + crop_y2 = y2 + crop_x2 = x2 + + best_color = (0, 0, 0) + draw = ImageDraw.Draw(img) + i = 0 + for ch in chars: + draw.text((x1, y1), ch, best_color, font=font) + y1 = y1 + ch_h[i] + i = i + 1 + crop_img = img.crop((crop_x1, crop_y1, crop_x2, crop_y2)) + crop_img = crop_img.transpose(Image.ROTATE_90) + return crop_img, chars + + +def get_fonts(fonts_path): + """ + desc: get all fonts + """ + font_files = os.listdir(fonts_path) + fonts_list=[] + for font_file in font_files: + font_path=os.path.join(fonts_path, font_file) + fonts_list.append(font_path) + return fonts_list + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--num_img', type=int, default=30, help="Number of images to generate") + parser.add_argument('--font_min_size', type=int, default=11) + parser.add_argument('--font_max_size', type=int, default=12, + help="Help adjust the size of the generated text and the size of the picture") + parser.add_argument('--bg_path', type=str, default='./background', + help='The generated text pictures will be pasted onto the pictures of this folder') + parser.add_argument('--det_bg_path', type=str, default='./det_background', + help='The generated 
text pictures will use the pictures of this folder as the background') + parser.add_argument('--fonts_path', type=str, default='../../StyleText/fonts', + help='The font used to generate the picture') + parser.add_argument('--corpus_path', type=str, default='./corpus', + help='The corpus used to generate the text picture') + parser.add_argument('--output_dir', type=str, default='./output/', help='Images save dir') + + + cf = parser.parse_args() + # save path + if not os.path.exists(cf.output_dir): + os.mkdir(cf.output_dir) + + # get corpus + txt_root_path = cf.corpus_path + char_lines = get_char_lines(txt_root_path=txt_root_path) + + # get all fonts + fonts_path = cf.fonts_path + fonts_list = get_fonts(fonts_path) + + # rec bg + img_root_path = cf.bg_path + imnames=os.listdir(img_root_path) + + # det bg + det_bg_path = cf.det_bg_path + bg_pics = os.listdir(det_bg_path) + + # OCR det files + det_val_file = open(cf.output_dir + 'det_gt_val.txt', 'w', encoding='utf-8') + det_train_file = open(cf.output_dir + 'det_gt_train.txt', 'w', encoding='utf-8') + # det imgs + det_save_dir = 'imgs/' + if not os.path.exists(cf.output_dir + det_save_dir): + os.mkdir(cf.output_dir + det_save_dir) + det_val_save_dir = 'imgs_val/' + if not os.path.exists(cf.output_dir + det_val_save_dir): + os.mkdir(cf.output_dir + det_val_save_dir) + + # OCR rec files + rec_val_file = open(cf.output_dir + 'rec_gt_val.txt', 'w', encoding='utf-8') + rec_train_file = open(cf.output_dir + 'rec_gt_train.txt', 'w', encoding='utf-8') + # rec imgs + rec_save_dir = 'rec_imgs/' + if not os.path.exists(cf.output_dir + rec_save_dir): + os.mkdir(cf.output_dir + rec_save_dir) + rec_val_save_dir = 'rec_imgs_val/' + if not os.path.exists(cf.output_dir + rec_val_save_dir): + os.mkdir(cf.output_dir + rec_val_save_dir) + + + val_ratio = cf.num_img * 0.2 # val dataset ratio + + print('start generating...') + for i in range(0, cf.num_img): + imname = random.choice(imnames) + img_path = os.path.join(img_root_path, imname) + + rnd = random.random() + # gen horizontal text picture + if rnd < 0.5: + gen_img, chars = get_horizontal_text_picture(img_path, char_lines[i], fonts_list, cf) + ori_w, ori_h = gen_img.size + gen_img = gen_img.crop((0, 3, ori_w, ori_h)) + # gen vertical text picture + else: + gen_img, chars = get_vertical_text_picture(img_path, char_lines[i], fonts_list, cf) + ori_w, ori_h = gen_img.size + gen_img = gen_img.crop((3, 0, ori_w, ori_h)) + + ori_w, ori_h = gen_img.size + + # rec imgs + save_img_name = str(i).zfill(4) + '.jpg' + if i < val_ratio: + save_dir = os.path.join(rec_val_save_dir, save_img_name) + line = save_dir + '\t' + char_lines[i] + '\n' + rec_val_file.write(line) + else: + save_dir = os.path.join(rec_save_dir, save_img_name) + line = save_dir + '\t' + char_lines[i] + '\n' + rec_train_file.write(line) + gen_img.save(cf.output_dir + save_dir, quality = 95, subsampling=0) + + # det img + # random choice bg + bg_pic = random.sample(bg_pics, 1)[0] + det_img = Image.open(os.path.join(det_bg_path, bg_pic)) + # the PCB position is fixed, modify it according to your own scenario + if bg_pic == '1.png': + x1 = 38 + y1 = 3 + else: + x1 = 34 + y1 = 1 + + det_img.paste(gen_img, (x1, y1)) + # text pos + chars_pos = [[x1, y1], [x1 + ori_w, y1], [x1 + ori_w, y1 + ori_h], [x1, y1 + ori_h]] + label = [{"transcription":char_lines[i], "points":chars_pos}] + if i < val_ratio: + save_dir = os.path.join(det_val_save_dir, save_img_name) + det_val_file.write(save_dir + '\t' + json.dumps( + label, ensure_ascii=False) + '\n') + else: + save_dir 
= os.path.join(det_save_dir, save_img_name) + det_train_file.write(save_dir + '\t' + json.dumps( + label, ensure_ascii=False) + '\n') + det_img.save(cf.output_dir + save_dir, quality = 95, subsampling=0) diff --git a/applications/README.md b/applications/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5135dfac10b0b49d1c91eb07202bd3929c7bbb7c --- /dev/null +++ b/applications/README.md @@ -0,0 +1,41 @@ +# 场景应用 + +PaddleOCR场景应用覆盖通用,制造、金融、交通行业的主要OCR垂类应用,在PP-OCR、PP-Structure的通用能力基础之上,以notebook的形式展示利用场景数据微调、模型优化方法、数据增广等内容,为开发者快速落地OCR应用提供示范与启发。 + +> 如需下载全部垂类模型,可以扫描下方二维码,关注公众号填写问卷后,加入PaddleOCR官方交流群获取20G OCR学习大礼包(内含《动手学OCR》电子书、课程回放视频、前沿论文等重磅资料) + +
+ +
+ + +> 如果您是企业开发者且未在下述场景中找到合适的方案,可以填写[OCR应用合作调研问卷](https://paddle.wjx.cn/vj/QwF7GKw.aspx),免费与官方团队展开不同层次的合作,包括但不限于问题抽象、确定技术方案、项目答疑、共同研发等。如果您已经使用PaddleOCR落地项目,也可以填写此问卷,与飞桨平台共同宣传推广,提升企业技术品宣。期待您的提交! + +## 通用 + +| 类别 | 亮点 | 类别 | 亮点 | +| ---------------------- | -------- | ---------- | ------------ | +| 高精度中文识别模型SVTR | 新增模型 | 手写体识别 | 新增字形支持 | + +## 制造 + +| 类别 | 亮点 | 类别 | 亮点 | +| -------------- | ------------------------------ | -------------- | -------------------- | +| 数码管识别 | 数码管数据合成、漏识别调优 | 电表识别 | 大分辨率图像检测调优 | +| 液晶屏读数识别 | 检测模型蒸馏、Serving部署 | PCB文字识别 | 小尺寸文本检测与识别 | +| 包装生产日期 | 点阵字符合成、过曝过暗文字识别 | 液晶屏缺陷检测 | 非文字形态识别 | + +## 金融 + +| 类别 | 亮点 | 类别 | 亮点 | +| -------------- | ------------------------ | ------------ | --------------------- | +| 表单VQA | 多模态通用表单结构化提取 | 通用卡证识别 | 通用结构化提取 | +| 增值税发票 | 尽请期待 | 身份证识别 | 结构化提取、图像阴影 | +| 印章检测与识别 | 端到端弯曲文本识别 | 合同比对 | 密集文本检测、NLP串联 | + +## 交通 + +| 类别 | 亮点 | 类别 | 亮点 | +| ----------------- | ------------------------------ | ---------- | -------- | +| 车牌识别 | 多角度图像、轻量模型、端侧部署 | 快递单识别 | 尽请期待 | +| 驾驶证/行驶证识别 | 尽请期待 | | | \ No newline at end of file diff --git a/applications/corpus/digital.txt b/applications/corpus/digital.txt new file mode 100644 index 0000000000000000000000000000000000000000..26b06e784e779bdf97dfaa9b3ba8d4c0155b0718 --- /dev/null +++ b/applications/corpus/digital.txt @@ -0,0 +1,43 @@ +46.39 +40.08 +89.52 +-71.93 +23.19 +-81.02 +-34.09 +05.87 +-67.80 +-51.56 +-34.58 +37.91 +56.98 +29.01 +-90.13 +35.55 +66.07 +-90.35 +-50.93 +42.42 +21.40 +-30.99 +-71.78 +25.60 +-48.69 +-72.28 +-17.55 +-99.93 +-47.35 +-64.89 +-31.28 +-90.01 +05.17 +30.91 +30.56 +-06.90 +79.05 +67.74 +-32.31 +94.22 +28.75 +51.03 +-58.96 diff --git a/applications/fonts/DS-DIGI.TTF b/applications/fonts/DS-DIGI.TTF new file mode 100644 index 0000000000000000000000000000000000000000..09258773c7219ad9e60b92b918e3c50b58f43c9e Binary files /dev/null and b/applications/fonts/DS-DIGI.TTF differ diff --git a/applications/fonts/DS-DIGIB.TTF b/applications/fonts/DS-DIGIB.TTF new file mode 100644 index 0000000000000000000000000000000000000000..064ad478a510a9c581bb16d295f659256e1f920a Binary files /dev/null and b/applications/fonts/DS-DIGIB.TTF differ diff --git "a/applications/\345\205\211\345\212\237\347\216\207\350\256\241\346\225\260\347\240\201\347\256\241\345\255\227\347\254\246\350\257\206\345\210\253.md" "b/applications/\345\205\211\345\212\237\347\216\207\350\256\241\346\225\260\347\240\201\347\256\241\345\255\227\347\254\246\350\257\206\345\210\253.md" new file mode 100644 index 0000000000000000000000000000000000000000..2a35cb170ef165faa6566e0059cca8364b7a6da6 --- /dev/null +++ "b/applications/\345\205\211\345\212\237\347\216\207\350\256\241\346\225\260\347\240\201\347\256\241\345\255\227\347\254\246\350\257\206\345\210\253.md" @@ -0,0 +1,467 @@ +# 光功率计数码管字符识别 + +本案例将使用OCR技术自动识别光功率计显示屏文字,通过本章您可以掌握: + +- PaddleOCR快速使用 +- 数据合成方法 +- 数据挖掘方法 +- 基于现有数据微调 + +## 1. 
背景介绍

+光功率计(optical power meter)是指用于测量绝对光功率或通过一段光纤的光功率相对损耗的仪器。在光纤系统中,测量光功率是最基本的,非常像电子学中的万用表;在光纤测量中,光功率计是重负荷常用表。
+
+
+
+目前光功率计缺少将数据直接输出的功能,需要人工读数。这一项工作单调重复,如果可以使用机器替代人工,将节约大量成本。针对上述问题,希望通过摄像头拍照->智能读数的方式高效地完成此任务。
+
+为实现智能读数,通常会采取文本检测+文本识别的方案:
+
+第一步,使用文本检测模型定位出光功率计中的数字部分;
+
+第二步,使用文本识别模型获得准确的数字和单位信息。
+
+本项目主要介绍如何完成第二步文本识别部分,包括:真实评估集的建立、训练数据的合成、基于 PP-OCRv3 和 SVTR_Tiny 两个模型进行训练,以及评估和推理。
+
+本项目难点如下:
+
+- 光功率计数码管字符数据较少,难以获取。
+- 数码管中小数点占像素较少,容易漏识别。
+
+针对以上问题,本例选用 PP-OCRv3 和 SVTR_Tiny 两个高精度模型训练,同时提供了真实数据挖掘案例和数据合成案例。基于 PP-OCRv3 模型,在构建的真实评估集上精度从 52% 提升至 72%,SVTR_Tiny 模型精度可达到 78.9%。
+
+AIStudio项目链接: [光功率计数码管字符识别](https://aistudio.baidu.com/aistudio/projectdetail/4049044?contributionType=1)
+
+## 2. PaddleOCR 快速使用
+
+PaddleOCR 旨在打造一套丰富、领先、且实用的OCR工具库,助力开发者训练出更好的模型,并应用落地。
+
+![](https://github.com/PaddlePaddle/PaddleOCR/raw/release/2.5/doc/imgs_results/ch_ppocr_mobile_v2.0/test_add_91.jpg)
+
+
+官方提供了适用于通用场景的高精轻量模型,首先使用官方提供的 PP-OCRv3 模型预测图片,验证一下当前模型在光功率计场景上的效果。
+
+- 准备环境
+
+```
+python3 -m pip install -U pip
+python3 -m pip install paddleocr
+```
+
+
+- 测试效果
+
+测试图:
+
+![](https://ai-studio-static-online.cdn.bcebos.com/8dca91f016884e16ad9216d416da72ea08190f97d87b4be883f15079b7ebab9a)
+
+
+```
+paddleocr --lang=ch --det=False --image_dir=data
+```
+
+得到如下测试结果:
+
+```
+('.7000', 0.6885431408882141)
+```
+
+发现数字识别较准,然而对负号和小数点识别不准确。由于PP-OCRv3的训练数据大多为通用场景数据,在特定的场景上效果可能不够好,因此需要基于场景数据进行微调。
+
+下面就主要介绍如何在光功率计(数码管)场景上微调训练。
+
+
+## 3. 开始训练
+
+### 3.1 数据准备
+
+特定的工业场景往往很难获取开源的真实数据集,光功率计也是如此。在实际工业场景中,可以通过摄像头采集的方法收集大量真实数据,本例中重点介绍数据合成方法和真实数据挖掘方法,讲解如何利用有限的数据优化模型精度。
+
+数据集分为两个部分:合成数据和真实数据。其中合成数据由 text_renderer 工具批量生成,真实数据通过爬虫等方式在百度图片中搜索并使用 PPOCRLabel 标注得到。
+
+
+- 合成数据
+
+本例中数据合成工具使用的是 [text_renderer](https://github.com/Sanster/text_renderer), 该工具可以合成用于文本识别训练的文本行数据:
+
+![](https://github.com/oh-my-ocr/text_renderer/raw/master/example_data/effect_layout_image/char_spacing_compact.jpg)
+
+![](https://github.com/oh-my-ocr/text_renderer/raw/master/example_data/effect_layout_image/color_image.jpg)
+
+
+```
+export https_proxy=http://172.19.57.45:3128
+git clone https://github.com/oh-my-ocr/text_renderer
+```
+
+```
+cd text_renderer
+python3 setup.py develop
+python3 -m pip install -r docker/requirements.txt
+python3 main.py \
+    --config example_data/example.py \
+    --dataset img \
+    --num_processes 2 \
+    --log_period 10
+```
+
+给定字体和语料,就可以合成较为丰富样式的文本行数据。光功率计识别场景的目标是正确识别数码管文本,因此需要收集部分数码管字体和训练语料,用于合成文本识别数据。
+
+将收集好的字体和语料存放在 example_data 路径下:
+
+```
+ln -s ./fonts/DS* text_renderer/example_data/font/
+ln -s ./corpus/digital.txt text_renderer/example_data/text/
+```
+
+修改 text_renderer/example_data/font_list/font_list.txt ,选择需要的字体开始合成:
+
+```
+python3 main.py \
+    --config example_data/digital_example.py \
+    --dataset img \
+    --num_processes 2 \
+    --log_period 10
+```
+
+合成图片会被存放在目录 text_renderer/example_data/digital/chn_data 下。
+
+查看合成的数据样例:
+
+![](https://ai-studio-static-online.cdn.bcebos.com/7d5774a273f84efba5b9ce7fd3f86e9ef24b6473e046444db69fa3ca20ac0986)
+
+
+- 真实数据挖掘
+
+模型训练需要使用真实数据作为评价指标,否则很容易过拟合到简单的合成数据中。没有开源数据的情况下,可以利用部分无标注数据+标注工具获得真实数据。
+
+
+1. 数据搜集
+
+使用[爬虫工具](https://github.com/Joeclinton1/google-images-download.git)获得无标注数据
+
+2. 
[PPOCRLabel](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.5/PPOCRLabel) 完成半自动标注 + +PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置PP-OCR模型对数据自动标注和重新识别。使用Python3和PyQT5编写,支持矩形框标注、表格标注、不规则文本标注、关键信息标注模式,导出格式可直接用于PaddleOCR检测和识别模型的训练。 + +![](https://github.com/PaddlePaddle/PaddleOCR/raw/release/2.5/PPOCRLabel/data/gif/steps_en.gif) + + +收集完数据后就可以进行分配了,验证集中一般都是真实数据,训练集中包含合成数据+真实数据。本例中标注了155张图片,其中训练集和验证集的数目为100和55。 + + +最终 `data` 文件夹应包含以下几部分: + +``` +|-data + |- synth_train.txt + |- real_train.txt + |- real_eval.txt + |- synthetic_data + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... + |- real_data + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... + ... +``` + +### 3.2 模型选择 + +本案例提供了2种文本识别模型:PP-OCRv3 识别模型 和 SVTR_Tiny: + +[PP-OCRv3 识别模型](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/doc/doc_ch/PP-OCRv3_introduction.md):PP-OCRv3的识别模块是基于文本识别算法SVTR优化。SVTR不再采用RNN结构,通过引入Transformers结构更加有效地挖掘文本行图像的上下文信息,从而提升文本识别能力。并进行了一系列结构改进加速模型预测。 + +[SVTR_Tiny](https://arxiv.org/abs/2205.00159):SVTR提出了一种用于场景文本识别的单视觉模型,该模型在patch-wise image tokenization框架内,完全摒弃了序列建模,在精度具有竞争力的前提下,模型参数量更少,速度更快。 + +以上两个策略在自建中文数据集上的精度和速度对比如下: + +| ID | 策略 | 模型大小 | 精度 | 预测耗时(CPU + MKLDNN)| +|-----|-----|--------|----| --- | +| 01 | PP-OCRv2 | 8M | 74.8% | 8.54ms | +| 02 | SVTR_Tiny | 21M | 80.1% | 97ms | +| 03 | SVTR_LCNet(h32) | 12M | 71.9% | 6.6ms | +| 04 | SVTR_LCNet(h48) | 12M | 73.98% | 7.6ms | +| 05 | + GTC | 12M | 75.8% | 7.6ms | +| 06 | + TextConAug | 12M | 76.3% | 7.6ms | +| 07 | + TextRotNet | 12M | 76.9% | 7.6ms | +| 08 | + UDML | 12M | 78.4% | 7.6ms | +| 09 | + UIM | 12M | 79.4% | 7.6ms | + + +### 3.3 开始训练 + +首先下载 PaddleOCR 代码库 + +``` +git clone -b release/2.5 https://github.com/PaddlePaddle/PaddleOCR.git +``` + +PaddleOCR提供了训练脚本、评估脚本和预测脚本,本节将以 PP-OCRv3 中文识别模型为例: + +**Step1:下载预训练模型** + +首先下载 pretrain model,您可以下载训练好的模型在自定义数据上进行finetune + +``` +cd PaddleOCR/ +# 下载PP-OCRv3 中文预训练模型 +wget -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar +# 解压模型参数 +cd pretrain_models +tar -xf ch_PP-OCRv3_rec_train.tar && rm -rf ch_PP-OCRv3_rec_train.tar +``` + +**Step2:自定义字典文件** + +接下来需要提供一个字典({word_dict_name}.txt),使模型在训练时,可以将所有出现的字符映射为字典的索引。 + +因此字典需要包含所有希望被正确识别的字符,{word_dict_name}.txt需要写成如下格式,并以 `utf-8` 编码格式保存: + +``` +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +- +. +``` + +word_dict.txt 每行有一个单字,将字符与数字索引映射在一起,“3.14” 将被映射成 [3, 11, 1, 4] + +* 内置字典 + +PaddleOCR内置了一部分字典,可以按需使用。 + +`ppocr/utils/ppocr_keys_v1.txt` 是一个包含6623个字符的中文字典 + +`ppocr/utils/ic15_dict.txt` 是一个包含36个字符的英文字典 + +* 自定义字典 + +内置字典面向通用场景,具体的工业场景中,可能需要识别特殊字符,或者只需识别某几个字符,此时自定义字典会更提升模型精度。例如在光功率计场景中,需要识别数字和单位。 + +遍历真实数据标签中的字符,制作字典`digital_dict.txt`如下所示: + +``` +- +. +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +B +E +F +H +L +N +T +W +d +k +m +n +o +z +``` + + + + +**Step3:修改配置文件** + +为了更好的使用预训练模型,训练推荐使用[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)配置文件,并参考下列说明修改配置文件: + +以 `ch_PP-OCRv3_rec_distillation.yml` 为例: +``` +Global: + ... + # 添加自定义字典,如修改字典请将路径指向新字典 + character_dict_path: ppocr/utils/dict/digital_dict.txt + ... + # 识别空格 + use_space_char: True + + +Optimizer: + ... + # 添加学习率衰减策略 + lr: + name: Cosine + learning_rate: 0.001 + ... + +... + +Train: + dataset: + # 数据集格式,支持LMDBDataSet以及SimpleDataSet + name: SimpleDataSet + # 数据集路径 + data_dir: ./data/ + # 训练集标签文件 + label_file_list: + - ./train_data/digital_img/digital_train.txt #11w + - ./train_data/digital_img/real_train.txt #100 + - ./train_data/digital_img/dbm_img/dbm.txt #3w + ratio_list: + - 0.3 + - 1.0 + - 1.0 + transforms: + ... 
+ - RecResizeImg: + # 修改 image_shape 以适应长文本 + image_shape: [3, 48, 320] + ... + loader: + ... + # 单卡训练的batch_size + batch_size_per_card: 256 + ... + +Eval: + dataset: + # 数据集格式,支持LMDBDataSet以及SimpleDataSet + name: SimpleDataSet + # 数据集路径 + data_dir: ./data + # 验证集标签文件 + label_file_list: + - ./train_data/digital_img/real_val.txt + transforms: + ... + - RecResizeImg: + # 修改 image_shape 以适应长文本 + image_shape: [3, 48, 320] + ... + loader: + # 单卡验证的batch_size + batch_size_per_card: 256 + ... +``` +**注意,训练/预测/评估时的配置文件请务必与训练一致。** + +**Step4:启动训练** + +*如果您安装的是cpu版本,请将配置文件中的 `use_gpu` 字段修改为false* + +``` +# GPU训练 支持单卡,多卡训练 +# 训练数码管数据 训练日志会自动保存为 "{save_model_dir}" 下的train.log + +#单卡训练(训练周期长,不建议) +python3 tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.pretrained_model=./pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy + +#多卡训练,通过--gpus参数指定卡号 +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.pretrained_model=./pretrain_models/en_PP-OCRv3_rec_train/best_accuracy +``` + + +PaddleOCR支持训练和评估交替进行, 可以在 `configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml` 中修改 `eval_batch_step` 设置评估频率,默认每500个iter评估一次。评估过程中默认将最佳acc模型,保存为 `output/ch_PP-OCRv3_rec_distill/best_accuracy` 。 + +如果验证集很大,测试将会比较耗时,建议减少评估次数,或训练完再进行评估。 + +### SVTR_Tiny 训练 + +SVTR_Tiny 训练步骤与上面一致,SVTR支持的配置和模型训练权重可以参考[算法介绍文档](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/doc/doc_ch/algorithm_rec_svtr.md) + +**Step1:下载预训练模型** + +``` +# 下载 SVTR_Tiny 中文识别预训练模型和配置文件 +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/rec_svtr_tiny_none_ctc_ch_train.tar +# 解压模型参数 +tar -xf rec_svtr_tiny_none_ctc_ch_train.tar && rm -rf rec_svtr_tiny_none_ctc_ch_train.tar +``` +**Step2:自定义字典文件** + +字典依然使用自定义的 digital_dict.txt + +**Step3:修改配置文件** + +配置文件中对应修改字典路径和数据路径 + +**Step4:启动训练** + +``` +## 单卡训练 +python tools/train.py -c rec_svtr_tiny_none_ctc_ch_train/rec_svtr_tiny_6local_6global_stn_ch.yml \ + -o Global.pretrained_model=./rec_svtr_tiny_none_ctc_ch_train/best_accuracy +``` + +### 3.4 验证效果 + +如需获取已训练模型,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁 +
+ +
+将下载或训练完成的模型放置在对应目录下即可完成模型推理 + +* 指标评估 + +训练中模型参数默认保存在`Global.save_model_dir`目录下。在评估指标时,需要设置`Global.checkpoints`指向保存的参数文件。评估数据集可以通过 `configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml` 修改Eval中的 `label_file_path` 设置。 + +``` +# GPU 评估, Global.checkpoints 为待测权重 +python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.checkpoints={path/to/weights}/best_accuracy +``` + +* 测试识别效果 + +使用 PaddleOCR 训练好的模型,可以通过以下脚本进行快速预测。 + +默认预测图片存储在 `infer_img` 里,通过 `-o Global.checkpoints` 加载训练好的参数文件: + +根据配置文件中设置的 `save_model_dir` 和 `save_epoch_step` 字段,会有以下几种参数被保存下来: + +``` +output/rec/ +├── best_accuracy.pdopt +├── best_accuracy.pdparams +├── best_accuracy.states +├── config.yml +├── iter_epoch_3.pdopt +├── iter_epoch_3.pdparams +├── iter_epoch_3.states +├── latest.pdopt +├── latest.pdparams +├── latest.states +└── train.log +``` + +其中 best_accuracy.* 是评估集上的最优模型;iter_epoch_x.* 是以 `save_epoch_step` 为间隔保存下来的模型;latest.* 是最后一个epoch的模型。 + +``` +# 预测英文结果 +python3 tools/infer_rec.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.pretrained_model={path/to/weights}/best_accuracy Global.infer_img=test_digital.png +``` + +预测图片: + +![](https://ai-studio-static-online.cdn.bcebos.com/8dca91f016884e16ad9216d416da72ea08190f97d87b4be883f15079b7ebab9a) + + +得到输入图像的预测结果: + +``` +infer_img: test_digital.png + result: ('-70.00', 0.9998967) +``` diff --git "a/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" "b/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" index d47bbe77045d502d82a5a8d8b8eca685963e6380..471ca633c143635b1715f054efa6924c1d0a1eab 100644 --- "a/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" +++ "b/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" @@ -1,4 +1,33 @@ -# 1 项目说明 +# 多模态表单识别 +- [多模态表单识别](#多模态表单识别) + - [1 项目说明](#1-项目说明) + - [2 安装说明](#2-安装说明) + - [3 数据准备](#3-数据准备) + - [3.1 下载处理好的数据集](#31-下载处理好的数据集) + - [3.2 转换为PaddleOCR检测和识别格式](#32-转换为paddleocr检测和识别格式) + - [4 OCR](#4-ocr) + - [4.1 文本检测](#41-文本检测) + - [4.1.1 方案1:预训练模型](#411-方案1预训练模型) + - [4.1.2 方案2:XFUND数据集+fine-tune](#412-方案2xfund数据集fine-tune) + - [4.2 文本识别](#42-文本识别) + - [4.2.1 方案1:预训练模型](#421-方案1预训练模型) + - [4.2.2 方案2:XFUND数据集+finetune](#422-方案2xfund数据集finetune) + - [4.2.3 方案3:XFUND数据集+finetune+真实通用识别数据](#423-方案3xfund数据集finetune真实通用识别数据) + - [5 文档视觉问答(DOC-VQA)](#5-文档视觉问答doc-vqa) + - [5.1 SER](#51-ser) + - [5.1.1 模型训练](#511-模型训练) + - [5.1.2 模型评估](#512-模型评估) + - [5.1.3 模型预测](#513-模型预测) + - [5.2 RE](#52-re) + - [5.2.1 模型训练](#521-模型训练) + - [5.2.2 模型评估](#522-模型评估) + - [5.2.3 模型预测](#523-模型预测) + - [6 导出Excel](#6-导出excel) + - [获得模型](#获得模型) + - [更多资源](#更多资源) + - [参考链接](#参考链接) + +## 1 项目说明 计算机视觉在金融领域的应用覆盖文字识别、图像识别、视频识别等,其中文字识别(OCR)是金融领域中的核心AI能力,其应用覆盖客户服务、风险防控、运营管理等各项业务,针对的对象包括通用卡证票据识别(银行卡、身份证、营业执照等)、通用文本表格识别(印刷体、多语言、手写体等)以及一些金融特色票据凭证。通过因此如果能够在结构化信息提取时同时利用文字、页面布局等信息,便可增强不同版式下的泛化性。 @@ -16,39 +45,37 @@
图1 多模态表单识别流程图
-注:欢迎再AIStudio领取免费算力体验线上实训,项目链接: [多模态表单识别](https://aistudio.baidu.com/aistudio/projectdetail/3884375)(配备Tesla V100、A100等高级算力资源) +注:欢迎再AIStudio领取免费算力体验线上实训,项目链接: [多模态表单识别](https://aistudio.baidu.com/aistudio/projectdetail/3884375?contributionType=1) - - -# 2 安装说明 +## 2 安装说明 下载PaddleOCR源码,上述AIStudio项目中已经帮大家打包好的PaddleOCR(已经修改好配置文件),无需下载解压即可,只需安装依赖环境~ ```python -! unzip -q PaddleOCR.zip +unzip -q PaddleOCR.zip ``` ```python # 如仍需安装or安装更新,可以执行以下步骤 -# ! git clone https://github.com/PaddlePaddle/PaddleOCR.git -b dygraph -# ! git clone https://gitee.com/PaddlePaddle/PaddleOCR +# git clone https://github.com/PaddlePaddle/PaddleOCR.git -b dygraph +# git clone https://gitee.com/PaddlePaddle/PaddleOCR ``` ```python # 安装依赖包 -! pip install -U pip -! pip install -r /home/aistudio/PaddleOCR/requirements.txt -! pip install paddleocr +pip install -U pip +pip install -r /home/aistudio/PaddleOCR/requirements.txt +pip install paddleocr -! pip install yacs gnureadline paddlenlp==2.2.1 -! pip install xlsxwriter +pip install yacs gnureadline paddlenlp==2.2.1 +pip install xlsxwriter ``` -# 3 数据准备 +## 3 数据准备 这里使用[XFUN数据集](https://github.com/doc-analysis/XFUND)做为实验数据集。 XFUN数据集是微软提出的一个用于KIE任务的多语言数据集,共包含七个数据集,每个数据集包含149张训练集和50张验证集 @@ -59,7 +86,7 @@
图2 数据集样例,左中文,右法语
-## 3.1 下载处理好的数据集 +### 3.1 下载处理好的数据集 处理好的XFUND中文数据集下载地址:[https://paddleocr.bj.bcebos.com/dataset/XFUND.tar](https://paddleocr.bj.bcebos.com/dataset/XFUND.tar) ,可以运行如下指令完成中文数据集下载和解压。 @@ -69,13 +96,13 @@ ```python -! wget https://paddleocr.bj.bcebos.com/dataset/XFUND.tar -! tar -xf XFUND.tar +wget https://paddleocr.bj.bcebos.com/dataset/XFUND.tar +tar -xf XFUND.tar # XFUN其他数据集使用下面的代码进行转换 # 代码链接:https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.4/ppstructure/vqa/helper/trans_xfun_data.py # %cd PaddleOCR -# !python3 ppstructure/vqa/tools/trans_xfun_data.py --ori_gt_path=path/to/json_path --output_path=path/to/save_path +# python3 ppstructure/vqa/tools/trans_xfun_data.py --ori_gt_path=path/to/json_path --output_path=path/to/save_path # %cd ../ ``` @@ -119,7 +146,7 @@ } ``` -## 3.2 转换为PaddleOCR检测和识别格式 +### 3.2 转换为PaddleOCR检测和识别格式 使用XFUND训练PaddleOCR检测和识别模型,需要将数据集格式改为训练需求的格式。 @@ -147,7 +174,7 @@ train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 ```python -! unzip -q /home/aistudio/data/data140302/XFUND_ori.zip -d /home/aistudio/data/data140302/ +unzip -q /home/aistudio/data/data140302/XFUND_ori.zip -d /home/aistudio/data/data140302/ ``` 已经提供转换脚本,执行如下代码即可转换成功: @@ -155,21 +182,20 @@ train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 ```python %cd /home/aistudio/ -! python trans_xfund_data.py +python trans_xfund_data.py ``` -# 4 OCR +## 4 OCR 选用飞桨OCR开发套件[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/README_ch.md)中的PP-OCRv2模型进行文本检测和识别。PP-OCRv2在PP-OCR的基础上,进一步在5个方面重点优化,检测模型采用CML协同互学习知识蒸馏策略和CopyPaste数据增广策略;识别模型采用LCNet轻量级骨干网络、UDML 改进知识蒸馏策略和[Enhanced CTC loss](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/enhanced_ctc_loss.md)损失函数改进,进一步在推理速度和预测效果上取得明显提升。更多细节请参考PP-OCRv2[技术报告](https://arxiv.org/abs/2109.03144)。 - -## 4.1 文本检测 +### 4.1 文本检测 我们使用2种方案进行训练、评估: - **PP-OCRv2中英文超轻量检测预训练模型** - **XFUND数据集+fine-tune** -### **4.1.1 方案1:预训练模型** +#### 4.1.1 方案1:预训练模型 **1)下载预训练模型** @@ -195,8 +221,8 @@ PaddleOCR已经提供了PP-OCR系列模型,部分模型展示如下表所示 ```python %cd /home/aistudio/PaddleOCR/pretrain/ -! wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar -! tar -xf ch_PP-OCRv2_det_distill_train.tar && rm -rf ch_PP-OCRv2_det_distill_train.tar +wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar +tar -xf ch_PP-OCRv2_det_distill_train.tar && rm -rf ch_PP-OCRv2_det_distill_train.tar % cd .. ``` @@ -226,7 +252,7 @@ Eval.dataset.label_file_list:指向验证集标注文件 ```python %cd /home/aistudio/PaddleOCR -! python tools/eval.py \ +python tools/eval.py \ -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_distill.yml \ -o Global.checkpoints="./pretrain_models/ch_PP-OCRv2_det_distill_train/best_accuracy" ``` @@ -237,9 +263,9 @@ Eval.dataset.label_file_list:指向验证集标注文件 | -------- | -------- | | PP-OCRv2中英文超轻量检测预训练模型 | 77.26% | -使用文本检测预训练模型在XFUND验证集上评估,达到77%左右,充分说明ppocr提供的预训练模型有一定的泛化能力。 +使用文本检测预训练模型在XFUND验证集上评估,达到77%左右,充分说明ppocr提供的预训练模型具有泛化能力。 -### **4.1.2 方案2:XFUND数据集+fine-tune** +#### 4.1.2 方案2:XFUND数据集+fine-tune PaddleOCR提供的蒸馏预训练模型包含了多个模型的参数,我们提取Student模型的参数,在XFUND数据集上进行finetune,可以参考如下代码: @@ -281,7 +307,7 @@ Eval.dataset.transforms.DetResizeForTest:评估尺寸,添加如下参数 ```python -! CUDA_VISIBLE_DEVICES=0 python tools/train.py \ +CUDA_VISIBLE_DEVICES=0 python tools/train.py \ -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_student.yml ``` @@ -290,12 +316,18 @@ Eval.dataset.transforms.DetResizeForTest:评估尺寸,添加如下参数
图8 文本检测方案2-模型评估
-使用训练好的模型进行评估,更新模型路径`Global.checkpoints`,这里为大家提供训练好的模型`./pretrain/ch_db_mv3-student1600-finetune/best_accuracy`,[模型下载地址](https://paddleocr.bj.bcebos.com/fanliku/sheet_recognition/ch_db_mv3-student1600-finetune.zip) +使用训练好的模型进行评估,更新模型路径`Global.checkpoints`。如需获取已训练模型,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁 + +
+ +
+ +将下载或训练完成的模型放置在对应目录下即可完成模型评估 ```python %cd /home/aistudio/PaddleOCR/ -! python tools/eval.py \ +python tools/eval.py \ -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_student.yml \ -o Global.checkpoints="pretrain/ch_db_mv3-student1600-finetune/best_accuracy" ``` @@ -305,7 +337,7 @@ Eval.dataset.transforms.DetResizeForTest:评估尺寸,添加如下参数 ```python %cd /home/aistudio/PaddleOCR/ -! python tools/eval.py \ +python tools/eval.py \ -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_student.yml \ -o Global.checkpoints="pretrain/ch_db_mv3-student1600/best_accuracy" ``` @@ -331,7 +363,7 @@ Eval.dataset.transforms.DetResizeForTest:评估尺寸,添加如下参数 # 加载配置文件`ch_PP-OCRv2_det_student.yml`,从`pretrain/ch_db_mv3-student1600-finetune`目录下加载`best_accuracy`模型 # inference模型保存在`./output/det_db_inference`目录下 %cd /home/aistudio/PaddleOCR/ -! python tools/export_model.py \ +python tools/export_model.py \ -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_student.yml \ -o Global.pretrained_model="pretrain/ch_db_mv3-student1600-finetune/best_accuracy" \ Global.save_inference_dir="./output/det_db_inference/" @@ -374,12 +406,11 @@ use_gpu:是否使用GPU | 方案 | hmeans | 结果分析 | | -------- | -------- | -------- | -| PP-OCRv2中英文超轻量检测预训练模型 | 77.26% | ppocr提供的预训练模型有一定的泛化能力 | +| PP-OCRv2中英文超轻量检测预训练模型 | 77.26% | ppocr提供的预训练模型有泛化能力 | | XFUND数据集 | 79.27% | | | XFUND数据集+finetune | 85.24% | finetune会提升垂类场景效果 | - -## 4.2 文本识别 +### 4.2 文本识别 我们分别使用如下3种方案进行训练、评估: @@ -387,8 +418,7 @@ use_gpu:是否使用GPU - XFUND数据集+fine-tune - XFUND数据集+fine-tune+真实通用识别数据 - -### **4.2.1 方案1:预训练模型** +#### 4.2.1 方案1:预训练模型 **1)下载预训练模型** @@ -401,8 +431,8 @@ use_gpu:是否使用GPU ```python %cd /home/aistudio/PaddleOCR/pretrain/ -! wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar -! tar -xf ch_PP-OCRv2_rec_train.tar && rm -rf ch_PP-OCRv2_rec_train.tar +wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar +tar -xf ch_PP-OCRv2_rec_train.tar && rm -rf ch_PP-OCRv2_rec_train.tar % cd .. ``` @@ -424,7 +454,7 @@ Eval.dataset.label_file_list:指向验证集标注文件 ```python %cd /home/aistudio/PaddleOCR -! CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ +CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ -c configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec_distillation.yml \ -o Global.checkpoints=./pretrain/ch_PP-OCRv2_rec_train/best_accuracy ``` @@ -435,9 +465,9 @@ Eval.dataset.label_file_list:指向验证集标注文件 | -------- | -------- | | PP-OCRv2中英文超轻量识别预训练模型 | 67.48% | -使用文本预训练模型在XFUND验证集上评估,acc达到67%左右,充分说明ppocr提供的预训练模型有一定的泛化能力。 +使用文本预训练模型在XFUND验证集上评估,acc达到67%左右,充分说明ppocr提供的预训练模型具有泛化能力。 -### **4.2.2 方案2:XFUND数据集+finetune** +#### 4.2.2 方案2:XFUND数据集+finetune 同检测模型,我们提取Student模型的参数,在XFUND数据集上进行finetune,可以参考如下代码: @@ -474,11 +504,9 @@ Eval.dataset.label_file_list:指向验证集标注文件 ``` 执行如下命令启动训练: - - ```python %cd /home/aistudio/PaddleOCR/ -! CUDA_VISIBLE_DEVICES=0 python tools/train.py \ +CUDA_VISIBLE_DEVICES=0 python tools/train.py \ -c configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml ``` @@ -493,7 +521,7 @@ Eval.dataset.label_file_list:指向验证集标注文件 ```python %cd /home/aistudio/PaddleOCR/ -! 
CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ +CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ -c configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml \ -o Global.checkpoints=./pretrain/rec_mobile_pp-OCRv2-student-finetune/best_accuracy ``` @@ -506,7 +534,7 @@ Eval.dataset.label_file_list:指向验证集标注文件 使用XFUND数据集+finetune训练,在验证集上评估达到72%左右,说明 finetune会提升垂类场景效果。 -### **4.2.3 方案3:XFUND数据集+finetune+真实通用识别数据** +#### 4.2.3 方案3:XFUND数据集+finetune+真实通用识别数据 接着我们在上述`XFUND数据集+finetune`实验的基础上,添加真实通用识别数据,进一步提升识别效果。首先准备真实通用识别数据,并上传到AIStudio: @@ -528,7 +556,7 @@ Train.dataset.ratio_list:动态采样 ```python %cd /home/aistudio/PaddleOCR/ -! CUDA_VISIBLE_DEVICES=0 python tools/train.py \ +CUDA_VISIBLE_DEVICES=0 python tools/train.py \ -c configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml ``` @@ -538,11 +566,11 @@ Train.dataset.ratio_list:动态采样
图16 文本识别方案3-模型评估
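+上文通过`Train.dataset.ratio_list`对XFUND与真实通用识别数据做动态采样。其采样效果大致等价于下面这段离线合并标注文件的示意代码(标注文件路径与采样比例均为假设;动态采样每个epoch会重新抽样,此处仅帮助理解):
+
+```python
+import random
+
+# 两份识别标注文件,路径仅为假设
+xfund = open("train_data/rec/train.txt", encoding="utf-8").readlines()
+general = open("train_data/general_rec/train.txt", encoding="utf-8").readlines()
+
+# 近似 ratio_list=[1.0, 0.5] 的效果:XFUND全量参与,通用数据随机抽取一半
+mixed = xfund + random.sample(general, len(general) // 2)
+random.shuffle(mixed)
+with open("train_data/rec/train_mixed.txt", "w", encoding="utf-8") as f:
+    f.writelines(mixed)
+```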
-使用训练好的模型进行评估,更新模型路径`Global.checkpoints`,这里为大家提供训练好的模型`./pretrain/rec_mobile_pp-OCRv2-student-readldata/best_accuracy`,[模型下载地址](https://paddleocr.bj.bcebos.com/fanliku/sheet_recognition/rec_mobile_pp-OCRv2-student-realdata.zip) +使用训练好的模型进行评估,更新模型路径`Global.checkpoints`。 ```python -! CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ +CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ -c configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml \ -o Global.checkpoints=./pretrain/rec_mobile_pp-OCRv2-student-realdata/best_accuracy ``` @@ -580,7 +608,7 @@ Train.dataset.ratio_list:动态采样 ```python -! python tools/infer/predict_system.py \ +python tools/infer/predict_system.py \ --image_dir="./doc/vqa/input/zh_val_21.jpg" \ --det_model_dir="./output/det_db_inference/" \ --rec_model_dir="./output/rec_crnn_inference/" \ @@ -592,11 +620,11 @@ Train.dataset.ratio_list:动态采样 | 方案 | acc | 结果分析 | | -------- | -------- | -------- | -| PP-OCRv2中英文超轻量识别预训练模型 | 67.48% | ppocr提供的预训练模型有一定的泛化能力 | +| PP-OCRv2中英文超轻量识别预训练模型 | 67.48% | ppocr提供的预训练模型具有泛化能力 | | XFUND数据集+fine-tune |72.33% | finetune会提升垂类场景效果 | | XFUND数据集+fine-tune+真实通用识别数据 | 85.29% | 真实通用识别数据对于性能提升很有帮助 | -# 5 文档视觉问答(DOC-VQA) +## 5 文档视觉问答(DOC-VQA) VQA指视觉问答,主要针对图像内容进行提问和回答,DOC-VQA是VQA任务中的一种,DOC-VQA主要针对文本图像的文字内容提出问题。 @@ -608,14 +636,13 @@ PaddleOCR中DOC-VQA系列算法基于PaddleNLP自然语言处理算法库实现L ```python %cd pretrain #下载SER模型 -! wget https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar && tar -xvf ser_LayoutXLM_xfun_zh.tar +wget https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar && tar -xvf ser_LayoutXLM_xfun_zh.tar #下载RE模型 -! wget https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar && tar -xvf re_LayoutXLM_xfun_zh.tar +wget https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar && tar -xvf re_LayoutXLM_xfun_zh.tar %cd ../ ``` - -## 5.1 SER +### 5.1 SER SER: 语义实体识别 (Semantic Entity Recognition), 可以完成对图像中的文本识别与分类。 @@ -647,7 +674,7 @@ SER: 语义实体识别 (Semantic Entity Recognition), 可以完成对图像 ```python %cd /home/aistudio/PaddleOCR/ -! CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/vqa/ser/layoutxlm.yml +CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/vqa/ser/layoutxlm.yml ``` 最终会打印出`precision`, `recall`, `hmean`等指标。 在`./output/ser_layoutxlm/`文件夹中会保存训练日志,最优的模型和最新epoch的模型。 @@ -664,7 +691,7 @@ SER: 语义实体识别 (Semantic Entity Recognition), 可以完成对图像 ```python -! CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ +CUDA_VISIBLE_DEVICES=0 python tools/eval.py \ -c configs/vqa/ser/layoutxlm.yml \ -o Architecture.Backbone.checkpoints=pretrain/ser_LayoutXLM_xfun_zh/ ``` @@ -684,7 +711,7 @@ SER: 语义实体识别 (Semantic Entity Recognition), 可以完成对图像 ```python -! CUDA_VISIBLE_DEVICES=0 python tools/infer_vqa_token_ser.py \ +CUDA_VISIBLE_DEVICES=0 python tools/infer_vqa_token_ser.py \ -c configs/vqa/ser/layoutxlm.yml \ -o Architecture.Backbone.checkpoints=pretrain/ser_LayoutXLM_xfun_zh/ \ Global.infer_img=doc/vqa/input/zh_val_42.jpg @@ -704,7 +731,7 @@ plt.figure(figsize=(48,24)) plt.imshow(img) ``` -## 5.2 RE +### 5.2 RE 基于 RE 任务,可以完成对图象中的文本内容的关系提取,如判断问题对(pair)。 @@ -729,7 +756,7 @@ plt.imshow(img) ```python -! CUDA_VISIBLE_DEVICES=0 python3 tools/train.py -c configs/vqa/re/layoutxlm.yml +CUDA_VISIBLE_DEVICES=0 python3 tools/train.py -c configs/vqa/re/layoutxlm.yml ``` 最终会打印出`precision`, `recall`, `hmean`等指标。 在`./output/re_layoutxlm/`文件夹中会保存训练日志,最优的模型和最新epoch的模型 @@ -744,7 +771,7 @@ plt.imshow(img) ```python -! 
CUDA_VISIBLE_DEVICES=0 python3 tools/eval.py \ +CUDA_VISIBLE_DEVICES=0 python3 tools/eval.py \ -c configs/vqa/re/layoutxlm.yml \ -o Architecture.Backbone.checkpoints=pretrain/re_LayoutXLM_xfun_zh/ ``` @@ -760,20 +787,14 @@ plt.imshow(img)
图26 RE-模型预测
- 使用OCR引擎 + SER + RE串联预测 - -使用如下命令即可完成OCR引擎 + SER + RE的串联预测, 以预训练SER和RE模型为例: - - +使用如下命令即可完成OCR引擎 + SER + RE的串联预测, 以预训练SER和RE模型为例, 最终会在config.Global.save_res_path字段所配置的目录下保存预测结果可视化图像以及预测结果文本文件,预测结果文本文件名为infer_results.txt。 - - ```python -%cd /home/aistudio/PaddleOCR -! CUDA_VISIBLE_DEVICES=0 python3 tools/infer_vqa_token_ser_re.py \ +cd /home/aistudio/PaddleOCR +CUDA_VISIBLE_DEVICES=0 python3 tools/infer_vqa_token_ser_re.py \ -c configs/vqa/re/layoutxlm.yml \ -o Architecture.Backbone.checkpoints=pretrain/re_LayoutXLM_xfun_zh/ \ Global.infer_img=test_imgs/ \ @@ -787,10 +808,9 @@ plt.imshow(img) test_imgs/t131.jpg {"政治面税": "群众", "性别": "男", "籍贯": "河北省邯郸市", "婚姻状况": "亏末婚口已婚口已娇", "通讯地址": "邯郸市阳光苑7号楼003", "民族": "汉族", "毕业院校": "河南工业大学", "户口性质": "口农村城镇", "户口地址": "河北省邯郸市", "联系电话": "13288888888", "健康状况": "健康", "姓名": "小六", "好高cm": "180", "出生年月": "1996年8月9日", "文化程度": "本科", "身份证号码": "458933777777777777"} ```` - +展示预测结果 ```python -# 展示预测结果 import cv2 from matplotlib import pyplot as plt %matplotlib inline @@ -800,7 +820,7 @@ plt.figure(figsize=(48,24)) plt.imshow(img) ``` -# 6 导出Excel +## 6 导出Excel
图27 导出Excel
@@ -859,7 +879,7 @@ with open('output/re/infer_results.txt', 'r', encoding='utf-8') as fin: workbook.close() ``` -# 更多资源 +## 更多资源 - 更多深度学习知识、产业案例、面试宝典等,请参考:[awesome-DeepLearning](https://github.com/paddlepaddle/awesome-DeepLearning) @@ -869,7 +889,7 @@ workbook.close() - 飞桨框架相关资料,请参考:[飞桨深度学习平台](https://www.paddlepaddle.org.cn/?fr=paddleEdu_aistudio) -# 参考链接 +## 参考链接 - LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding, https://arxiv.org/pdf/2104.08836.pdf diff --git "a/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md" "b/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md" new file mode 100644 index 0000000000000000000000000000000000000000..1a63091b9289ba51bd2dd4de6ee51264cdb2bc79 --- /dev/null +++ "b/applications/\350\275\273\351\207\217\347\272\247\350\275\246\347\211\214\350\257\206\345\210\253.md" @@ -0,0 +1,832 @@ +# 一种基于PaddleOCR的轻量级车牌识别模型 + +- [1. 项目介绍](#1-项目介绍) +- [2. 环境搭建](#2-环境搭建) +- [3. 数据集准备](#3-数据集准备) + - [3.1 数据集标注规则](#31-数据集标注规则) + - [3.2 制作符合PP-OCR训练格式的标注文件](#32-制作符合pp-ocr训练格式的标注文件) +- [4. 实验](#4-实验) + - [4.1 检测](#41-检测) + - [4.1.1 预训练模型直接预测](#411-预训练模型直接预测) + - [4.1.2 CCPD车牌数据集fine-tune](#412-ccpd车牌数据集fine-tune) + - [4.1.3 CCPD车牌数据集fine-tune+量化训练](#413-ccpd车牌数据集fine-tune量化训练) + - [4.1.4 模型导出](#414-模型导出) + - [4.2 识别](#42-识别) + - [4.2.1 预训练模型直接预测](#421-预训练模型直接预测) + - [4.2.2 预训练模型直接预测+改动后处理](#422-预训练模型直接预测改动后处理) + - [4.2.3 CCPD车牌数据集fine-tune](#423-ccpd车牌数据集fine-tune) + - [4.2.4 CCPD车牌数据集fine-tune+量化训练](#424-ccpd车牌数据集fine-tune量化训练) + - [4.2.5 模型导出](#425-模型导出) + - [4.3 计算End2End指标](#43-计算End2End指标) + - [4.4 部署](#44-部署) + - [4.5 实验总结](#45-实验总结) + +## 1. 项目介绍 + +车牌识别(Vehicle License Plate Recognition,VLPR) 是计算机视频图像识别技术在车辆牌照识别中的一种应用。车牌识别技术要求能够将运动中的汽车牌照从复杂背景中提取并识别出来,在高速公路车辆管理,停车场管理和城市交通中得到广泛应用。 + +本项目难点如下: + +1. 车牌在图像中的尺度差异大、在车辆上的悬挂位置不固定 +2. 车牌图像质量层次不齐: 角度倾斜、图片模糊、光照不足、过曝等问题严重 +3. 边缘和端测场景应用对模型大小有限制,推理速度有要求 + +针对以上问题, 本例选用 PP-OCRv3 这一开源超轻量OCR系统进行车牌识别系统的开发。基于PP-OCRv3模型,在CCPD数据集达到99%的检测和94%的识别精度,模型大小12.8M(2.5M+10.3M)。基于量化对模型体积进行进一步压缩到5.8M(1M+4.8M), 同时推理速度提升25%。 + + + +aistudio项目链接: [基于PaddleOCR的轻量级车牌识别范例](https://aistudio.baidu.com/aistudio/projectdetail/3919091?contributionType=1) + +## 2. 环境搭建 + +本任务基于Aistudio完成, 具体环境如下: + +- 操作系统: Linux +- PaddlePaddle: 2.3 +- paddleslim: 2.2.2 +- PaddleOCR: Release/2.5 + +下载 PaddleOCR代码 + +```bash +git clone -b dygraph https://github.com/PaddlePaddle/PaddleOCR +``` + +安装依赖库 + +```bash +pip install -r PaddleOCR/requirements.txt +``` + +## 3. 
数据集准备 + +所使用的数据集为 CCPD2020 新能源车牌数据集,该数据集为 + +该数据集分布如下: + +|数据集类型|数量| +|---|---| +|训练集| 5769| +|验证集| 1001| +|测试集| 5006| + +数据集图片示例如下: +![](https://ai-studio-static-online.cdn.bcebos.com/3bce057a8e0c40a0acbd26b2e29e4e2590a31bc412764be7b9e49799c69cb91c) + +数据集可以从这里下载 https://aistudio.baidu.com/aistudio/datasetdetail/101595 + +下载好数据集后对数据集进行解压 + +```bash +unzip -d /home/aistudio/data /home/aistudio/data/data101595/CCPD2020.zip +``` + +### 3.1 数据集标注规则 + +CPPD数据集的图片文件名具有特殊规则,详细可查看:https://github.com/detectRecog/CCPD + +具体规则如下: + +例如: 025-95_113-154&383_386&473-386&473_177&454_154&383_363&402-0_0_22_27_27_33_16-37-15.jpg + +每个名称可以分为七个字段,以-符号作为分割。这些字段解释如下。 + +- 025:车牌面积与整个图片区域的面积比。025 (25%) + +- 95_113:水平倾斜程度和垂直倾斜度。水平 95度 垂直 113度 + +- 154&383_386&473:左上和右下顶点的坐标。左上(154,383) 右下(386,473) + +- 386&473_177&454_154&383_363&402:整个图像中车牌的四个顶点的精确(x,y)坐标。这些坐标从右下角顶点开始。(386,473) (177,454) (154,383) (363,402) + +- 0_0_22_27_27_33_16:CCPD中的每个图像只有一个车牌。每个车牌号码由一个汉字,一个字母和五个字母或数字组成。有效的中文车牌由七个字符组成:省(1个字符),字母(1个字符),字母+数字(5个字符)。“ 0_0_22_27_27_33_16”是每个字符的索引。这三个数组定义如下。每个数组的最后一个字符是字母O,而不是数字0。我们将O用作“无字符”的符号,因为中文车牌字符中没有O。因此以上车牌拼起来即为 皖AY339S + +- 37:牌照区域的亮度。 37 (37%) + +- 15:车牌区域的模糊度。15 (15%) + +```python +provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"] +alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W','X', 'Y', 'Z', 'O'] +ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X','Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O'] +``` + +### 3.2 制作符合PP-OCR训练格式的标注文件 + +在开始训练之前,可使用如下代码制作符合PP-OCR训练格式的标注文件。 + + +```python +import cv2 +import os +import json +from tqdm import tqdm +import numpy as np + +provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"] +alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'O'] +ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O'] + +def make_label(img_dir, save_gt_folder, phase): + crop_img_save_dir = os.path.join(save_gt_folder, phase, 'crop_imgs') + os.makedirs(crop_img_save_dir, exist_ok=True) + + f_det = open(os.path.join(save_gt_folder, phase, 'det.txt'), 'w', encoding='utf-8') + f_rec = open(os.path.join(save_gt_folder, phase, 'rec.txt'), 'w', encoding='utf-8') + + i = 0 + for filename in tqdm(os.listdir(os.path.join(img_dir, phase))): + str_list = filename.split('-') + if len(str_list) < 5: + continue + coord_list = str_list[3].split('_') + txt_list = str_list[4].split('_') + boxes = [] + for coord in coord_list: + boxes.append([int(x) for x in coord.split("&")]) + boxes = [boxes[2], boxes[3], boxes[0], boxes[1]] + lp_number = provinces[int(txt_list[0])] + alphabets[int(txt_list[1])] + ''.join([ads[int(x)] for x in txt_list[2:]]) + + # det + det_info = [{'points':boxes, 'transcription':lp_number}] + f_det.write('{}\t{}\n'.format(os.path.join(phase, filename), json.dumps(det_info, ensure_ascii=False))) + + # rec + boxes = np.float32(boxes) + img = cv2.imread(os.path.join(img_dir, phase, filename)) + # crop_img = 
img[int(boxes[:,1].min()):int(boxes[:,1].max()),int(boxes[:,0].min()):int(boxes[:,0].max())] + crop_img = get_rotate_crop_image(img, boxes) + crop_img_save_filename = '{}_{}.jpg'.format(i,'_'.join(txt_list)) + crop_img_save_path = os.path.join(crop_img_save_dir, crop_img_save_filename) + cv2.imwrite(crop_img_save_path, crop_img) + f_rec.write('{}/crop_imgs/{}\t{}\n'.format(phase, crop_img_save_filename, lp_number)) + i+=1 + f_det.close() + f_rec.close() + +def get_rotate_crop_image(img, points): + ''' + img_height, img_width = img.shape[0:2] + left = int(np.min(points[:, 0])) + right = int(np.max(points[:, 0])) + top = int(np.min(points[:, 1])) + bottom = int(np.max(points[:, 1])) + img_crop = img[top:bottom, left:right, :].copy() + points[:, 0] = points[:, 0] - left + points[:, 1] = points[:, 1] - top + ''' + assert len(points) == 4, "shape of points must be 4*2" + img_crop_width = int( + max( + np.linalg.norm(points[0] - points[1]), + np.linalg.norm(points[2] - points[3]))) + img_crop_height = int( + max( + np.linalg.norm(points[0] - points[3]), + np.linalg.norm(points[1] - points[2]))) + pts_std = np.float32([[0, 0], [img_crop_width, 0], + [img_crop_width, img_crop_height], + [0, img_crop_height]]) + M = cv2.getPerspectiveTransform(points, pts_std) + dst_img = cv2.warpPerspective( + img, + M, (img_crop_width, img_crop_height), + borderMode=cv2.BORDER_REPLICATE, + flags=cv2.INTER_CUBIC) + dst_img_height, dst_img_width = dst_img.shape[0:2] + if dst_img_height * 1.0 / dst_img_width >= 1.5: + dst_img = np.rot90(dst_img) + return dst_img + +img_dir = '/home/aistudio/data/CCPD2020/ccpd_green' +save_gt_folder = '/home/aistudio/data/CCPD2020/PPOCR' +# phase = 'train' # change to val and test to make val dataset and test dataset +for phase in ['train','val','test']: + make_label(img_dir, save_gt_folder, phase) +``` + +通过上述命令可以完成了`训练集`,`验证集`和`测试集`的制作,制作完成的数据集信息如下: + +| 类型 | 数据集 | 图片地址 | 标签地址 | 图片数量 | +| --- | --- | --- | --- | --- | +| 检测 | 训练集 | /home/aistudio/data/CCPD2020/ccpd_green/train | /home/aistudio/data/CCPD2020/PPOCR/train/det.txt | 5769 | +| 检测 | 验证集 | /home/aistudio/data/CCPD2020/ccpd_green/val | /home/aistudio/data/CCPD2020/PPOCR/val/det.txt | 1001 | +| 检测 | 测试集 | /home/aistudio/data/CCPD2020/ccpd_green/test | /home/aistudio/data/CCPD2020/PPOCR/test/det.txt | 5006 | +| 识别 | 训练集 | /home/aistudio/data/CCPD2020/PPOCR/train/crop_imgs | /home/aistudio/data/CCPD2020/PPOCR/train/rec.txt | 5769 | +| 识别 | 验证集 | /home/aistudio/data/CCPD2020/PPOCR/val/crop_imgs | /home/aistudio/data/CCPD2020/PPOCR/val/rec.txt | 1001 | +| 识别 | 测试集 | /home/aistudio/data/CCPD2020/PPOCR/test/crop_imgs | /home/aistudio/data/CCPD2020/PPOCR/test/rec.txt | 5006 | + +在普遍的深度学习流程中,都是在训练集训练,在验证集选择最优模型后在测试集上进行测试。在本例中,我们省略中间步骤,直接在训练集训练,在测试集选择最优模型,因此我们只使用训练集和测试集。 + +## 4. 实验 + +由于数据集比较少,为了模型更好和更快的收敛,这里选用 PaddleOCR 中的 PP-OCRv3 模型进行文本检测和识别,并且使用 PP-OCRv3 模型参数作为预训练模型。PP-OCRv3在PP-OCRv2的基础上,中文场景端到端Hmean指标相比于PP-OCRv2提升5%, 英文数字模型端到端效果提升11%。详细优化细节请参考[PP-OCRv3](../doc/doc_ch/PP-OCRv3_introduction.md)技术报告。 + +由于车牌场景均为端侧设备部署,因此对速度和模型大小有比较高的要求,因此还需要采用量化训练的方式进行模型大小的压缩和模型推理速度的加速。模型量化可以在基本不损失模型的精度的情况下,将FP32精度的模型参数转换为Int8精度,减小模型参数大小并加速计算,使用量化后的模型在移动端等部署时更具备速度优势。 + +因此,本实验中对于车牌检测和识别有如下3种方案: + +1. PP-OCRv3中英文超轻量预训练模型直接预测 +2. CCPD车牌数据集在PP-OCRv3模型上fine-tune +3. 
CCPD车牌数据集在PP-OCRv3模型上fine-tune后量化 + +### 4.1 检测 +#### 4.1.1 预训练模型直接预测 + +从下表中下载PP-OCRv3文本检测预训练模型 + +|模型名称|模型简介|配置文件|推理模型大小|下载地址| +| --- | --- | --- | --- | --- | +|ch_PP-OCRv3_det| 【最新】原始超轻量模型,支持中英文、多语种文本检测 |[ch_PP-OCRv3_det_cml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar)| + +使用如下命令下载预训练模型 + +```bash +mkdir models +cd models +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar +tar -xf ch_PP-OCRv3_det_distill_train.tar +cd /home/aistudio/PaddleOCR +``` + +预训练模型下载完成后,我们使用[ch_PP-OCRv3_det_student.yml](../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml) 配置文件进行后续实验,在开始评估之前需要对配置文件中部分字段进行设置,具体如下: + +1. 模型存储和训练相关: + 1. Global.pretrained_model: 指向PP-OCRv3文本检测预训练模型地址 +2. 数据集相关 + 1. Eval.dataset.data_dir:指向测试集图片存放目录 + 2. Eval.dataset.label_file_list:指向测试集标注文件 + +上述字段均为必须修改的字段,可以通过修改配置文件的方式改动,也可在不需要修改配置文件的情况下,改变训练的参数。这里使用不改变配置文件的方式 。使用如下命令进行PP-OCRv3文本检测预训练模型的评估 + + +```bash +python tools/eval.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_det_distill_train/student.pdparams \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/det.txt] +``` +上述指令中,通过-c 选择训练使用配置文件,通过-o参数在不需要修改配置文件的情况下,改变训练的参数。 + +使用预训练模型进行评估,指标如下所示: + +| 方案 |hmeans| +|---------------------------|---| +| PP-OCRv3中英文超轻量检测预训练模型直接预测 |76.12%| + +#### 4.1.2 CCPD车牌数据集fine-tune + +**训练** + +为了进行fine-tune训练,我们需要在配置文件中设置需要使用的预训练模型地址,学习率和数据集等参数。 具体如下: + +1. 模型存储和训练相关: + 1. Global.pretrained_model: 指向PP-OCRv3文本检测预训练模型地址 + 2. Global.eval_batch_step: 模型多少step评估一次,这里设为从第0个step开始没隔772个step评估一次,772为一个epoch总的step数。 +2. 优化器相关: + 1. Optimizer.lr.name: 学习率衰减器设为常量 Const + 2. Optimizer.lr.learning_rate: 做 fine-tune 实验,学习率需要设置的比较小,此处学习率设为配置文件中的0.05倍 + 3. Optimizer.lr.warmup_epoch: warmup_epoch设为0 +3. 数据集相关: + 1. Train.dataset.data_dir:指向训练集图片存放目录 + 2. Train.dataset.label_file_list:指向训练集标注文件 + 3. Eval.dataset.data_dir:指向测试集图片存放目录 + 4. 
Eval.dataset.label_file_list:指向测试集标注文件 + +使用如下代码即可启动在CCPD车牌数据集上的fine-tune。 + +```bash +python tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_det_distill_train/student.pdparams \ + Global.save_model_dir=output/CCPD/det \ + Global.eval_batch_step="[0, 772]" \ + Optimizer.lr.name=Const \ + Optimizer.lr.learning_rate=0.0005 \ + Optimizer.lr.warmup_epoch=0 \ + Train.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Train.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/train/det.txt] \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/det.txt] +``` + +在上述命令中,通过`-o`的方式修改了配置文件中的参数。 + + +**评估** + +训练完成后使用如下命令进行评估 + + +```bash +python tools/eval.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=output/CCPD/det/best_accuracy.pdparams \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/det.txt] +``` + +使用预训练模型和CCPD车牌数据集fine-tune,指标分别如下: + +|方案|hmeans| +|---|---| +|PP-OCRv3中英文超轻量检测预训练模型直接预测|76.12%| +|PP-OCRv3中英文超轻量检测预训练模型 fine-tune|99%| + +可以看到进行fine-tune能显著提升车牌检测的效果。 + +#### 4.1.3 CCPD车牌数据集fine-tune+量化训练 + +此处采用 PaddleOCR 中提供好的[量化教程](../deploy/slim/quantization/README.md)对模型进行量化训练。 + +量化训练可通过如下命令启动: + +```bash +python3.7 deploy/slim/quantization/quant.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=output/CCPD/det/best_accuracy.pdparams \ + Global.save_model_dir=output/CCPD/det_quant \ + Global.eval_batch_step="[0, 772]" \ + Optimizer.lr.name=Const \ + Optimizer.lr.learning_rate=0.0005 \ + Optimizer.lr.warmup_epoch=0 \ + Train.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Train.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/train/det.txt] \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/det.txt] +``` + +量化后指标对比如下 + +|方案|hmeans| 模型大小 | 预测速度(lite) | +|---|---|------|------------| +|PP-OCRv3中英文超轻量检测预训练模型 fine-tune|99%| 2.5M | 223ms | +|PP-OCRv3中英文超轻量检测预训练模型 fine-tune+量化|98.91%| 1M | 189ms | + +可以看到通过量化训练在精度几乎无损的情况下,降低模型体积60%并且推理速度提升15%。 + +速度测试基于[PaddleOCR lite教程](../deploy/lite/readme_ch.md)完成。 + +#### 4.1.4 模型导出 + +使用如下命令可以将训练好的模型进行导出 + +* 非量化模型 +```bash +python tools/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=output/CCPD/det/best_accuracy.pdparams \ + Global.save_inference_dir=output/det/infer +``` +* 量化模型 +```bash +python deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=output/CCPD/det_quant/best_accuracy.pdparams \ + Global.save_inference_dir=output/det/infer +``` + +### 4.2 识别 +#### 4.2.1 预训练模型直接预测 + +从下表中下载PP-OCRv3文本识别预训练模型 + +|模型名称|模型简介|配置文件|推理模型大小|下载地址| +| --- | --- | --- | --- | --- | +|ch_PP-OCRv3_rec|【最新】原始超轻量模型,支持中英文、数字识别|[ch_PP-OCRv3_rec_distillation.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 12.4M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar) | + +使用如下命令下载预训练模型 + +```bash +mkdir models +cd models +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar +tar -xf 
ch_PP-OCRv3_rec_train.tar +cd /home/aistudio/PaddleOCR +``` + +PaddleOCR提供的PP-OCRv3识别模型采用蒸馏训练策略,因此提供的预训练模型中会包含`Teacher`和`Student`模型的参数,详细信息可参考[knowledge_distillation.md](../doc/doc_ch/knowledge_distillation.md)。 因此,模型下载完成后需要使用如下代码提取`Student`模型的参数: + +```python +import paddle +# 加载预训练模型 +all_params = paddle.load("models/ch_PP-OCRv3_rec_train/best_accuracy.pdparams") +# 查看权重参数的keys +print(all_params.keys()) +# 学生模型的权重提取 +s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." in key} +# 查看学生模型权重参数的keys +print(s_params.keys()) +# 保存 +paddle.save(s_params, "models/ch_PP-OCRv3_rec_train/student.pdparams") +``` + +预训练模型下载完成后,我们使用[ch_PP-OCRv3_rec.yml](../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml) 配置文件进行后续实验,在开始评估之前需要对配置文件中部分字段进行设置,具体如下: + +1. 模型存储和训练相关: + 1. Global.pretrained_model: 指向PP-OCRv3文本识别预训练模型地址 +2. 数据集相关 + 1. Eval.dataset.data_dir:指向测试集图片存放目录 + 2. Eval.dataset.label_file_list:指向测试集标注文件 + +使用如下命令进行PP-OCRv3文本识别预训练模型的评估 + +```bash +python tools/eval.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_rec_train/student.pdparams \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt] +``` + +如需获取已训练模型,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁 +
+ +
+ + +评估部分日志如下: +```bash +[2022/05/12 19:52:02] ppocr INFO: load pretrain successful from models/ch_PP-OCRv3_rec_train/best_accuracy +eval model:: 100%|██████████████████████████████| 40/40 [00:15<00:00, 2.57it/s] +[2022/05/12 19:52:17] ppocr INFO: metric eval *************** +[2022/05/12 19:52:17] ppocr INFO: acc:0.0 +[2022/05/12 19:52:17] ppocr INFO: norm_edit_dis:0.8656084923002452 +[2022/05/12 19:52:17] ppocr INFO: Teacher_acc:0.000399520574511545 +[2022/05/12 19:52:17] ppocr INFO: Teacher_norm_edit_dis:0.8657902943394548 +[2022/05/12 19:52:17] ppocr INFO: fps:1443.1801978719905 + +``` +使用预训练模型进行评估,指标如下所示: + +|方案|acc| +|---|---| +|PP-OCRv3中英文超轻量识别预训练模型直接预测|0%| + +从评估日志中可以看到,直接使用PP-OCRv3预训练模型进行评估,acc非常低,但是norm_edit_dis很高。因此,我们猜测是模型大部分文字识别是对的,只有少部分文字识别错误。使用如下命令进行infer查看模型的推理结果进行验证: + + +```bash +python tools/infer_rec.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_rec_train/student.pdparams \ + Global.infer_img=/home/aistudio/data/CCPD2020/PPOCR/test/crop_imgs/0_0_0_3_32_30_31_30_30.jpg +``` + +输出部分日志如下: +```bash +[2022/05/01 08:51:57] ppocr INFO: train with paddle 2.2.2 and device CUDAPlace(0) +W0501 08:51:57.127391 11326 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.0, Runtime API Version: 10.1 +W0501 08:51:57.132315 11326 device_context.cc:465] device: 0, cuDNN Version: 7.6. +[2022/05/01 08:52:00] ppocr INFO: load pretrain successful from models/ch_PP-OCRv3_rec_train/student +[2022/05/01 08:52:00] ppocr INFO: infer_img: /home/aistudio/data/CCPD2020/PPOCR/test/crop_imgs/0_0_3_32_30_31_30_30.jpg +[2022/05/01 08:52:00] ppocr INFO: result: {"Student": {"label": "皖A·D86766", "score": 0.9552637934684753}, "Teacher": {"label": "皖A·D86766", "score": 0.9917094707489014}} +[2022/05/01 08:52:00] ppocr INFO: success! +``` + +从infer结果可以看到,车牌中的文字大部分都识别正确,只是多识别出了一个`·`。针对这种情况,有如下两种方案: +1. 直接通过后处理去掉多识别的`·`。 +2. 进行 fine-tune。 + +#### 4.2.2 预训练模型直接预测+改动后处理 + +直接通过后处理去掉多识别的`·`,在后处理的改动比较简单,只需在 [ppocr/postprocess/rec_postprocess.py](../ppocr/postprocess/rec_postprocess.py) 文件的76行添加如下代码: +```python +text = text.replace('·','') +``` + +改动前后指标对比: + +|方案|acc| +|---|---| +|PP-OCRv3中英文超轻量识别预训练模型直接预测|0.2%| +|PP-OCRv3中英文超轻量识别预训练模型直接预测+后处理去掉多识别的`·`|90.97%| + +可以看到,去掉多余的`·`能大幅提高精度。 + +#### 4.2.3 CCPD车牌数据集fine-tune + +**训练** + +为了进行fine-tune训练,我们需要在配置文件中设置需要使用的预训练模型地址,学习率和数据集等参数。 具体如下: + +1. 模型存储和训练相关: + 1. Global.pretrained_model: 指向PP-OCRv3文本识别预训练模型地址 + 2. Global.eval_batch_step: 模型多少step评估一次,这里设为从第0个step开始没隔45个step评估一次,45为一个epoch总的step数。 +2. 优化器相关 + 1. Optimizer.lr.name: 学习率衰减器设为常量 Const + 2. Optimizer.lr.learning_rate: 做 fine-tune 实验,学习率需要设置的比较小,此处学习率设为配置文件中的0.05倍 + 3. Optimizer.lr.warmup_epoch: warmup_epoch设为0 +3. 数据集相关 + 1. Train.dataset.data_dir:指向训练集图片存放目录 + 2. Train.dataset.label_file_list:指向训练集标注文件 + 3. Eval.dataset.data_dir:指向测试集图片存放目录 + 4. 
Eval.dataset.label_file_list:指向测试集标注文件 + +使用如下命令启动 fine-tune + +```bash +python tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_rec_train/student.pdparams \ + Global.save_model_dir=output/CCPD/rec/ \ + Global.eval_batch_step="[0, 90]" \ + Optimizer.lr.name=Const \ + Optimizer.lr.learning_rate=0.0005 \ + Optimizer.lr.warmup_epoch=0 \ + Train.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Train.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/train/rec.txt] \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt] +``` + +**评估** + +训练完成后使用如下命令进行评估 + +```bash +python tools/eval.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=output/CCPD/rec/best_accuracy.pdparams \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt] +``` + +使用预训练模型和CCPD车牌数据集fine-tune,指标分别如下: + +|方案| acc | +|---|--------| +|PP-OCRv3中英文超轻量识别预训练模型直接预测| 0% | +|PP-OCRv3中英文超轻量识别预训练模型直接预测+后处理去掉多识别的`·`| 90.97% | +|PP-OCRv3中英文超轻量识别预训练模型 fine-tune| 94.54% | + +可以看到进行fine-tune能显著提升车牌识别的效果。 + +#### 4.2.4 CCPD车牌数据集fine-tune+量化训练 + +此处采用 PaddleOCR 中提供好的[量化教程](../deploy/slim/quantization/README.md)对模型进行量化训练。 + +量化训练可通过如下命令启动: + +```bash +python3.7 deploy/slim/quantization/quant.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=output/CCPD/rec/best_accuracy.pdparams \ + Global.save_model_dir=output/CCPD/rec_quant/ \ + Global.eval_batch_step="[0, 90]" \ + Optimizer.lr.name=Const \ + Optimizer.lr.learning_rate=0.0005 \ + Optimizer.lr.warmup_epoch=0 \ + Train.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Train.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/train/rec.txt] \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt] +``` + +量化后指标对比如下 + +|方案| acc | 模型大小 | 预测速度(lite) | +|---|--------|-------|------------| +|PP-OCRv3中英文超轻量识别预训练模型 fine-tune| 94.54% | 10.3M | 4.2ms | +|PP-OCRv3中英文超轻量识别预训练模型 fine-tune + 量化| 93.4% | 4.8M | 1.8ms | + +可以看到量化后能降低模型体积53%并且推理速度提升57%,但是由于识别数据过少,量化带来了1%的精度下降。 + +速度测试基于[PaddleOCR lite教程](../deploy/lite/readme_ch.md)完成。 + +#### 4.2.5 模型导出 + +使用如下命令可以将训练好的模型进行导出。 + +* 非量化模型 +```bash +python tools/export_model.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=output/CCPD/rec/best_accuracy.pdparams \ + Global.save_inference_dir=output/CCPD/rec/infer +``` +* 量化模型 +```bash +python deploy/slim/quantization/export_model.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=output/CCPD/rec_quant/best_accuracy.pdparams \ + Global.save_inference_dir=output/CCPD/rec_quant/infer +``` + +### 4.3 计算End2End指标 + +端到端指标可通过 [PaddleOCR内置脚本](../tools/end2end/readme.md) 进行计算,具体步骤如下: + +1. 
导出模型 + +通过如下命令进行模型的导出。注意,量化模型导出时,需要配置eval数据集 + +```bash +# 检测模型 + +# 预训练模型 +python tools/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_det_distill_train/student.pdparams \ + Global.save_inference_dir=output/ch_PP-OCRv3_det_distill_train/infer + +# 非量化模型 +python tools/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=output/CCPD/det/best_accuracy.pdparams \ + Global.save_inference_dir=output/CCPD/det/infer + +# 量化模型 +python deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml -o \ + Global.pretrained_model=output/CCPD/det_quant/best_accuracy.pdparams \ + Global.save_inference_dir=output/CCPD/det_quant/infer \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/ccpd_green \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/det.txt] \ + Eval.loader.num_workers=0 + +# 识别模型 + +# 预训练模型 +python tools/export_model.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=models/ch_PP-OCRv3_rec_train/student.pdparams \ + Global.save_inference_dir=output/ch_PP-OCRv3_rec_train/infer + +# 非量化模型 +python tools/export_model.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=output/CCPD/rec/best_accuracy.pdparams \ + Global.save_inference_dir=output/CCPD/rec/infer + +# 量化模型 +python deploy/slim/quantization/export_model.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml -o \ + Global.pretrained_model=output/CCPD/rec_quant/best_accuracy.pdparams \ + Global.save_inference_dir=output/CCPD/rec_quant/infer \ + Eval.dataset.data_dir=/home/aistudio/data/CCPD2020/PPOCR \ + Eval.dataset.label_file_list=[/home/aistudio/data/CCPD2020/PPOCR/test/rec.txt] +``` + +2. 用导出的模型对测试集进行预测 + +此处,分别使用PP-OCRv3预训练模型,fintune模型和量化模型对测试集的所有图像进行预测,命令如下: + +```bash +# PP-OCRv3中英文超轻量检测预训练模型,PP-OCRv3中英文超轻量识别预训练模型 +python3 tools/infer/predict_system.py --det_model_dir=models/ch_PP-OCRv3_det_distill_train/infer --rec_model_dir=models/ch_PP-OCRv3_rec_train/infer --det_limit_side_len=736 --det_limit_type=min --image_dir=/home/aistudio/data/CCPD2020/ccpd_green/test/ --draw_img_save_dir=infer/pretrain --use_dilation=true + +# PP-OCRv3中英文超轻量检测预训练模型+fine-tune,PP-OCRv3中英文超轻量识别预训练模型+fine-tune +python3 tools/infer/predict_system.py --det_model_dir=output/CCPD/det/infer --rec_model_dir=output/CCPD/rec/infer --det_limit_side_len=736 --det_limit_type=min --image_dir=/home/aistudio/data/CCPD2020/ccpd_green/test/ --draw_img_save_dir=infer/fine-tune --use_dilation=true + +# PP-OCRv3中英文超轻量检测预训练模型 fine-tune +量化,PP-OCRv3中英文超轻量识别预训练模型 fine-tune +量化 结果转换和评估 +python3 tools/infer/predict_system.py --det_model_dir=output/CCPD/det_quant/infer --rec_model_dir=output/CCPD/rec_quant/infer --det_limit_side_len=736 --det_limit_type=min --image_dir=/home/aistudio/data/CCPD2020/ccpd_green/test/ --draw_img_save_dir=infer/quant --use_dilation=true +``` + +3. 
转换label并计算指标 + +将gt和上一步保存的预测结果转换为端对端评测需要的数据格式,并根据转换后的数据进行端到端指标计算 + +```bash +python3 tools/end2end/convert_ppocr_label.py --mode=gt --label_path=/home/aistudio/data/CCPD2020/PPOCR/test/det.txt --save_folder=end2end/gt + +# PP-OCRv3中英文超轻量检测预训练模型,PP-OCRv3中英文超轻量识别预训练模型 结果转换和评估 +python3 tools/end2end/convert_ppocr_label.py --mode=pred --label_path=infer/pretrain/system_results.txt --save_folder=end2end/pretrain +python3 tools/end2end/eval_end2end.py end2end/gt end2end/pretrain + +# PP-OCRv3中英文超轻量检测预训练模型,PP-OCRv3中英文超轻量识别预训练模型+后处理去掉多识别的`·` 结果转换和评估 +# 需手动修改后处理函数 +python3 tools/end2end/convert_ppocr_label.py --mode=pred --label_path=infer/post/system_results.txt --save_folder=end2end/post +python3 tools/end2end/eval_end2end.py end2end/gt end2end/post + +# PP-OCRv3中英文超轻量检测预训练模型 fine-tune,PP-OCRv3中英文超轻量识别预训练模型 fine-tune 结果转换和评估 +python3 tools/end2end/convert_ppocr_label.py --mode=pred --label_path=infer/fine-tune/system_results.txt --save_folder=end2end/fine-tune +python3 tools/end2end/eval_end2end.py end2end/gt end2end/fine-tune + +# PP-OCRv3中英文超轻量检测预训练模型 fine-tune +量化,PP-OCRv3中英文超轻量识别预训练模型 fine-tune +量化 结果转换和评估 +python3 tools/end2end/convert_ppocr_label.py --mode=pred --label_path=infer/quant/system_results.txt --save_folder=end2end/quant +python3 tools/end2end/eval_end2end.py end2end/gt end2end/quant +``` + +日志如下: +```bash +The convert label saved in end2end/gt +The convert label saved in end2end/pretrain +start testing... +hit, dt_count, gt_count 2 5988 5006 +character_acc: 70.42% +avg_edit_dist_field: 2.37 +avg_edit_dist_img: 2.37 +precision: 0.03% +recall: 0.04% +fmeasure: 0.04% +The convert label saved in end2end/post +start testing... +hit, dt_count, gt_count 4224 5988 5006 +character_acc: 81.59% +avg_edit_dist_field: 1.47 +avg_edit_dist_img: 1.47 +precision: 70.54% +recall: 84.38% +fmeasure: 76.84% +The convert label saved in end2end/fine-tune +start testing... +hit, dt_count, gt_count 4286 4898 5006 +character_acc: 94.16% +avg_edit_dist_field: 0.47 +avg_edit_dist_img: 0.47 +precision: 87.51% +recall: 85.62% +fmeasure: 86.55% +The convert label saved in end2end/quant +start testing... +hit, dt_count, gt_count 4349 4951 5006 +character_acc: 94.13% +avg_edit_dist_field: 0.47 +avg_edit_dist_img: 0.47 +precision: 87.84% +recall: 86.88% +fmeasure: 87.36% +``` + +各个方案端到端指标如下: + +|模型| 指标 | +|---|--------| +|PP-OCRv3中英文超轻量检测预训练模型
PP-OCRv3中英文超轻量识别预训练模型| 0.04% | +|PP-OCRv3中英文超轻量检测预训练模型
PP-OCRv3中英文超轻量识别预训练模型 + 后处理去掉多识别的`·`| 78.27% | +|PP-OCRv3中英文超轻量检测预训练模型+fine-tune
PP-OCRv3中英文超轻量识别预训练模型+fine-tune| 87.14% | +|PP-OCRv3中英文超轻量检测预训练模型+fine-tune+量化
PP-OCRv3中英文超轻量识别预训练模型+fine-tune+量化| 88% | + +从结果中可以看到对预训练模型不做修改,只根据场景下的具体情况进行后处理的修改就能大幅提升端到端指标到78.27%,在CCPD数据集上进行 fine-tune 后指标进一步提升到87.14%, 在经过量化训练之后,由于检测模型的recall变高,指标进一步提升到88%。但是这个结果仍旧不符合检测模型+识别模型的真实性能(99%*94%=93%),因此我们需要对 base case 进行具体分析。 + +在之前的端到端预测结果中,可以看到很多不符合车牌标注的文字被识别出来, 因此可以进行简单的过滤来提升precision + +为了快速评估,我们在 ` tools/end2end/convert_ppocr_label.py` 脚本的 58 行加入如下代码,对非8个字符的结果进行过滤 +```python +if len(txt) != 8: # 车牌字符串长度为8 + continue +``` + +此外,通过可视化box可以发现有很多框都是竖直翻转之后的框,并且没有完全框住车牌边界,因此需要进行框的竖直翻转以及轻微扩大,示意图如下: + +![](https://ai-studio-static-online.cdn.bcebos.com/59ab0411c8eb4dfd917fb2b6e5b69a17ee7ca48351444aec9ac6104b79ff1028) + +修改前后个方案指标对比如下: + + +各个方案端到端指标如下: + +|模型|base|A:识别结果过滤|B:use_dilation|C:flip_box|best| +|---|---|---|---|---|---| +|PP-OCRv3中英文超轻量检测预训练模型
PP-OCRv3中英文超轻量识别预训练模型|0.04%|0.08%|0.02%|0.05%|0.00%(A)| +|PP-OCRv3中英文超轻量检测预训练模型
PP-OCRv3中英文超轻量识别预训练模型 + 后处理去掉多识别的`·`|78.27%|90.84%|78.61%|79.43%|91.66%(A+B+C)| +|PP-OCRv3中英文超轻量检测预训练模型+fine-tune
PP-OCRv3中英文超轻量识别预训练模型+fine-tune|87.14%|90.40%|87.66%|89.98%|92.5%(A+B+C)| |PP-OCRv3中英文超轻量检测预训练模型+fine-tune+量化
PP-OCRv3中英文超轻量识别预训练模型+fine-tune+量化|88%|90.54%|88.5%|89.46%|92.02%(A+B+C)| + + +从结果中可以看到对预训练模型不做修改,只根据场景下的具体情况进行后处理的修改就能大幅提升端到端指标到91.66%,在CCPD数据集上进行 fine-tune 后指标进一步提升到92.5%, 在经过量化训练之后,指标变为92.02%。 + +### 4.4 部署 + +- 基于 Paddle Inference 的python推理 + +检测模型和识别模型分别 fine-tune 并导出为inference模型之后,可以使用如下命令基于 Paddle Inference 进行端到端推理并对结果进行可视化。 + +```bash +python tools/infer/predict_system.py \ + --det_model_dir=output/CCPD/det/infer/ \ + --rec_model_dir=output/CCPD/rec/infer/ \ + --image_dir="/home/aistudio/data/CCPD2020/ccpd_green/test/04131106321839081-92_258-159&509_530&611-527&611_172&599_159&509_530&525-0_0_3_32_30_31_30_30-109-106.jpg" \ + --rec_image_shape=3,48,320 +``` +推理结果如下: + +![](https://ai-studio-static-online.cdn.bcebos.com/76b6a0939c2c4cf49039b6563c4b28e241e11285d7464e799e81c58c0f7707a7) + +- 端侧部署 + +端侧部署我们采用基于 PaddleLite 的 cpp 推理。Paddle Lite是飞桨轻量化推理引擎,为手机、IoT端提供高效推理能力,并广泛整合跨平台硬件,为端侧部署及应用落地问题提供轻量化的部署方案。具体可参考 [PaddleOCR lite教程](../deploy/lite/readme_ch.md)。 + + +### 4.5 实验总结 + +我们分别使用PP-OCRv3中英文超轻量预训练模型在车牌数据集上进行了直接评估、fine-tune 和 fine-tune+量化 3 种方案的实验,并基于[PaddleOCR lite教程](../deploy/lite/readme_ch.md)进行了速度测试,指标对比如下: + +- 检测 + +|方案|hmeans| 模型大小 | 预测速度(lite) | +|---|---|------|------------| +|PP-OCRv3中英文超轻量检测预训练模型直接预测|76.12%|2.5M| 233ms | +|PP-OCRv3中英文超轻量检测预训练模型 fine-tune|99%| 2.5M | 233ms | +|PP-OCRv3中英文超轻量检测预训练模型 fine-tune + 量化|98.91%| 1M | 189ms | + +- 识别 + +|方案| acc | 模型大小 | 预测速度(lite) | +|---|--------|-------|------------| +|PP-OCRv3中英文超轻量识别预训练模型直接预测| 0% |10.3M| 4.2ms | +|PP-OCRv3中英文超轻量识别预训练模型直接预测+后处理去掉多识别的`·`| 90.97% |10.3M| 4.2ms | +|PP-OCRv3中英文超轻量识别预训练模型 fine-tune| 94.54% | 10.3M | 4.2ms | +|PP-OCRv3中英文超轻量识别预训练模型 fine-tune + 量化| 93.4% | 4.8M | 1.8ms | + + +- 端到端指标如下: + +|方案|fmeasure|模型大小|预测速度(lite) | +|---|---|---|---| +|PP-OCRv3中英文超轻量检测预训练模型
PP-OCRv3中英文超轻量识别预训练模型|0.08%|12.8M|298ms| +|PP-OCRv3中英文超轻量检测预训练模型
PP-OCRv3中英文超轻量识别预训练模型 + 后处理去掉多识别的`·`|91.66%|12.8M|298ms| +|PP-OCRv3中英文超轻量检测预训练模型+fine-tune
PP-OCRv3中英文超轻量识别预训练模型+fine-tune|92.5%|12.8M|298ms| +|PP-OCRv3中英文超轻量检测预训练模型+fine-tune+量化
PP-OCRv3中英文超轻量识别预训练模型+fine-tune+量化|92.02%|5.8M|224ms| + + +**结论** + +PP-OCRv3的检测模型在未经过fine-tune的情况下,在车牌数据集上也有一定的精度,经过 fine-tune 后能够极大的提升检测效果,精度达到99%。在使用量化训练后检测模型的精度几乎无损,并且模型大小压缩60%。 + +PP-OCRv3的识别模型在未经过fine-tune的情况下,在车牌数据集上精度为0,但是经过分析可以知道,模型大部分字符都预测正确,但是会多预测一个特殊字符,去掉这个特殊字符后,精度达到90%。PP-OCRv3识别模型在经过 fine-tune 后识别精度进一步提升,达到94.4%。在使用量化训练后识别模型大小压缩53%,但是由于数据量多少,带来了1%的精度损失。 + +从端到端结果中可以看到对预训练模型不做修改,只根据场景下的具体情况进行后处理的修改就能大幅提升端到端指标到91.66%,在CCPD数据集上进行 fine-tune 后指标进一步提升到92.5%, 在经过量化训练之后,指标轻微下降到92.02%但模型大小降低54%。 diff --git "a/applications/\351\253\230\347\262\276\345\272\246\344\270\255\346\226\207\350\257\206\345\210\253\346\250\241\345\236\213.md" "b/applications/\351\253\230\347\262\276\345\272\246\344\270\255\346\226\207\350\257\206\345\210\253\346\250\241\345\236\213.md" new file mode 100644 index 0000000000000000000000000000000000000000..3c31af42ee41f6233b8ea42cf995543846c43120 --- /dev/null +++ "b/applications/\351\253\230\347\262\276\345\272\246\344\270\255\346\226\207\350\257\206\345\210\253\346\250\241\345\236\213.md" @@ -0,0 +1,107 @@ +# 高精度中文场景文本识别模型SVTR + +## 1. 简介 + +PP-OCRv3是百度开源的超轻量级场景文本检测识别模型库,其中超轻量的场景中文识别模型SVTR_LCNet使用了SVTR算法结构。为了保证速度,SVTR_LCNet将SVTR模型的Local Blocks替换为LCNet,使用两层Global Blocks。在中文场景中,PP-OCRv3识别主要使用如下优化策略: +- GTC:Attention指导CTC训练策略; +- TextConAug:挖掘文字上下文信息的数据增广策略; +- TextRotNet:自监督的预训练模型; +- UDML:联合互学习策略; +- UIM:无标注数据挖掘方案。 + +其中 *UIM:无标注数据挖掘方案* 使用了高精度的SVTR中文模型进行无标注文件的刷库,该模型在PP-OCRv3识别的数据集上训练,精度对比如下表。 + +|中文识别算法|模型|UIM|精度| +| --- | --- | --- |--- | +|PP-OCRv3|SVTR_LCNet| w/o |78.4%| +|PP-OCRv3|SVTR_LCNet| w |79.4%| +|SVTR|SVTR-Tiny|-|82.5%| + +aistudio项目链接: [高精度中文场景文本识别模型SVTR](https://aistudio.baidu.com/aistudio/projectdetail/4263032) + +## 2. SVTR中文模型使用 + +### 环境准备 + + +本任务基于Aistudio完成, 具体环境如下: + +- 操作系统: Linux +- PaddlePaddle: 2.3 +- PaddleOCR: dygraph + +下载 PaddleOCR代码 + +```bash +git clone -b dygraph https://github.com/PaddlePaddle/PaddleOCR +``` + +安装依赖库 + +```bash +pip install -r PaddleOCR/requirements.txt -i https://mirror.baidu.com/pypi/simple +``` + +### 快速使用 + +获取SVTR中文模型文件,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁 +
+ +
+ +```bash +# 解压模型文件 +tar xf svtr_ch_high_accuracy.tar +``` + +预测中文文本,以下图为例: +![](../doc/imgs_words/ch/word_1.jpg) + +预测命令: + +```bash +# CPU预测 +python tools/infer_rec.py -c configs/rec/rec_svtrnet_ch.yml -o Global.pretrained_model=./svtr_ch_high_accuracy/best_accuracy Global.infer_img=./doc/imgs_words/ch/word_1.jpg Global.use_gpu=False + +# GPU预测 +#python tools/infer_rec.py -c configs/rec/rec_svtrnet_ch.yml -o Global.pretrained_model=./svtr_ch_high_accuracy/best_accuracy Global.infer_img=./doc/imgs_words/ch/word_1.jpg Global.use_gpu=True +``` + +可以看到最后打印结果为 +- result: 韩国小馆 0.9853458404541016 + +0.9853458404541016为预测置信度。 + +### 推理模型导出与预测 + +inference 模型(paddle.jit.save保存的模型) 一般是模型训练,把模型结构和模型参数保存在文件中的固化模型,多用于预测部署场景。 训练过程中保存的模型是checkpoints模型,保存的只有模型的参数,多用于恢复训练等。 与checkpoints模型相比,inference 模型会额外保存模型的结构信息,在预测部署、加速推理上性能优越,灵活方便,适合于实际系统集成。 + +运行识别模型转inference模型命令,如下: + +```bash +python tools/export_model.py -c configs/rec/rec_svtrnet_ch.yml -o Global.pretrained_model=./svtr_ch_high_accuracy/best_accuracy Global.save_inference_dir=./inference/svtr_ch +``` + +转换成功后,在目录下有三个文件: +```shell +inference/svtr_ch/ + ├── inference.pdiparams # 识别inference模型的参数文件 + ├── inference.pdiparams.info # 识别inference模型的参数信息,可忽略 + └── inference.pdmodel # 识别inference模型的program文件 +``` + +inference模型预测,命令如下: + +```bash +# CPU预测 +python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words/ch/word_1.jpg" --rec_algorithm='SVTR' --rec_model_dir=./inference/svtr_ch/ --rec_image_shape='3, 32, 320' --rec_char_dict_path=ppocr/utils/ppocr_keys_v1.txt --use_gpu=False + +# GPU预测 +#python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words/ch/word_1.jpg" --rec_algorithm='SVTR' --rec_model_dir=./inference/svtr_ch/ --rec_image_shape='3, 32, 320' --rec_char_dict_path=ppocr/utils/ppocr_keys_v1.txt --use_gpu=True +``` + +**注意** + +- 使用SVTR算法时,需要指定--rec_algorithm='SVTR' +- 如果使用自定义字典训练的模型,需要将--rec_char_dict_path=ppocr/utils/ppocr_keys_v1.txt修改为自定义的字典 +- --rec_image_shape='3, 32, 320' 该参数不能去掉 diff --git a/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml b/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml index 1ffeba07995860f964e22b8b9d2538320d80f651..f7e327d1e0378ed75b03b1f866414ef34e197c47 100644 --- a/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml +++ b/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml @@ -63,8 +63,7 @@ Train: - DecodeImage: img_mode: BGR channel_first: false - - RecAug: - use_tia: False + - BaseDataAugmentation: - RandAugment: - SSLRotateResize: image_shape: [3, 48, 320] diff --git a/configs/cls/cls_mv3.yml b/configs/cls/cls_mv3.yml index 5e643dc3839b2e2edf3c811db813dd6a90797366..0c46ff560277d9e318587432671de09f0d13cf35 100644 --- a/configs/cls/cls_mv3.yml +++ b/configs/cls/cls_mv3.yml @@ -60,8 +60,7 @@ Train: img_mode: BGR channel_first: False - ClsLabelEncode: # Class handling label - - RecAug: - use_tia: False + - BaseDataAugmentation: - RandAugment: - ClsResizeImg: image_shape: [3, 48, 192] diff --git a/configs/rec/rec_mtb_nrtr.yml b/configs/rec/rec_mtb_nrtr.yml index 04267500854310dc6d5df9318bb8c056c65cd5b5..4e5826adc990c30aaee1d63fd5b4523944906eee 100644 --- a/configs/rec/rec_mtb_nrtr.yml +++ b/configs/rec/rec_mtb_nrtr.yml @@ -9,7 +9,7 @@ Global: eval_batch_step: [0, 2000] cal_metric_during_train: True pretrained_model: - checkpoints: + checkpoints: save_inference_dir: use_visualdl: False infer_img: doc/imgs_words_en/word_10.png @@ -49,7 +49,7 @@ Architecture: Loss: - name: NRTRLoss + name: CELoss smoothing: True PostProcess: @@ -68,8 +68,8 @@ Train: img_mode: BGR channel_first: False - NRTRLabelEncode: # Class 
handling label - - NRTRRecResizeImg: - image_shape: [100, 32] + - GrayRecResizeImg: + image_shape: [100, 32] # W H resize_type: PIL # PIL or OpenCV - KeepKeys: keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order @@ -82,14 +82,14 @@ Train: Eval: dataset: name: LMDBDataSet - data_dir: ./train_data/data_lmdb_release/evaluation/ + data_dir: ./train_data/data_lmdb_release/validation/ transforms: - DecodeImage: # load image img_mode: BGR channel_first: False - NRTRLabelEncode: # Class handling label - - NRTRRecResizeImg: - image_shape: [100, 32] + - GrayRecResizeImg: + image_shape: [100, 32] # W H resize_type: PIL # PIL or OpenCV - KeepKeys: keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order @@ -97,5 +97,5 @@ Eval: shuffle: False drop_last: False batch_size_per_card: 256 - num_workers: 1 + num_workers: 4 use_shared_memory: False diff --git a/configs/rec/rec_r45_abinet.yml b/configs/rec/rec_r45_abinet.yml new file mode 100644 index 0000000000000000000000000000000000000000..e604bcd12d9c06d1358e250bc76700af920db93b --- /dev/null +++ b/configs/rec/rec_r45_abinet.yml @@ -0,0 +1,101 @@ +Global: + use_gpu: True + epoch_num: 10 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/r45_abinet/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_abinet.txt + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.99 + clip_norm: 20.0 + lr: + name: Piecewise + decay_epochs: [6] + values: [0.0001, 0.00001] + regularizer: + name: 'L2' + factor: 0. 
+ +Architecture: + model_type: rec + algorithm: ABINet + in_channels: 3 + Transform: + Backbone: + name: ResNet45 + Head: + name: ABINetHead + use_lang: True + iter_size: 3 + + +Loss: + name: CELoss + ignore_index: &ignore_index 100 # Must be greater than the number of character classes + +PostProcess: + name: ABINetLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - ABINetRecAug: + - ABINetLabelEncode: # Class handling label + ignore_index: *ignore_index + - ABINetRecResizeImg: + image_shape: [3, 32, 128] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 96 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - ABINetLabelEncode: # Class handling label + ignore_index: *ignore_index + - ABINetRecResizeImg: + image_shape: [3, 32, 128] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 + use_shared_memory: False diff --git a/configs/rec/rec_svtrnet.yml b/configs/rec/rec_svtrnet.yml index 233d5e276577cad0144456ef7df1e20de99891f9..c1f5cc380ab9652e3ac750f8b8edacde9837daf0 100644 --- a/configs/rec/rec_svtrnet.yml +++ b/configs/rec/rec_svtrnet.yml @@ -26,7 +26,7 @@ Optimizer: name: AdamW beta1: 0.9 beta2: 0.99 - epsilon: 0.00000008 + epsilon: 8.e-8 weight_decay: 0.05 no_weight_decay_name: norm pos_embed one_dim_param_no_weight_decay: true @@ -77,14 +77,13 @@ Metric: Train: dataset: name: LMDBDataSet - data_dir: ./train_data/data_lmdb_release/training/ + data_dir: ./train_data/data_lmdb_release/training transforms: - DecodeImage: # load image img_mode: BGR channel_first: False - CTCLabelEncode: # Class handling label - - RecResizeImg: - character_dict_path: + - SVTRRecResizeImg: image_shape: [3, 64, 256] padding: False - KeepKeys: @@ -98,14 +97,13 @@ Train: Eval: dataset: name: LMDBDataSet - data_dir: ./train_data/data_lmdb_release/validation/ + data_dir: ./train_data/data_lmdb_release/validation transforms: - DecodeImage: # load image img_mode: BGR channel_first: False - CTCLabelEncode: # Class handling label - - RecResizeImg: - character_dict_path: + - SVTRRecResizeImg: image_shape: [3, 64, 256] padding: False - KeepKeys: diff --git a/configs/rec/rec_svtrnet_ch.yml b/configs/rec/rec_svtrnet_ch.yml new file mode 100644 index 0000000000000000000000000000000000000000..0d3f63d125ea12fa097fe49454b04423710e2f68 --- /dev/null +++ b/configs/rec/rec_svtrnet_ch.yml @@ -0,0 +1,155 @@ +Global: + use_gpu: true + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/svtr_ch_all/ + save_epoch_step: 10 + eval_batch_step: + - 0 + - 2000 + cal_metric_during_train: true + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 25 + infer_mode: false + use_space_char: true + save_res_path: ./output/rec/predicts_svtr_tiny_ch_all.txt +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 8.0e-08 + weight_decay: 0.05 + no_weight_decay_name: 
norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 +Architecture: + model_type: rec + algorithm: SVTR + Transform: null + Backbone: + name: SVTRNet + img_size: + - 32 + - 320 + out_char_num: 40 + out_channels: 96 + patch_merging: Conv + embed_dim: + - 64 + - 128 + - 256 + depth: + - 3 + - 6 + - 3 + num_heads: + - 2 + - 4 + - 8 + mixer: + - Local + - Local + - Local + - Local + - Local + - Local + - Global + - Global + - Global + - Global + - Global + - Global + local_mixer: + - - 7 + - 11 + - - 7 + - 11 + - - 7 + - 11 + last_stage: true + prenorm: false + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead +Loss: + name: CTCLoss +PostProcess: + name: CTCLabelDecode +Metric: + name: RecMetric + main_indicator: acc +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/train_list.txt + ext_op_transform_idx: 1 + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: + - 32 + - 320 + - 3 + - RecAug: null + - CTCLabelEncode: null + - SVTRRecResizeImg: + image_shape: + - 3 + - 32 + - 320 + padding: true + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: true + batch_size_per_card: 256 + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - CTCLabelEncode: null + - SVTRRecResizeImg: + image_shape: + - 3 + - 32 + - 320 + padding: true + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: false + drop_last: false + batch_size_per_card: 256 + num_workers: 2 +profiler_options: null diff --git a/configs/rec/rec_vitstr_none_ce.yml b/configs/rec/rec_vitstr_none_ce.yml new file mode 100644 index 0000000000000000000000000000000000000000..b969c83a5d77b8c8ffde97ce5f914512074cf26e --- /dev/null +++ b/configs/rec/rec_vitstr_none_ce.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/vitstr_none_ce/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration# + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: ppocr/utils/EN_symbol_dict.txt + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_vitstr.txt + + +Optimizer: + name: Adadelta + epsilon: 1.e-8 + rho: 0.95 + clip_norm: 5.0 + lr: + learning_rate: 1.0 + +Architecture: + model_type: rec + algorithm: ViTSTR + in_channels: 1 + Transform: + Backbone: + name: ViTSTR + scale: tiny + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CELoss + with_all: True + ignore_index: &ignore_index 0 # Must be zero or greater than the number of character classes + +PostProcess: + name: ViTSTRLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ViTSTRLabelEncode: # Class handling label + ignore_index: *ignore_index + - GrayRecResizeImg: + image_shape: [224, 224] # W H + 
resize_type: PIL # PIL or OpenCV + inter_type: 'Image.BICUBIC' + scale: false + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 48 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ViTSTRLabelEncode: # Class handling label + ignore_index: *ignore_index + - GrayRecResizeImg: + image_shape: [224, 224] # W H + resize_type: PIL # PIL or OpenCV + inter_type: 'Image.BICUBIC' + scale: false + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/deploy/avh/.gitignore b/deploy/avh/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..faeba235a6894f0bd28aab23dea5f4f559071846 --- /dev/null +++ b/deploy/avh/.gitignore @@ -0,0 +1,5 @@ +include/inputs.h +include/outputs.h + +__pycache__/ +build/ \ No newline at end of file diff --git a/deploy/avh/Makefile b/deploy/avh/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..cf7d375b7e54c7781768db39274d9b3f7128812b --- /dev/null +++ b/deploy/avh/Makefile @@ -0,0 +1,129 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+# Makefile to build demo
+
+# Setup build environment
+BUILD_DIR := build
+
+ARM_CPU = ARMCM55
+ETHOSU_PATH = /opt/arm/ethosu
+CMSIS_PATH ?= ${ETHOSU_PATH}/cmsis
+ETHOSU_PLATFORM_PATH ?= ${ETHOSU_PATH}/core_platform
+STANDALONE_CRT_PATH := $(abspath $(BUILD_DIR))/runtime
+CORSTONE_300_PATH = ${ETHOSU_PLATFORM_PATH}/targets/corstone-300
+PKG_COMPILE_OPTS = -g -Wall -O2 -Wno-incompatible-pointer-types -Wno-format -mcpu=cortex-m55 -mthumb -mfloat-abi=hard -std=gnu99
+CMAKE ?= cmake
+CC = arm-none-eabi-gcc
+AR = arm-none-eabi-ar
+RANLIB = arm-none-eabi-ranlib
+PKG_CFLAGS = ${PKG_COMPILE_OPTS} \
+	-I${STANDALONE_CRT_PATH}/include \
+	-I${STANDALONE_CRT_PATH}/src/runtime/crt/include \
+	-I${PWD}/include \
+	-I${CORSTONE_300_PATH} \
+	-I${CMSIS_PATH}/Device/ARM/${ARM_CPU}/Include/ \
+	-I${CMSIS_PATH}/CMSIS/Core/Include \
+	-I${CMSIS_PATH}/CMSIS/NN/Include \
+	-I${CMSIS_PATH}/CMSIS/DSP/Include \
+	-I$(abspath $(BUILD_DIR))/codegen/host/include
+CMSIS_NN_CMAKE_FLAGS = -DCMAKE_TOOLCHAIN_FILE=$(abspath $(BUILD_DIR))/../arm-none-eabi-gcc.cmake \
+	-DTARGET_CPU=cortex-m55 \
+	-DBUILD_CMSIS_NN_FUNCTIONS=YES
+PKG_LDFLAGS = -lm -specs=nosys.specs -static -T corstone300.ld
+
+ifeq ($(VERBOSE),1)
+QUIET ?=
+else
+QUIET ?= @
+endif
+
+DEMO_MAIN = src/demo_bare_metal.c
+CODEGEN_SRCS = $(wildcard $(abspath $(BUILD_DIR))/codegen/host/src/*.c)
+CODEGEN_OBJS = $(subst .c,.o,$(CODEGEN_SRCS))
+CMSIS_STARTUP_SRCS = $(wildcard ${CMSIS_PATH}/Device/ARM/${ARM_CPU}/Source/*.c)
+UART_SRCS = $(wildcard ${CORSTONE_300_PATH}/*.c)
+
+demo: $(BUILD_DIR)/demo
+
+$(BUILD_DIR)/stack_allocator.o: $(STANDALONE_CRT_PATH)/src/runtime/crt/memory/stack_allocator.c
+	$(QUIET)mkdir -p $(@D)
+	$(QUIET)$(CC) -c $(PKG_CFLAGS) -o $@ $^
+
+$(BUILD_DIR)/crt_backend_api.o: $(STANDALONE_CRT_PATH)/src/runtime/crt/common/crt_backend_api.c
+	$(QUIET)mkdir -p $(@D)
+	$(QUIET)$(CC) -c $(PKG_CFLAGS) -o $@ $^
+
+# Build generated code
+$(BUILD_DIR)/libcodegen.a: $(CODEGEN_SRCS)
+	$(QUIET)cd $(abspath $(BUILD_DIR)/codegen/host/src) && $(CC) -c $(PKG_CFLAGS) $(CODEGEN_SRCS)
+	$(QUIET)$(AR) -cr $(abspath $(BUILD_DIR)/libcodegen.a) $(CODEGEN_OBJS)
+	$(QUIET)$(RANLIB) $(abspath $(BUILD_DIR)/libcodegen.a)
+
+# Build CMSIS startup code
+${BUILD_DIR}/libcmsis_startup.a: $(CMSIS_STARTUP_SRCS)
+	$(QUIET)mkdir -p $(abspath $(BUILD_DIR)/libcmsis_startup)
+	$(QUIET)cd $(abspath $(BUILD_DIR)/libcmsis_startup) && $(CC) -c $(PKG_CFLAGS) -D${ARM_CPU} $^
+	$(QUIET)$(AR) -cr $(abspath $(BUILD_DIR)/libcmsis_startup.a) $(abspath $(BUILD_DIR))/libcmsis_startup/*.o
+	$(QUIET)$(RANLIB) $(abspath $(BUILD_DIR)/libcmsis_startup.a)
+
+CMSIS_SHA_FILE=${CMSIS_PATH}/977abe9849781a2e788b02282986480ff4e25ea6.sha
+ifneq ("$(wildcard $(CMSIS_SHA_FILE))","")
+${BUILD_DIR}/cmsis_nn/Source/libcmsis-nn.a:
+	$(QUIET)mkdir -p $(@D)
+	$(QUIET)cd $(CMSIS_PATH)/CMSIS/NN && $(CMAKE) -B $(abspath $(BUILD_DIR)/cmsis_nn) $(CMSIS_NN_CMAKE_FLAGS)
+	$(QUIET)cd $(abspath $(BUILD_DIR)/cmsis_nn) && $(MAKE) all
+else
+# Build CMSIS-NN
+${BUILD_DIR}/cmsis_nn/Source/SoftmaxFunctions/libCMSISNNSoftmax.a:
+	$(QUIET)mkdir -p $(@D)
+	$(QUIET)cd $(CMSIS_PATH)/CMSIS/NN && $(CMAKE) -B $(abspath $(BUILD_DIR)/cmsis_nn) $(CMSIS_NN_CMAKE_FLAGS)
+	$(QUIET)cd $(abspath $(BUILD_DIR)/cmsis_nn) && $(MAKE) all
+endif
+
+# Build demo application
+ifneq ("$(wildcard $(CMSIS_SHA_FILE))","")
+$(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BUILD_DIR)/crt_backend_api.o \
+	${BUILD_DIR}/libcodegen.a ${BUILD_DIR}/libcmsis_startup.a ${BUILD_DIR}/cmsis_nn/Source/libcmsis-nn.a
+	$(QUIET)mkdir -p $(@D)
+	$(QUIET)$(CC) $(PKG_CFLAGS) $(FREERTOS_FLAGS) -o $@ -Wl,--whole-archive $^ -Wl,--no-whole-archive $(PKG_LDFLAGS)
+else
+$(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BUILD_DIR)/crt_backend_api.o \
+	${BUILD_DIR}/libcodegen.a ${BUILD_DIR}/libcmsis_startup.a \
+	${BUILD_DIR}/cmsis_nn/Source/SoftmaxFunctions/libCMSISNNSoftmax.a \
+	${BUILD_DIR}/cmsis_nn/Source/FullyConnectedFunctions/libCMSISNNFullyConnected.a \
+	${BUILD_DIR}/cmsis_nn/Source/SVDFunctions/libCMSISNNSVDF.a \
+	${BUILD_DIR}/cmsis_nn/Source/ReshapeFunctions/libCMSISNNReshape.a \
+	${BUILD_DIR}/cmsis_nn/Source/ActivationFunctions/libCMSISNNActivation.a \
+	${BUILD_DIR}/cmsis_nn/Source/NNSupportFunctions/libCMSISNNSupport.a \
+	${BUILD_DIR}/cmsis_nn/Source/ConcatenationFunctions/libCMSISNNConcatenation.a \
+	${BUILD_DIR}/cmsis_nn/Source/BasicMathFunctions/libCMSISNNBasicMaths.a \
+	${BUILD_DIR}/cmsis_nn/Source/ConvolutionFunctions/libCMSISNNConvolutions.a \
+	${BUILD_DIR}/cmsis_nn/Source/PoolingFunctions/libCMSISNNPooling.a
+	$(QUIET)mkdir -p $(@D)
+	$(QUIET)$(CC) $(PKG_CFLAGS) $(FREERTOS_FLAGS) -o $@ -Wl,--whole-archive $^ -Wl,--no-whole-archive $(PKG_LDFLAGS)
+endif
+
+clean:
+	$(QUIET)rm -rf $(BUILD_DIR)/codegen
+
+cleanall:
+	$(QUIET)rm -rf $(BUILD_DIR)
+
+.SUFFIXES:
+
+.DEFAULT: demo
diff --git a/deploy/avh/README.md b/deploy/avh/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..922fb4da920391d1baf98d82b94569321df9a2bc
--- /dev/null
+++ b/deploy/avh/README.md
@@ -0,0 +1,100 @@
+<!--- Licensed to the Apache Software Foundation (ASF) under one -->
+<!--- or more contributor license agreements.  See the NOTICE file -->
+<!--- distributed with this work for additional information -->
+<!--- regarding copyright ownership.  The ASF licenses this file -->
+<!--- to you under the Apache License, Version 2.0 (the -->
+<!--- "License"); you may not use this file except in compliance -->
+<!--- with the License.  You may obtain a copy of the License at -->
+<!--- -->
+<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
+<!--- -->
+<!--- Unless required by applicable law or agreed to in writing, -->
+<!--- software distributed under the License is distributed on an -->
+<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
+<!--- KIND, either express or implied.  See the License for the -->
+<!--- specific language governing permissions and limitations -->
+<!--- under the License. -->
+
+Running PaddleOCR text recognition model via TVM on bare metal Arm(R) Cortex(R)-M55 CPU and CMSIS-NN
+===============================================================
+
+This folder contains an example of how to use TVM to run a PaddleOCR model
+on bare metal Cortex(R)-M55 CPU and CMSIS-NN.
+
+Prerequisites
+-------------
+If the demo is run in the ci_cpu Docker container provided with TVM, then the following
+software will already be installed.
+
+If the demo is not run in the ci_cpu Docker container, then you will need the following:
+- Software required to build and run the demo (These can all be installed by running
+  https://github.com/apache/tvm/blob/main/docker/install/ubuntu_install_ethosu_driver_stack.sh .)
+  - [Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps)
+  - [cmake 3.19.5](https://github.com/Kitware/CMake/releases/)
+  - [GCC toolchain from Arm(R)](https://developer.arm.com/-/media/Files/downloads/gnu-rm/10-2020q4/gcc-arm-none-eabi-10-2020-q4-major-x86_64-linux.tar.bz2)
+  - [Arm(R) Ethos(TM)-U NPU driver stack](https://review.mlplatform.org)
+  - [CMSIS](https://github.com/ARM-software/CMSIS_5)
+- The python libraries listed in the requirements.txt of this directory
+  - These can be installed by running the following from the current directory:
+    ```bash
+    pip install -r ./requirements.txt
+    ```
+
+You will also need TVM which can either be:
+  - Built from source (see [Install from Source](https://tvm.apache.org/docs/install/from_source.html))
+    - When building from source, the following need to be set in config.cmake:
+      - set(USE_CMSISNN ON)
+      - set(USE_MICRO ON)
+      - set(USE_LLVM ON)
+  - Installed from TLCPack nightly (see [TLCPack](https://tlcpack.ai/))
+
+You will need to update your PATH environment variable to include the path to cmake 3.19.5 and the FVP.
+
+For example, if you've installed these in ```/opt/arm```, then you would do the following:
+```bash
+export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH
+```
+
+Running the demo application
+----------------------------
+Type the following command to run the bare metal text recognition application ([src/demo_bare_metal.c](./src/demo_bare_metal.c)):
+
+```bash
+./run_demo.sh
+```
+
+If the Ethos(TM)-U platform and/or CMSIS have not been installed in /opt/arm/ethosu then
+the locations for these can be specified as arguments to run_demo.sh, for example:
+
+```bash
+./run_demo.sh --cmsis_path /home/tvm-user/cmsis \
+--ethosu_platform_path /home/tvm-user/ethosu/core_platform
+```
+
+This will:
+- Download a PaddleOCR text recognition model
+- Use tvmc to compile the text recognition model for Cortex(R)-M55 CPU and CMSIS-NN
+- Create a C header file inputs.h containing the image data as a C array
+- Create a C header file outputs.h containing a C array where the output of inference will be stored
+- Build the demo application
+- Run the demo application on a Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software
+- Report the text on the image and the corresponding score
+
+Using your own image
+--------------------
+The convert_image.py script takes a single argument on the command line which is the path of the
+image to be converted into an array of bytes for consumption by the model.
+
+The demo can be modified to use an image of your choice by changing the following line in run_demo.sh:
+
+```bash
+python3 ./convert_image.py path/to/image
+```
+
+Model description
+-----------------
+In this demo, the model we use is an English recognition model based on [PP-OCRv3](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/PP-OCRv3_introduction.md). PP-OCRv3 is the third version of the PP-OCR series model released by [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR). This series of models has the following features:
+  - Ultra-lightweight OCR system: detection (3.6M) + direction classifier (1.4M) + recognition (12M) = 17.0M
+  - Support for more than 80 multi-language recognition models, including English, Chinese, French, German, Arabic, Korean and Japanese
+  - Support for vertical text recognition and long text recognition
+
+The text recognition model in PP-OCRv3 supports more than 80 languages. In the process of model development, since the Arm(R) Cortex(R)-M55 CPU does not support the rnn operator, we removed the unsupported operators from the PP-OCRv3 text recognition model to obtain the model used here.
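+
+Input preprocessing (reference)
+-------------------------------
+The exact preprocessing the demo expects is implemented in [convert_image.py](./convert_image.py).
+As a rough guide, the minimal Python sketch below mirrors that script for the 3x32x320 input used
+here; the function name `preprocess` is ours and not part of the demo sources.
+
+```python
+import cv2
+import numpy as np
+
+def preprocess(path, shape=(3, 32, 320)):
+    """Resize to height 32 keeping aspect ratio, normalize to [-1, 1],
+    right-pad the width to 320 and return an NCHW float32 batch of 1."""
+    c, h, w = shape
+    img = cv2.imread(path)
+    ratio = img.shape[1] / float(img.shape[0])
+    resized_w = min(w, int(np.ceil(h * ratio)))
+    img = cv2.resize(img, (resized_w, h)).astype("float32")
+    img = (img.transpose((2, 0, 1)) / 255.0 - 0.5) / 0.5  # HWC -> CHW, [-1, 1]
+    padded = np.zeros(shape, dtype="float32")
+    padded[:, :, :resized_w] = img  # pad the remaining width with zeros
+    return padded[np.newaxis, :]  # add the batch dimension
+```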
\ No newline at end of file
diff --git a/deploy/avh/README_ch.md b/deploy/avh/README_ch.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e6272ae1258e11aeea7a21dd01400a5bff614ba
--- /dev/null
+++ b/deploy/avh/README_ch.md
@@ -0,0 +1,93 @@
+<!--- Licensed to the Apache Software Foundation (ASF) under one -->
+<!--- or more contributor license agreements.  See the NOTICE file -->
+<!--- distributed with this work for additional information -->
+<!--- regarding copyright ownership.  The ASF licenses this file -->
+<!--- to you under the Apache License, Version 2.0 (the -->
+<!--- "License"); you may not use this file except in compliance -->
+<!--- with the License.  You may obtain a copy of the License at -->
+<!--- -->
+<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
+<!--- -->
+<!--- Unless required by applicable law or agreed to in writing, -->
+<!--- software distributed under the License is distributed on an -->
+<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
+<!--- KIND, either express or implied.  See the License for the -->
+<!--- specific language governing permissions and limitations -->
+<!--- under the License. -->
+
+通过TVM在 Arm(R) Cortex(R)-M55 CPU 上运行 PaddleOCR 文本识别模型
+===============================================================
+
+此文件夹包含如何使用 TVM 在 Cortex(R)-M55 CPU 上运行 PaddleOCR 模型的示例。
+
+依赖
+-------------
+本demo运行在TVM提供的docker环境上,在该环境中已经安装好了必需的软件。
+
+在非docker环境中,需要手动安装如下依赖项:
+
+- 软件可通过[安装脚本](https://github.com/apache/tvm/blob/main/docker/install/ubuntu_install_ethosu_driver_stack.sh)一键安装
+  - [Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps)
+  - [cmake 3.19.5](https://github.com/Kitware/CMake/releases/)
+  - [GCC toolchain from Arm(R)](https://developer.arm.com/-/media/Files/downloads/gnu-rm/10-2020q4/gcc-arm-none-eabi-10-2020-q4-major-x86_64-linux.tar.bz2)
+  - [Arm(R) Ethos(TM)-U NPU driver stack](https://review.mlplatform.org)
+  - [CMSIS](https://github.com/ARM-software/CMSIS_5)
+- python 依赖
+  ```bash
+  pip install -r ./requirements.txt
+  ```
+- TVM
+  - 从源码安装([Install from Source](https://tvm.apache.org/docs/install/from_source.html)),从源码安装时,需要在config.cmake中设置如下字段:
+    - set(USE_CMSISNN ON)
+    - set(USE_MICRO ON)
+    - set(USE_LLVM ON)
+  - 从TLCPack安装([TLCPack](https://tlcpack.ai/))
+
+安装完成后需要更新环境变量,以软件安装地址为`/opt/arm`为例:
+```bash
+export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH
+```
+
+运行demo
+----------------------------
+使用如下命令可以一键运行demo:
+
+```bash
+./run_demo.sh
+```
+
+如果 Ethos(TM)-U 平台或 CMSIS 没有安装在 `/opt/arm/ethosu` 中,可通过参数进行设置,例如:
+
+```bash
+./run_demo.sh --cmsis_path /home/tvm-user/cmsis \
+--ethosu_platform_path /home/tvm-user/ethosu/core_platform
+```
+
+`./run_demo.sh`脚本会执行如下步骤:
+- 下载 PaddleOCR 文字识别模型
+- 使用tvm将PaddleOCR文字识别模型编译为 Cortex(R)-M55 CPU 和 CMSIS-NN 后端的可执行文件
+- 创建一个包含输入图像数据的头文件`inputs.h`
+- 创建一个包含输出tensor大小的头文件`outputs.h`
+- 编译可执行程序
+- 运行程序
+- 输出图片上的文字和置信度
+
+使用自己的图片
+--------------------
+替换 `run_demo.sh` 中140行处的图片地址即可。
+
+使用自己的模型
+--------------------
+替换 `run_demo.sh` 中130行处的模型地址即可。
+
+模型描述
+-----------------
+
+在这个demo中,我们使用的模型是基于[PP-OCRv3](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/PP-OCRv3_introduction.md)的英文识别模型。PP-OCRv3是[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)发布的PP-OCR系列模型的第三个版本。该系列模型具有以下特点:
+  - 超轻量级OCR系统:检测(3.6M)+方向分类器(1.4M)+识别(12M)=17.0M。
+  - 支持80多种多语言识别模型,包括英文、中文、法文、德文、阿拉伯文、韩文、日文等。
+  - 支持竖排文本识别,长文本识别。
+
+PP-OCRv3中的文本识别模型支持80多种语言。在模型开发过程中,由于Arm(R) Cortex(R)-M55 CPU不支持rnn算子,我们在PP-OCRv3文本识别模型的基础上删除了不支持的算子,得到当前模型。
\ No newline at end of file
diff --git a/deploy/avh/arm-none-eabi-gcc.cmake b/deploy/avh/arm-none-eabi-gcc.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..415b3139be1b7f891c017dff0dc299b67f7ef2fe
--- /dev/null
+++ b/deploy/avh/arm-none-eabi-gcc.cmake
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if (__TOOLCHAIN_LOADED)
+  return()
+endif()
+set(__TOOLCHAIN_LOADED TRUE)
+
+set(CMAKE_SYSTEM_NAME Generic)
+set(CMAKE_C_COMPILER "arm-none-eabi-gcc")
+set(CMAKE_CXX_COMPILER "arm-none-eabi-g++")
+set(CMAKE_SYSTEM_PROCESSOR "cortex-m55" CACHE STRING "Select Arm(R) Cortex(R)-M architecture. (cortex-m0, cortex-m3, cortex-m33, cortex-m4, cortex-m55, cortex-m7, etc)")
+
+set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+
+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+set(CMAKE_C_STANDARD 99)
+set(CMAKE_CXX_STANDARD 14)
+
+# The system processor could for example be set to cortex-m33+nodsp+nofp.
+set(__CPU_COMPILE_TARGET ${CMAKE_SYSTEM_PROCESSOR})
+string(REPLACE "+" ";" __CPU_FEATURES ${__CPU_COMPILE_TARGET})
+list(POP_FRONT __CPU_FEATURES CMAKE_SYSTEM_PROCESSOR)
+
+string(FIND ${__CPU_COMPILE_TARGET} "+" __OFFSET)
+if(__OFFSET GREATER_EQUAL 0)
+  string(SUBSTRING ${__CPU_COMPILE_TARGET} ${__OFFSET} -1 CPU_FEATURES)
+endif()
+
+# Add -mcpu to the compile options to override the -mcpu the CMake toolchain adds
+add_compile_options(-mcpu=${__CPU_COMPILE_TARGET})
+
+# Set floating point unit
+if("${__CPU_COMPILE_TARGET}" MATCHES "\\+fp")
+  set(FLOAT hard)
+elseif("${__CPU_COMPILE_TARGET}" MATCHES "\\+nofp")
+  set(FLOAT soft)
+elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m33" OR
+       "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m55")
+  set(FLOAT hard)
+else()
+  set(FLOAT soft)
+endif()
+
+add_compile_options(-mfloat-abi=${FLOAT})
+add_link_options(-mfloat-abi=${FLOAT})
+
+# Link target
+add_link_options(-mcpu=${__CPU_COMPILE_TARGET})
+add_link_options(-Xlinker -Map=output.map)
+
+#
+# Compile options
+#
+set(cxx_flags "-fno-unwind-tables;-fno-rtti;-fno-exceptions")
+
+add_compile_options("-Wall;-Wextra;-Wsign-compare;-Wunused;-Wswitch-default;\
+-Wdouble-promotion;-Wredundant-decls;-Wshadow;-Wnull-dereference;\
+-Wno-format-extra-args;-Wno-unused-function;-Wno-unused-label;\
+-Wno-missing-field-initializers;-Wno-return-type;-Wno-format;-Wno-int-conversion"
+  "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
+)
diff --git a/deploy/avh/convert_image.py b/deploy/avh/convert_image.py
new file mode 100755
index 0000000000000000000000000000000000000000..747ab29e4ad2b577890298626da756b7bac6047c
--- /dev/null
+++ b/deploy/avh/convert_image.py
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import pathlib
+import sys
+import cv2
+import math
+import numpy as np
+
+
+def resize_norm_img(img, image_shape, padding=True):
+    imgC, imgH, imgW = image_shape
+    h = img.shape[0]
+    w = img.shape[1]
+    if not padding:
+        resized_image = cv2.resize(
+            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
+        resized_w = imgW
+    else:
+        ratio = w / float(h)
+        if math.ceil(imgH * ratio) > imgW:
+            resized_w = imgW
+        else:
+            resized_w = int(math.ceil(imgH * ratio))
+        resized_image = cv2.resize(img, (resized_w, imgH))
+    resized_image = resized_image.astype('float32')
+    if image_shape[0] == 1:
+        resized_image = resized_image / 255
+        resized_image = resized_image[np.newaxis, :]
+    else:
+        resized_image = resized_image.transpose((2, 0, 1)) / 255
+    resized_image -= 0.5
+    resized_image /= 0.5
+    padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
+    padding_im[:, :, 0:resized_w] = resized_image
+    return padding_im
+
+
+def create_header_file(name, tensor_name, tensor_data, output_path):
+    """
+    This function generates a header file containing the data from the numpy array provided.
+    """
+    file_path = pathlib.Path(f"{output_path}/" + name).resolve()
+    # Create header file with npy_data as a C array
+    raw_path = file_path.with_suffix(".h").resolve()
+    with open(raw_path, "w") as header_file:
+        header_file.write(
+            "\n" +
+            f"const size_t {tensor_name}_len = {tensor_data.size};\n" +
+            f'__attribute__((section(".data.tvm"), aligned(16))) float {tensor_name}[] = '
+        )
+
+        header_file.write("{")
+        for i in np.ndindex(tensor_data.shape):
+            header_file.write(f"{tensor_data[i]}, ")
+        header_file.write("};\n\n")
+
+
+def create_headers(image_name):
+    """
+    This function generates C header files for the input and output arrays required to run inferences
+    """
+    img_path = os.path.join("./", f"{image_name}")
+
+    # Resize the image to 32x320 and normalize it
+    img = cv2.imread(img_path)
+    img = resize_norm_img(img, [3, 32, 320])
+    img_data = img.astype("float32")
+
+    # Add the batch dimension, as we are expecting 4-dimensional input: NCHW.
+    img_data = np.expand_dims(img_data, axis=0)
+
+    # Create input header file
+    create_header_file("inputs", "input", img_data, "./include")
+    # Create output header file
+    output_data = np.zeros([7760], np.float32)
+    create_header_file(
+        "outputs",
+        "output",
+        output_data,
+        "./include",
+    )
+
+
+if __name__ == "__main__":
+    create_headers(sys.argv[1])
diff --git a/deploy/avh/corstone300.ld b/deploy/avh/corstone300.ld
new file mode 100644
index 0000000000000000000000000000000000000000..e52b23da33601b8b858f11902e6553687cfbf352
--- /dev/null
+++ b/deploy/avh/corstone300.ld
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*------------------ Reference System Memories ------------- + +===================+============+=======+============+============+ + | Memory | Address | Size | CPU Access | NPU Access | + +===================+============+=======+============+============+ + | ITCM | 0x00000000 | 512KB | Yes (RO) | No | + +-------------------+------------+-------+------------+------------+ + | DTCM | 0x20000000 | 512KB | Yes (R/W) | No | + +-------------------+------------+-------+------------+------------+ + | SSE-300 SRAM | 0x21000000 | 2MB | Yes (R/W) | Yes (R/W) | + +-------------------+------------+-------+------------+------------+ + | Data SRAM | 0x01000000 | 2MB | Yes (R/W) | Yes (R/W) | + +-------------------+------------+-------+------------+------------+ + | DDR | 0x60000000 | 32MB | Yes (R/W) | Yes (R/W) | + +-------------------+------------+-------+------------+------------+ */ + +/*---------------------- ITCM Configuration ---------------------------------- + Flash Configuration + Flash Base Address <0x0-0xFFFFFFFF:8> + Flash Size (in Bytes) <0x0-0xFFFFFFFF:8> + + -----------------------------------------------------------------------------*/ +__ROM_BASE = 0x00000000; +__ROM_SIZE = 0x00080000; + +/*--------------------- DTCM RAM Configuration ---------------------------- + RAM Configuration + RAM Base Address <0x0-0xFFFFFFFF:8> + RAM Size (in Bytes) <0x0-0xFFFFFFFF:8> + + -----------------------------------------------------------------------------*/ +__RAM_BASE = 0x20000000; +__RAM_SIZE = 0x00080000; + +/*----------------------- Data SRAM Configuration ------------------------------ + Data SRAM Configuration + DATA_SRAM Base Address <0x0-0xFFFFFFFF:8> + DATA_SRAM Size (in Bytes) <0x0-0xFFFFFFFF:8> + + -----------------------------------------------------------------------------*/ +__DATA_SRAM_BASE = 0x01000000; +__DATA_SRAM_SIZE = 0x00200000; + +/*--------------------- Embedded SRAM Configuration ---------------------------- + SRAM Configuration + SRAM Base Address <0x0-0xFFFFFFFF:8> + SRAM Size (in Bytes) <0x0-0xFFFFFFFF:8> + + -----------------------------------------------------------------------------*/ +__SRAM_BASE = 0x21000000; +__SRAM_SIZE = 0x00200000; + +/*--------------------- Stack / Heap Configuration ---------------------------- + Stack / Heap Configuration + Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> + Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> + + -----------------------------------------------------------------------------*/ +__STACK_SIZE = 0x00008000; +__HEAP_SIZE = 0x00008000; + +/*--------------------- Embedded RAM Configuration ---------------------------- + DDR Configuration + DDR Base Address <0x0-0xFFFFFFFF:8> + DDR Size (in Bytes) <0x0-0xFFFFFFFF:8> + + -----------------------------------------------------------------------------*/ +__DDR_BASE = 0x60000000; +__DDR_SIZE = 0x02000000; + +/* + *-------------------- <<< end of configuration section >>> ------------------- + */ + +MEMORY +{ + ITCM (rx) : ORIGIN = __ROM_BASE, LENGTH = __ROM_SIZE + DTCM (rwx) : ORIGIN = __RAM_BASE, LENGTH = __RAM_SIZE + DATA_SRAM (rwx) : ORIGIN = __DATA_SRAM_BASE, LENGTH = __DATA_SRAM_SIZE + SRAM (rwx) : ORIGIN = __SRAM_BASE, LENGTH = __SRAM_SIZE + DDR (rwx) : ORIGIN = __DDR_BASE, LENGTH = __DDR_SIZE +} + +/* Linker script to place sections and symbol values. Should be used together + * with other linker script that defines memory regions ITCM and RAM. 
+ * It references following symbols, which must be defined in code: + * Reset_Handler : Entry of reset handler + * + * It defines following symbols, which code can use without definition: + * __exidx_start + * __exidx_end + * __copy_table_start__ + * __copy_table_end__ + * __zero_table_start__ + * __zero_table_end__ + * __etext + * __data_start__ + * __preinit_array_start + * __preinit_array_end + * __init_array_start + * __init_array_end + * __fini_array_start + * __fini_array_end + * __data_end__ + * __bss_start__ + * __bss_end__ + * __end__ + * end + * __HeapLimit + * __StackLimit + * __StackTop + * __stack + */ +ENTRY(Reset_Handler) + +SECTIONS +{ + /* .ddr is placed before .text so that .rodata.tvm is encountered before .rodata* */ + .ddr : + { + . = ALIGN (16); + *(.rodata.tvm) + . = ALIGN (16); + *(.data.tvm); + . = ALIGN(16); + } > DDR + + .text : + { + KEEP(*(.vectors)) + *(.text*) + + KEEP(*(.init)) + KEEP(*(.fini)) + + /* .ctors */ + *crtbegin.o(.ctors) + *crtbegin?.o(.ctors) + *(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors) + *(SORT(.ctors.*)) + *(.ctors) + + /* .dtors */ + *crtbegin.o(.dtors) + *crtbegin?.o(.dtors) + *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors) + *(SORT(.dtors.*)) + *(.dtors) + + *(.rodata*) + + KEEP(*(.eh_frame*)) + } > ITCM + + .ARM.extab : + { + *(.ARM.extab* .gnu.linkonce.armextab.*) + } > ITCM + + __exidx_start = .; + .ARM.exidx : + { + *(.ARM.exidx* .gnu.linkonce.armexidx.*) + } > ITCM + __exidx_end = .; + + .copy.table : + { + . = ALIGN(4); + __copy_table_start__ = .; + LONG (__etext) + LONG (__data_start__) + LONG (__data_end__ - __data_start__) + /* Add each additional data section here */ + __copy_table_end__ = .; + } > ITCM + + .zero.table : + { + . = ALIGN(4); + __zero_table_start__ = .; + __zero_table_end__ = .; + } > ITCM + + /** + * Location counter can end up 2byte aligned with narrow Thumb code but + * __etext is assumed by startup code to be the LMA of a section in DTCM + * which must be 4byte aligned + */ + __etext = ALIGN (4); + + .sram : + { + . = ALIGN(16); + } > SRAM AT > SRAM + + .data : AT (__etext) + { + __data_start__ = .; + *(vtable) + *(.data) + *(.data.*) + + . = ALIGN(4); + /* preinit data */ + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP(*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + + . = ALIGN(4); + /* init data */ + PROVIDE_HIDDEN (__init_array_start = .); + KEEP(*(SORT(.init_array.*))) + KEEP(*(.init_array)) + PROVIDE_HIDDEN (__init_array_end = .); + + + . = ALIGN(4); + /* finit data */ + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP(*(SORT(.fini_array.*))) + KEEP(*(.fini_array)) + PROVIDE_HIDDEN (__fini_array_end = .); + + KEEP(*(.jcr*)) + . = ALIGN(4); + /* All data end */ + __data_end__ = .; + + } > DTCM + + .bss.noinit (NOLOAD): + { + . = ALIGN(16); + *(.bss.noinit.*) + . = ALIGN(16); + } > SRAM AT > SRAM + + .bss : + { + . = ALIGN(4); + __bss_start__ = .; + *(.bss) + *(.bss.*) + *(COMMON) + . = ALIGN(4); + __bss_end__ = .; + } > DTCM AT > DTCM + + .data_sram : + { + . = ALIGN(16); + } > DATA_SRAM + + .heap (COPY) : + { + . = ALIGN(8); + __end__ = .; + PROVIDE(end = .); + . = . + __HEAP_SIZE; + . = ALIGN(8); + __HeapLimit = .; + } > DTCM + + .stack (ORIGIN(DTCM) + LENGTH(DTCM) - __STACK_SIZE) (COPY) : + { + . = ALIGN(8); + __StackLimit = .; + . = . + __STACK_SIZE; + . 
= ALIGN(8); + __StackTop = .; + } > DTCM + PROVIDE(__stack = __StackTop); + + /* Check if data + stack exceeds DTCM limit */ + ASSERT(__StackLimit >= __bss_end__, "region DTCM overflowed with stack") +} diff --git a/deploy/avh/imgs_words_en/word_10.png b/deploy/avh/imgs_words_en/word_10.png new file mode 100644 index 0000000000000000000000000000000000000000..07370f757ea83d5e3c5b1f7498b1f95d3aec2d18 Binary files /dev/null and b/deploy/avh/imgs_words_en/word_10.png differ diff --git a/deploy/avh/imgs_words_en/word_116.png b/deploy/avh/imgs_words_en/word_116.png new file mode 100644 index 0000000000000000000000000000000000000000..fd000ff60b0a7a87c8c16eb1409aa0548228e3f9 Binary files /dev/null and b/deploy/avh/imgs_words_en/word_116.png differ diff --git a/deploy/avh/include/crt_config.h b/deploy/avh/include/crt_config.h new file mode 100644 index 0000000000000000000000000000000000000000..4b9ccca02b269b289d029d2b791b37a88743148b --- /dev/null +++ b/deploy/avh/include/crt_config.h @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef TVM_RUNTIME_CRT_CONFIG_H_ +#define TVM_RUNTIME_CRT_CONFIG_H_ + +/*! Log level of the CRT runtime */ +#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG + +#endif // TVM_RUNTIME_CRT_CONFIG_H_ diff --git a/deploy/avh/include/tvm_runtime.h b/deploy/avh/include/tvm_runtime.h new file mode 100644 index 0000000000000000000000000000000000000000..2b59d9347027c3b6f0af6ced19bdfbdb37a7f940 --- /dev/null +++ b/deploy/avh/include/tvm_runtime.h @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <tvm/runtime/c_runtime_api.h>
+#include <tvm/runtime/crt/stack_allocator.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
+  printf("TVMPlatformAbort: %d\n", error_code);
+  printf("EXITTHESIM\n");
+  exit(-1);
+}
+
+tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
+  return kTvmErrorFunctionCallNotImplemented;
+}
+
+tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
+  return kTvmErrorFunctionCallNotImplemented;
+}
+
+void TVMLogf(const char* msg, ...) {
+  va_list args;
+  va_start(args, msg);
+  vfprintf(stdout, msg, args);
+  va_end(args);
+}
+
+TVM_DLL int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) { return 0; }
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/deploy/avh/requirements.txt b/deploy/avh/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1bf86ed1107cb18bf1ea6ac8eb2cc1214d654b60
--- /dev/null
+++ b/deploy/avh/requirements.txt
@@ -0,0 +1,3 @@
+paddlepaddle
+numpy
+opencv-python
\ No newline at end of file
diff --git a/deploy/avh/run_demo.sh b/deploy/avh/run_demo.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dd8e8f163d56899005f6a2811e2ed398b8498fc8
--- /dev/null
+++ b/deploy/avh/run_demo.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+set -e
+set -u
+set -o pipefail
+
+# Show usage
+function show_usage() {
+    cat <<EOF
+Usage: run_demo.sh
+-h, --help
+    Display this help message.
+--cmsis_path CMSIS_PATH
+    Set path to CMSIS.
+--ethosu_platform_path ETHOSU_PLATFORM_PATH
+    Set path to Arm(R) Ethos(TM)-U core platform.
+--fvp_path FVP_PATH
+    Set path to FVP.
+--cmake_path
+    Set path to cmake.
+EOF
+}
+
+# Parse arguments
+while (( $# )); do
+    case "$1" in
+        -h|--help)
+            show_usage
+            exit 0
+            ;;
+
+        --cmsis_path)
+            if [ $# -gt 1 ]
+            then
+                export CMSIS_PATH="$2"
+                shift 2
+            else
+                echo 'ERROR: --cmsis_path requires a non-empty argument' >&2
+                show_usage >&2
+                exit 1
+            fi
+            ;;
+
+        --ethosu_platform_path)
+            if [ $# -gt 1 ]
+            then
+                export ETHOSU_PLATFORM_PATH="$2"
+                shift 2
+            else
+                echo 'ERROR: --ethosu_platform_path requires a non-empty argument' >&2
+                show_usage >&2
+                exit 1
+            fi
+            ;;
+
+        --fvp_path)
+            if [ $# -gt 1 ]
+            then
+                export PATH="$2/models/Linux64_GCC-6.4:$PATH"
+                shift 2
+            else
+                echo 'ERROR: --fvp_path requires a non-empty argument' >&2
+                show_usage >&2
+                exit 1
+            fi
+            ;;
+
+        --cmake_path)
+            if [ $# -gt 1 ]
+            then
+                export CMAKE="$2"
+                shift 2
+            else
+                echo 'ERROR: --cmake_path requires a non-empty argument' >&2
+                show_usage >&2
+                exit 1
+            fi
+            ;;
+
+        -*|--*)
+            echo "Error: Unknown flag: $1" >&2
+            show_usage >&2
+            exit 1
+            ;;
+    esac
+done
+
+
+# Directories
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+
+# Make build directory
+rm -rf build
+make cleanall
+mkdir -p build
+cd build
+
+wget https://paddleocr.bj.bcebos.com/tvm/ocr_en.tar
+tar -xf ocr_en.tar
+
+# Compile model for Arm(R) Cortex(R)-M55 CPU and CMSIS-NN
+# An alternative to using "python3 -m tvm.driver.tvmc" is to call
+# "tvmc" directly once TVM has been pip installed.
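+# The compile step below relies on the AOT executor with the plain C
+# interface API and unpacked calls (so no dynamic memory or packed function
+# lookup is needed on the bare metal target), USMP with the hill_climb
+# algorithm for static memory planning, a 1x3x32x320 NCHW input named "x"
+# (matching convert_image.py), and Model Library Format (MLF) output that
+# the Makefile then builds and links against.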
+python3 -m tvm.driver.tvmc compile --target=cmsis-nn,c \ + --target-cmsis-nn-mcpu=cortex-m55 \ + --target-c-mcpu=cortex-m55 \ + --runtime=crt \ + --executor=aot \ + --executor-aot-interface-api=c \ + --executor-aot-unpacked-api=1 \ + --pass-config tir.usmp.enable=1 \ + --pass-config tir.usmp.algorithm=hill_climb \ + --pass-config tir.disable_storage_rewrite=1 \ + --pass-config tir.disable_vectorize=1 ocr_en/inference.pdmodel \ + --output-format=mlf \ + --model-format=paddle \ + --module-name=rec \ + --input-shapes x:[1,3,32,320] \ + --output=rec.tar +tar -xf rec.tar + +# Create C header files +cd .. +python3 ./convert_image.py imgs_words_en/word_116.png + +# Build demo executable +cd ${script_dir} +echo ${script_dir} +make + +# Run demo executable on the FVP +FVP_Corstone_SSE-300_Ethos-U55 -C cpu0.CFGDTCMSZ=15 \ +-C cpu0.CFGITCMSZ=15 -C mps3_board.uart0.out_file=\"-\" -C mps3_board.uart0.shutdown_tag=\"EXITTHESIM\" \ +-C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 \ +-C mps3_board.telnetterminal1.start_telnet=0 -C mps3_board.telnetterminal2.start_telnet=0 -C mps3_board.telnetterminal5.start_telnet=0 \ +./build/demo \ No newline at end of file diff --git a/deploy/avh/src/demo_bare_metal.c b/deploy/avh/src/demo_bare_metal.c new file mode 100644 index 0000000000000000000000000000000000000000..3f5f1bc4b0fd152c37343689db77a1165de0f393 --- /dev/null +++ b/deploy/avh/src/demo_bare_metal.c @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+#include <stdio.h>
+#include <tvm_runtime.h>
+#include <tvmgen_rec.h>
+
+#include "uart.h"
+
+// Header files generated by convert_image.py
+#include "inputs.h"
+#include "outputs.h"
+
+
+int main(int argc, char** argv) {
+  char dict[]={"#0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./ "};
+  int char_dict_nums = 97;
+  uart_init();
+  printf("Starting ocr rec inference\n");
+  struct tvmgen_rec_outputs rec_outputs = {
+      .output = output,
+  };
+  struct tvmgen_rec_inputs rec_inputs = {
+      .x = input,
+  };
+
+  tvmgen_rec_run(&rec_inputs, &rec_outputs);
+
+  // post process
+  int char_nums = output_len / char_dict_nums;
+
+  int last_index = 0;
+  float score = 0.f;
+  int count = 0;
+
+  printf("text: ");
+  for (int i = 0; i < char_nums; i++) {
+    int argmax_idx = 0;
+    float max_value = 0.0f;
+    for (int j = 0; j < char_dict_nums; j++){
+      if (output[i * char_dict_nums + j] > max_value){
+        max_value = output[i * char_dict_nums + j];
+        argmax_idx = j;
+      }
+    }
+    // CTC-style decode: index 0 is the blank and repeated indices are collapsed
+    if (argmax_idx > 0 && (!(i > 0 && argmax_idx == last_index))) {
+      score += max_value;
+      count += 1;
+      // printf("%d,%f,%c\n", argmax_idx, max_value, dict[argmax_idx]);
+      printf("%c", dict[argmax_idx]);
+    }
+    last_index = argmax_idx;
+  }
+  score /= count;
+  printf(", score: %f\n", score);
+
+  // The FVP will shut down when it receives "EXITTHESIM" on the UART
+  printf("EXITTHESIM\n");
+  while (1 == 1)
+    ;
+  return 0;
+}
diff --git a/deploy/cpp_infer/CMakeLists.txt b/deploy/cpp_infer/CMakeLists.txt
index 6d3ecb6ac2e9e6993814f077ca772d0d94f5d008..7deacfadc585e8905870e7fd218c6b115dcd8256 100644
--- a/deploy/cpp_infer/CMakeLists.txt
+++ b/deploy/cpp_infer/CMakeLists.txt
@@ -92,6 +92,8 @@ include_directories("${PADDLE_LIB}/third_party/install/glog/include")
 include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
 include_directories("${PADDLE_LIB}/third_party/install/xxhash/include")
 include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
+include_directories("${PADDLE_LIB}/third_party/install/onnxruntime/include")
+include_directories("${PADDLE_LIB}/third_party/install/paddle2onnx/include")
 include_directories("${PADDLE_LIB}/third_party/boost")
 include_directories("${PADDLE_LIB}/third_party/eigen3")
 
@@ -110,6 +112,8 @@ link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
 link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
 link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
 link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib")
+link_directories("${PADDLE_LIB}/third_party/install/onnxruntime/lib")
+link_directories("${PADDLE_LIB}/third_party/install/paddle2onnx/lib")
 link_directories("${PADDLE_LIB}/paddle/lib")
 
diff --git a/deploy/cpp_infer/readme.md b/deploy/cpp_infer/readme.md
index ddd15d49558454a5ffb0731665b118c929e607f0..a87db7e6596bc2528bfb4a93c3170ebf0482ccad 100644
--- a/deploy/cpp_infer/readme.md
+++ b/deploy/cpp_infer/readme.md
@@ -208,7 +208,7 @@ Execute the built executable file:
 ./build/ppocr [--param1] [--param2] [...]
 ```
 
-**Note**:ppocr uses the `PP-OCRv3` model by default, and the input shape used by the recognition model is `3, 48, 320`, so if you use the recognition function, you need to add the parameter `--rec_img_h=48`, if you do not use the default `PP-OCRv3` model, you do not need to set this parameter.
+**Note**: ppocr uses the `PP-OCRv3` model by default, and the input shape used by the recognition model is `3, 48, 320`. If you want to use the old version model, you should add the parameter `--rec_img_h=32`. 
Specifically, @@ -222,7 +222,6 @@ Specifically, --det=true \ --rec=true \ --cls=true \ - --rec_img_h=48\ ``` ##### 2. det+rec: @@ -234,7 +233,6 @@ Specifically, --det=true \ --rec=true \ --cls=false \ - --rec_img_h=48\ ``` ##### 3. det @@ -254,7 +252,6 @@ Specifically, --det=false \ --rec=true \ --cls=true \ - --rec_img_h=48\ ``` ##### 5. rec @@ -265,7 +262,6 @@ Specifically, --det=false \ --rec=true \ --cls=false \ - --rec_img_h=48\ ``` ##### 6. cls @@ -330,7 +326,7 @@ More parameters are as follows, |rec_model_dir|string|-|Address of recognition inference model| |rec_char_dict_path|string|../../ppocr/utils/ppocr_keys_v1.txt|dictionary file| |rec_batch_num|int|6|batch size of recognition| -|rec_img_h|int|32|image height of recognition| +|rec_img_h|int|48|image height of recognition| |rec_img_w|int|320|image width of recognition| * Multi-language inference is also supported in PaddleOCR, you can refer to [recognition tutorial](../../doc/doc_en/recognition_en.md) for more supported languages and models in PaddleOCR. Specifically, if you want to infer using multi-language models, you just need to modify values of `rec_char_dict_path` and `rec_model_dir`. diff --git a/deploy/cpp_infer/readme_ch.md b/deploy/cpp_infer/readme_ch.md index e5a4869eca1d35765013e63011c680e59b33ac00..8c334851c0d44acd393c6daa79edf25dc9e6fa24 100644 --- a/deploy/cpp_infer/readme_ch.md +++ b/deploy/cpp_infer/readme_ch.md @@ -213,7 +213,7 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir 本demo支持系统串联调用,也支持单个功能的调用,如,只使用检测或识别功能。 -**注意** ppocr默认使用`PP-OCRv3`模型,识别模型使用的输入shape为`3,48,320`, 因此如果使用识别功能,需要添加参数`--rec_img_h=48`,如果不使用默认的`PP-OCRv3`模型,则无需设置该参数。 +**注意** ppocr默认使用`PP-OCRv3`模型,识别模型使用的输入shape为`3,48,320`, 如需使用旧版本的PP-OCR模型,则需要设置参数`--rec_img_h=32`。 运行方式: @@ -232,7 +232,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir --det=true \ --rec=true \ --cls=true \ - --rec_img_h=48\ ``` ##### 2. 检测+识别: @@ -244,7 +243,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir --det=true \ --rec=true \ --cls=false \ - --rec_img_h=48\ ``` ##### 3. 检测: @@ -264,7 +262,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir --det=false \ --rec=true \ --cls=true \ - --rec_img_h=48\ ``` ##### 5. 识别: @@ -275,7 +272,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir --det=false \ --rec=true \ --cls=false \ - --rec_img_h=48\ ``` ##### 6. 
分类:
@@ -339,7 +335,7 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
 |rec_model_dir|string|-|识别模型inference model地址|
 |rec_char_dict_path|string|../../ppocr/utils/ppocr_keys_v1.txt|字典文件|
 |rec_batch_num|int|6|识别模型batchsize|
-|rec_img_h|int|32|识别模型输入图像高度|
+|rec_img_h|int|48|识别模型输入图像高度|
 |rec_img_w|int|320|识别模型输入图像宽度|
 
 
diff --git a/deploy/cpp_infer/src/args.cpp b/deploy/cpp_infer/src/args.cpp
index fe58236734568035dfb26570df39f21154f4e9ef..93d0f5ea5fd07bdc3eb44537bc1c0d4e131736d3 100644
--- a/deploy/cpp_infer/src/args.cpp
+++ b/deploy/cpp_infer/src/args.cpp
@@ -47,7 +47,7 @@ DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
 DEFINE_int32(rec_batch_num, 6, "rec_batch_num.");
 DEFINE_string(rec_char_dict_path, "../../ppocr/utils/ppocr_keys_v1.txt",
               "Path of dictionary.");
-DEFINE_int32(rec_img_h, 32, "rec image height");
+DEFINE_int32(rec_img_h, 48, "rec image height");
 DEFINE_int32(rec_img_w, 320, "rec image width");
 
 // ocr forward related
diff --git a/deploy/cpp_infer/src/main.cpp b/deploy/cpp_infer/src/main.cpp
index b6085257e7a7f517e308895d5219d55f032264fd..c4b5b97ea8b2ebf77dd9a3e2af69a1a1ca19ed2a 100644
--- a/deploy/cpp_infer/src/main.cpp
+++ b/deploy/cpp_infer/src/main.cpp
@@ -77,13 +77,19 @@ int main(int argc, char **argv) {
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
     if (FLAGS_benchmark) {
       cout << cv_all_img_names[i] << '\t';
-      for (int n = 0; n < ocr_results[i].size(); n++) {
-        for (int m = 0; m < ocr_results[i][n].box.size(); m++) {
-          cout << ocr_results[i][n].box[m][0] << ' '
-               << ocr_results[i][n].box[m][1] << ' ';
+      if (FLAGS_rec && FLAGS_det) {
+        Utility::print_result(ocr_results[i]);
+      } else if (FLAGS_det) {
+        for (int n = 0; n < ocr_results[i].size(); n++) {
+          for (int m = 0; m < ocr_results[i][n].box.size(); m++) {
+            cout << ocr_results[i][n].box[m][0] << ' '
+                 << ocr_results[i][n].box[m][1] << ' ';
+          }
         }
+        cout << endl;
+      } else {
+        Utility::print_result(ocr_results[i]);
       }
-      cout << endl;
     } else {
       cout << cv_all_img_names[i] << "\n";
       Utility::print_result(ocr_results[i]);
diff --git a/deploy/cpp_infer/src/ocr_det.cpp b/deploy/cpp_infer/src/ocr_det.cpp
index c08f97b55ab53c934734393af404a53dc77493a9..550997e71937d23a7448e8ff1c4ffad579d2931c 100644
--- a/deploy/cpp_infer/src/ocr_det.cpp
+++ b/deploy/cpp_infer/src/ocr_det.cpp
@@ -32,40 +32,46 @@ void DBDetector::LoadModel(const std::string &model_dir) {
     if (this->precision_ == "int8") {
       precision = paddle_infer::Config::Precision::kInt8;
     }
-    config.EnableTensorRtEngine(1 << 20, 10, 3, precision, false, false);
+    config.EnableTensorRtEngine(1 << 20, 1, 20, precision, false, false);
     std::map<std::string, std::vector<int>> min_input_shape = {
         {"x", {1, 3, 50, 50}},
-        {"conv2d_92.tmp_0", {1, 96, 20, 20}},
-        {"conv2d_91.tmp_0", {1, 96, 10, 10}},
-        {"nearest_interp_v2_1.tmp_0", {1, 96, 10, 10}},
-        {"nearest_interp_v2_2.tmp_0", {1, 96, 20, 20}},
-        {"nearest_interp_v2_3.tmp_0", {1, 24, 20, 20}},
-        {"nearest_interp_v2_4.tmp_0", {1, 24, 20, 20}},
-        {"nearest_interp_v2_5.tmp_0", {1, 24, 20, 20}},
+        {"conv2d_92.tmp_0", {1, 120, 20, 20}},
+        {"conv2d_91.tmp_0", {1, 24, 10, 10}},
+        {"conv2d_59.tmp_0", {1, 96, 20, 20}},
+        {"nearest_interp_v2_1.tmp_0", {1, 256, 10, 10}},
+        {"nearest_interp_v2_2.tmp_0", {1, 256, 20, 20}},
+        {"conv2d_124.tmp_0", {1, 256, 20, 20}},
+        {"nearest_interp_v2_3.tmp_0", {1, 64, 20, 20}},
+        {"nearest_interp_v2_4.tmp_0", {1, 64, 20, 20}},
+        {"nearest_interp_v2_5.tmp_0", {1, 64, 20, 20}},
         {"elementwise_add_7", {1, 56, 2, 2}},
-        {"nearest_interp_v2_0.tmp_0", {1, 96, 2, 2}}};
+        {"nearest_interp_v2_0.tmp_0", {1, 256, 2, 2}}};
     std::map<std::string, std::vector<int>> max_input_shape = {
         {"x", {1, 3, this->max_side_len_, this->max_side_len_}},
-        {"conv2d_92.tmp_0", {1, 96, 400, 400}},
-        {"conv2d_91.tmp_0", {1, 96, 200, 200}},
-        {"nearest_interp_v2_1.tmp_0", {1, 96, 200, 200}},
-        {"nearest_interp_v2_2.tmp_0", {1, 96, 400, 400}},
-        {"nearest_interp_v2_3.tmp_0", {1, 24, 400, 400}},
-        {"nearest_interp_v2_4.tmp_0", {1, 24, 400, 400}},
-        {"nearest_interp_v2_5.tmp_0", {1, 24, 400, 400}},
+        {"conv2d_92.tmp_0", {1, 120, 400, 400}},
+        {"conv2d_91.tmp_0", {1, 24, 200, 200}},
+        {"conv2d_59.tmp_0", {1, 96, 400, 400}},
+        {"nearest_interp_v2_1.tmp_0", {1, 256, 200, 200}},
+        {"nearest_interp_v2_2.tmp_0", {1, 256, 400, 400}},
+        {"conv2d_124.tmp_0", {1, 256, 400, 400}},
+        {"nearest_interp_v2_3.tmp_0", {1, 64, 400, 400}},
+        {"nearest_interp_v2_4.tmp_0", {1, 64, 400, 400}},
+        {"nearest_interp_v2_5.tmp_0", {1, 64, 400, 400}},
         {"elementwise_add_7", {1, 56, 400, 400}},
-        {"nearest_interp_v2_0.tmp_0", {1, 96, 400, 400}}};
+        {"nearest_interp_v2_0.tmp_0", {1, 256, 400, 400}}};
     std::map<std::string, std::vector<int>> opt_input_shape = {
         {"x", {1, 3, 640, 640}},
-        {"conv2d_92.tmp_0", {1, 96, 160, 160}},
-        {"conv2d_91.tmp_0", {1, 96, 80, 80}},
-        {"nearest_interp_v2_1.tmp_0", {1, 96, 80, 80}},
-        {"nearest_interp_v2_2.tmp_0", {1, 96, 160, 160}},
-        {"nearest_interp_v2_3.tmp_0", {1, 24, 160, 160}},
-        {"nearest_interp_v2_4.tmp_0", {1, 24, 160, 160}},
-        {"nearest_interp_v2_5.tmp_0", {1, 24, 160, 160}},
+        {"conv2d_92.tmp_0", {1, 120, 160, 160}},
+        {"conv2d_91.tmp_0", {1, 24, 80, 80}},
+        {"conv2d_59.tmp_0", {1, 96, 160, 160}},
+        {"nearest_interp_v2_1.tmp_0", {1, 256, 80, 80}},
+        {"nearest_interp_v2_2.tmp_0", {1, 256, 160, 160}},
+        {"conv2d_124.tmp_0", {1, 256, 160, 160}},
+        {"nearest_interp_v2_3.tmp_0", {1, 64, 160, 160}},
+        {"nearest_interp_v2_4.tmp_0", {1, 64, 160, 160}},
+        {"nearest_interp_v2_5.tmp_0", {1, 64, 160, 160}},
         {"elementwise_add_7", {1, 56, 40, 40}},
-        {"nearest_interp_v2_0.tmp_0", {1, 96, 40, 40}}};
+        {"nearest_interp_v2_0.tmp_0", {1, 256, 40, 40}}};
     config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
                                   opt_input_shape);
diff --git a/deploy/cpp_infer/src/ocr_rec.cpp b/deploy/cpp_infer/src/ocr_rec.cpp
index f69f37b8f51ecec5925d556f2b3e169bb0e80715..0f90ddfab4872f97829da081e64cb7437e72493a 100644
--- a/deploy/cpp_infer/src/ocr_rec.cpp
+++ b/deploy/cpp_infer/src/ocr_rec.cpp
@@ -83,7 +83,7 @@ void CRNNRecognizer::Run(std::vector<cv::Mat> img_list,
     int out_num = std::accumulate(predict_shape.begin(), predict_shape.end(),
                                   1, std::multiplies<int>());
     predict_batch.resize(out_num);
-
+    // predict_batch is the result of Last FC with softmax
    output_t->CopyToCpu(predict_batch.data());
     auto inference_end = std::chrono::steady_clock::now();
     inference_diff += inference_end - inference_start;
@@ -98,9 +98,11 @@
       float max_value = 0.0f;
 
       for (int n = 0; n < predict_shape[1]; n++) {
+        // get idx
         argmax_idx = int(Utility::argmax(
             &predict_batch[(m * predict_shape[1] + n) * predict_shape[2]],
             &predict_batch[(m * predict_shape[1] + n + 1) * predict_shape[2]]));
+        // get score
         max_value = float(*std::max_element(
             &predict_batch[(m * predict_shape[1] + n) * predict_shape[2]],
             &predict_batch[(m * predict_shape[1] + n + 1) * predict_shape[2]]));
@@ -132,7 +134,9 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
   paddle_infer::Config config;
   config.SetModel(model_dir + "/inference.pdmodel",
                   model_dir + "/inference.pdiparams");
-
+  std::cout << "In PP-OCRv3, default rec_img_h is 48, "
+            << "if you use other model, you should set the param rec_img_h=32"
+            << std::endl;
   if (this->use_gpu_) {
     config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
     if (this->use_tensorrt_) {
@@ -143,15 +147,17 @@
       if (this->precision_ == "int8") {
        precision = paddle_infer::Config::Precision::kInt8;
       }
-      config.EnableTensorRtEngine(1 << 20, 10, 3, precision, false, false);
+      config.EnableTensorRtEngine(1 << 20, 10, 15, precision, false, false);
       int imgH = this->rec_image_shape_[1];
       int imgW = this->rec_image_shape_[2];
       std::map<std::string, std::vector<int>> min_input_shape = {
           {"x", {1, 3, imgH, 10}}, {"lstm_0.tmp_0", {10, 1, 96}}};
       std::map<std::string, std::vector<int>> max_input_shape = {
-          {"x", {1, 3, imgH, 2000}}, {"lstm_0.tmp_0", {1000, 1, 96}}};
+          {"x", {this->rec_batch_num_, 3, imgH, 2500}},
+          {"lstm_0.tmp_0", {1000, 1, 96}}};
       std::map<std::string, std::vector<int>> opt_input_shape = {
-          {"x", {1, 3, imgH, imgW}}, {"lstm_0.tmp_0", {25, 1, 96}}};
+          {"x", {this->rec_batch_num_, 3, imgH, imgW}},
+          {"lstm_0.tmp_0", {25, 1, 96}}};
       config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
                                     opt_input_shape);
diff --git a/deploy/lite/config.txt b/deploy/lite/config.txt
index 4c68105d39031830a8222b3d88163aebc8cac257..dda0d2b0320544d3a82f59b0672c086c64d83d3d 100644
--- a/deploy/lite/config.txt
+++ b/deploy/lite/config.txt
@@ -4,4 +4,5 @@ det_db_box_thresh  0.5
 det_db_unclip_ratio  1.6
 det_db_use_dilate  0
 det_use_polygon_score  1
-use_direction_classify  1
\ No newline at end of file
+use_direction_classify  1
+rec_image_height  32
\ No newline at end of file
diff --git a/deploy/lite/crnn_process.cc b/deploy/lite/crnn_process.cc
index 7528f36fe6316c84724891a4421c047fbdd33fa2..6d5fc1504e7b1b3faa35a80662442f60d2e30499 100644
--- a/deploy/lite/crnn_process.cc
+++ b/deploy/lite/crnn_process.cc
@@ -19,25 +19,27 @@
 
 const std::vector<int> rec_image_shape{3, 32, 320};
 
-cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio) {
+cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio, int rec_image_height) {
   int imgC, imgH, imgW;
   imgC = rec_image_shape[0];
+  imgH = rec_image_height;
   imgW = rec_image_shape[2];
-  imgH = rec_image_shape[1];
-  imgW = int(32 * wh_ratio);
+  imgW = int(imgH * wh_ratio);
 
-  float ratio = static_cast<float>(img.cols) / static_cast<float>(img.rows);
+  float ratio = float(img.cols) / float(img.rows);
   int resize_w, resize_h;
+
   if (ceilf(imgH * ratio) > imgW)
     resize_w = imgW;
   else
-    resize_w = static_cast<float>(ceilf(imgH * ratio));
+    resize_w = int(ceilf(imgH * ratio));
 
   cv::Mat resize_img;
-  cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
-             cv::INTER_LINEAR);
-
-  return resize_img;
+  cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
+             cv::INTER_LINEAR);
+  cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0,
+                     int(imgW - resize_img.cols), cv::BORDER_CONSTANT,
+                     {127, 127, 127});
+  return resize_img;
 }
 
 std::vector<std::string> ReadDict(std::string path) {
diff --git a/deploy/lite/crnn_process.h b/deploy/lite/crnn_process.h
index 29e67906976198210394c4960786105bf884dce8..ed7a3167069538a0c40d1bc01f0073c36cb7e461 100644
--- a/deploy/lite/crnn_process.h
+++ b/deploy/lite/crnn_process.h
@@ -26,7 +26,7 @@
 #include "opencv2/imgcodecs.hpp"
 #include "opencv2/imgproc.hpp"
 
-cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio);
+cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio, int rec_image_height);
 
 std::vector<std::string> ReadDict(std::string path);
 
diff --git a/deploy/lite/ocr_db_crnn.cc b/deploy/lite/ocr_db_crnn.cc
index 1ffbbacb74545b0bbea4957e25b6235225bad02b..cb2bf7791a4307d4e8d2167197d41d903410e0b4 100644
--- a/deploy/lite/ocr_db_crnn.cc
+++ b/deploy/lite/ocr_db_crnn.cc
@@ -162,7 +162,8 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
               std::vector<std::string> charactor_dict,
               std::shared_ptr<PaddlePredictor> predictor_cls,
               int use_direction_classify,
-              std::vector<double> *times) {
+              std::vector<double> *times,
+              int rec_image_height) {
   std::vector<float> mean = {0.5f, 0.5f, 0.5f};
   std::vector<float> scale = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
 
@@ -183,7 +184,7 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
     float wh_ratio =
        static_cast<float>(crop_img.cols) / static_cast<float>(crop_img.rows);
 
-    resize_img = CrnnResizeImg(crop_img, wh_ratio);
+    resize_img = CrnnResizeImg(crop_img, wh_ratio, rec_image_height);
     resize_img.convertTo(resize_img, CV_32FC3, 1 / 255.f);
 
     const float *dimg = reinterpret_cast<const float *>(resize_img.data);
@@ -444,7 +445,7 @@ void system(char **argv){
   //// load config from txt file
   auto Config = LoadConfigTxt(det_config_path);
   int use_direction_classify = int(Config["use_direction_classify"]);
-
+  int rec_image_height = int(Config["rec_image_height"]);
   auto charactor_dict = ReadDict(dict_path);
   charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
   charactor_dict.push_back(" ");
@@ -590,12 +591,16 @@ void rec(int argc, char **argv) {
   std::string batchsize = argv[6];
   std::string img_dir = argv[7];
   std::string dict_path = argv[8];
+  std::string config_path = argv[9];
 
   if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) {
     std::cerr << "Only support FP32 or INT8." << std::endl;
     exit(1);
   }
 
+  auto Config = LoadConfigTxt(config_path);
+  int rec_image_height = int(Config["rec_image_height"]);
+
   std::vector<cv::String> cv_all_img_names;
   cv::glob(img_dir, cv_all_img_names);
@@ -630,7 +635,7 @@ void rec(int argc, char **argv) {
     std::vector<float> rec_text_score;
     std::vector<double> times;
     RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
-                charactor_dict, cls_predictor, 0, &times);
+                charactor_dict, cls_predictor, 0, &times, rec_image_height);
 
     //// print recognized text
     for (int i = 0; i < rec_text.size(); i++) {
diff --git a/deploy/lite/readme.md b/deploy/lite/readme.md
index 9926e2dd8c973b25b5397fd5825f790528ede279..a1bef8120e52dd91db0fda4ac2a4d91cc2800818 100644
--- a/deploy/lite/readme.md
+++ b/deploy/lite/readme.md
@@ -34,7 +34,7 @@ For the compilation process of different development environments, please refer
 ### 1.2 Prepare Paddle-Lite library
 There are two ways to obtain the Paddle-Lite library:
-- 1. Download directly, the download link of the Paddle-Lite library is as follows:
+- 1. [Recommended] Download directly, the download link of the Paddle-Lite library is as follows:
 
   | Platform | Paddle-Lite library download link |
   |---|---|
@@ -43,7 +43,9 @@ There are two ways to obtain the Paddle-Lite library:
 
 Note: 1. The above Paddle-Lite library is compiled from the Paddle-Lite 2.10 branch. For more information about Paddle-Lite 2.10, please refer to [link](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.10).
 
-- 2. [Recommended] Compile Paddle-Lite to get the prediction library. The compilation method of Paddle-Lite is as follows:
+  **Note: It is recommended to use a paddlelite >= 2.10 prediction library; other prediction library versions are available at this [download link](https://github.com/PaddlePaddle/Paddle-Lite/tags).**
+
+- 2. Compile Paddle-Lite to get the prediction library. The compilation method of Paddle-Lite is as follows:
 ```
 git clone https://github.com/PaddlePaddle/Paddle-Lite.git
 cd Paddle-Lite
@@ -104,21 +106,17 @@ If you directly use the model in the above table for deployment, you can skip th
 
 If the model to be deployed is not in the above table, you need to follow the steps below to obtain the optimized model.
 
-The `opt` tool can be obtained by compiling Paddle Lite. 
+- Step 1: Refer to [document](https://www.paddlepaddle.org.cn/lite/v2.10/user_guides/opt/opt_python.html) to install paddlelite, which is used to convert the Paddle inference model into the nb model required by Paddle-Lite:
 ```
-git clone https://github.com/PaddlePaddle/Paddle-Lite.git
-cd Paddle-Lite
-git checkout release/v2.10
-./lite/tools/build.sh build_optimize_tool
+pip install paddlelite==2.10 # The paddlelite version should be the same as the prediction library version
 ```
-
-After the compilation is complete, the opt file is located under build.opt/lite/api/, You can view the operating options and usage of opt in the following ways:
-
+After installation, the following command shows the help information:
 ```
-cd build.opt/lite/api/
-./opt
+paddle_lite_opt
 ```
+Introduction to paddle_lite_opt parameters:
+
 |Options|Description|
 |---|---|
 |--model_dir|The path of the PaddlePaddle model to be optimized (non-combined form)|
@@ -131,6 +129,8 @@ cd build.opt/lite/api/
 
 `--model_dir` is suitable for the non-combined mode of the model to be optimized, and the inference model of PaddleOCR is the combined mode, that is, the model structure and model parameters are stored in a single file.
 
+- Step 2: Use paddle_lite_opt to convert the inference model to the mobile model format.
+
 The following takes the ultra-lightweight Chinese model of PaddleOCR as an example to introduce the use of the compiled opt file to complete the conversion of the inference model to the Paddle-Lite optimized model
 
 ```
@@ -240,6 +240,7 @@ det_db_thresh  0.3  # Used to filter the binarized image of DB prediction,
 det_db_box_thresh  0.5  # DDB post-processing filter box threshold, if there is a missing box detected, it can be reduced as appropriate
 det_db_unclip_ratio  1.6  # Indicates the compactness of the text box, the smaller the value, the closer the text box to the text
 use_direction_classify  0  # Whether to use the direction classifier, 0 means not to use, 1 means to use
+rec_image_height  32  # The height of the input image of the recognition model, the PP-OCRv3 model needs to be set to 48, and the PP-OCRv2 model needs to be set to 32
 ```
 
 5. Run Model on phone
@@ -258,8 +259,15 @@ After the above steps are completed, you can use adb to push the file to the pho
 cd /data/local/tmp/debug
 export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
 # The use of ocr_db_crnn is:
-# ./ocr_db_crnn Detection model file Orientation classifier model file Recognition model file Test image path Dictionary file path
-./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_opt.nb ./11.jpg ppocr_keys_v1.txt
+# ./ocr_db_crnn Mode Detection model file Orientation classifier model file Recognition model file Hardware Precision Threads Batchsize Test image path Dictionary file path
+./ocr_db_crnn system ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt ppocr_keys_v1.txt True
+# precision can be INT8 for quantized models or FP32 for normal models.
+
+# Only using detection model
+./ocr_db_crnn det ch_PP-OCRv2_det_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt
+
+# Only using recognition model
+./ocr_db_crnn rec ch_PP-OCRv2_rec_slim_opt.nb arm8 INT8 10 1 word_1.jpg ppocr_keys_v1.txt config.txt
 ```
 
 If you modify the code, you need to recompile and push to the phone.
@@ -283,3 +291,7 @@ A2: Replace the .jpg test image under ./debug with the image you want to test, a
 
 Q3: How to package it into the mobile APP? 
A3: This demo aims to provide the core algorithm part that can run OCR on mobile phones. Further, PaddleOCR/deploy/android_demo is an example of encapsulating this demo into a mobile app, for reference.
+
+Q4: When running the demo, an error is reported `Error: This model is not supported, because kernel for 'io_copy' is not supported by Paddle-Lite.`
+
+A4: The problem is that the installed paddlelite version does not match the downloaded prediction library version. Make sure that the paddle_lite_opt tool matches your prediction library version, then convert the nb model again.
diff --git a/deploy/lite/readme_ch.md b/deploy/lite/readme_ch.md
index 99a543d0d60455443dd872c56a5832c8ca0ff4e9..0793827fe647c470944fc36e2b243c8f7e704e99 100644
--- a/deploy/lite/readme_ch.md
+++ b/deploy/lite/readme_ch.md
@@ -8,7 +8,7 @@
   - [2.1 模型优化](#21-模型优化)
   - [2.2 与手机联调](#22-与手机联调)
 - [FAQ](#faq)
-  
+
 
 本教程将介绍基于[Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite) 在移动端部署PaddleOCR超轻量中文检测、识别模型的详细步骤。
 
@@ -32,7 +32,7 @@ Paddle Lite是飞桨轻量化推理引擎,为手机、IOT端提供高效推理
 ### 1.2 准备预测库
 
 预测库有两种获取方式:
-- 1. 直接下载,预测库下载链接如下:
+- 1. [推荐]直接下载,预测库下载链接如下:
 
   | 平台 | 预测库下载链接 |
   |---|---|
@@ -41,7 +41,9 @@ Paddle Lite是飞桨轻量化推理引擎,为手机、IOT端提供高效推理
 
 注:1. 上述预测库为PaddleLite 2.10分支编译得到,有关PaddleLite 2.10 详细信息可参考 [链接](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.10) 。
 
-- 2. [推荐]编译Paddle-Lite得到预测库,Paddle-Lite的编译方式如下:
+**注:建议使用paddlelite>=2.10版本的预测库,其他预测库版本[下载链接](https://github.com/PaddlePaddle/Paddle-Lite/tags)**
+
+- 2. 编译Paddle-Lite得到预测库,Paddle-Lite的编译方式如下:
 ```
 git clone https://github.com/PaddlePaddle/Paddle-Lite.git
 cd Paddle-Lite
@@ -102,22 +104,16 @@ Paddle-Lite 提供了多种策略来自动优化原始的模型,其中包括
 
 如果要部署的模型不在上述表格中,则需要按照如下步骤获得优化后的模型。
 
-模型优化需要Paddle-Lite的opt可执行文件,可以通过编译Paddle-Lite源码获得,编译步骤如下:
+- 步骤1:参考[文档](https://www.paddlepaddle.org.cn/lite/v2.10/user_guides/opt/opt_python.html)安装paddlelite,用于转换paddle inference model为paddlelite运行所需的nb模型
 ```
-# 如果准备环境时已经clone了Paddle-Lite,则不用重新clone Paddle-Lite
-git clone https://github.com/PaddlePaddle/Paddle-Lite.git
-cd Paddle-Lite
-git checkout release/v2.10
-# 启动编译
-./lite/tools/build.sh build_optimize_tool
+pip install paddlelite==2.10 # paddlelite版本要与预测库版本一致
 ```
-
-编译完成后,opt文件位于`build.opt/lite/api/`下,可通过如下方式查看opt的运行选项和使用方式;
+安装完后,如下指令可以查看帮助信息
 ```
-cd build.opt/lite/api/
-./opt
+paddle_lite_opt
 ```
+paddle_lite_opt 参数介绍:
 |选项|说明|
 |---|---|
 |--model_dir|待优化的PaddlePaddle模型(非combined形式)的路径|
@@ -130,6 +126,8 @@ cd build.opt/lite/api/
 
 `--model_dir`适用于待优化的模型是非combined方式,PaddleOCR的inference模型是combined方式,即模型结构和模型参数使用单独一个文件存储。
 
+- 步骤2:使用paddle_lite_opt将inference模型转换成移动端模型格式。
+
 下面以PaddleOCR的超轻量中文模型为例,介绍使用编译好的opt文件完成inference模型到Paddle-Lite优化模型的转换。
 
 ```
@@ -148,7 +146,7 @@ wget  https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_cls
 
 转换成功后,inference模型目录下会多出`.nb`结尾的文件,即是转换成功的模型文件。
 
-注意:使用paddle-lite部署时,需要使用opt工具优化后的模型。 opt 工具的输入模型是paddle保存的inference模型
+注意:使用paddle-lite部署时,需要使用opt工具优化后的模型。opt工具的输入模型是paddle保存的inference模型
 
 ### 2.2 与手机联调
 
@@ -234,13 +232,14 @@ ppocr_keys_v1.txt  # 中文字典
 ...
 ```
 
-2. `config.txt`  包含了检测器、分类器的超参数,如下:
+2. `config.txt`  包含了检测器、分类器、识别器的超参数,如下:
 ```
 max_side_len  960     #  输入图像长宽大于960时,等比例缩放图像,使得图像最长边为960
 det_db_thresh  0.3    # 用于过滤DB预测的二值化图像,设置为0.-0.3对结果影响不明显
-det_db_box_thresh  0.5  # DB后处理过滤box的阈值,如果检测存在漏框情况,可酌情减小
+det_db_box_thresh  0.5  # 检测器后处理过滤box的阈值,如果检测存在漏框情况,可酌情减小
 det_db_unclip_ratio  1.6  # 表示文本框的紧致程度,越小则文本框更靠近文本
 use_direction_classify  0  # 是否使用方向分类器,0表示不使用,1表示使用
+rec_image_height  32  # 识别模型输入图像的高度,PP-OCRv3模型设置为48,PP-OCRv2模型需要设置为32
 ```
 
 5. 
5. 启动调试

@@ -259,8 +258,14 @@ use_direction_classify 0 # 是否使用方向分类器,0表示不使用,1
cd /data/local/tmp/debug
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
# 开始使用,ocr_db_crnn可执行文件的使用方式为:
-# ./ocr_db_crnn 检测模型文件 方向分类器模型文件 识别模型文件 测试图像路径 字典文件路径
-./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb ./11.jpg ppocr_keys_v1.txt
+# ./ocr_db_crnn 预测模式 检测模型文件 识别模型文件 方向分类器模型文件 运行硬件 运行精度 线程数 batchsize 测试图像路径 参数配置路径 字典文件路径 是否使用benchmark参数
+./ocr_db_crnn system ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt ppocr_keys_v1.txt True
+
+# 仅使用文本检测模型,使用方式如下:
+./ocr_db_crnn det ch_PP-OCRv2_det_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt
+
+# 仅使用文本识别模型,使用方式如下:
+./ocr_db_crnn rec ch_PP-OCRv2_rec_slim_opt.nb arm8 INT8 10 1 word_1.jpg ppocr_keys_v1.txt config.txt
```

如果对代码做了修改,则需要重新编译并push到手机上。

@@ -284,3 +289,7 @@ A2:替换debug下的.jpg测试图像为你想要测试的图像,adb push 到

Q3:如何封装到手机APP中?

A3:此demo旨在提供能在手机上运行OCR的核心算法部分,PaddleOCR/deploy/android_demo是将这个demo封装到手机app的示例,供参考
+
+Q4:运行demo时遇到报错`Error: This model is not supported, because kernel for 'io_copy' is not supported by Paddle-Lite.`
+
+A4:问题是安装的paddlelite版本和下载的预测库版本不匹配,请确保paddle_lite_opt工具和你的预测库版本匹配,并重新转换nb模型。
diff --git a/deploy/pdserving/README.md b/deploy/pdserving/README.md
index 55e03c4c2654f336ed942ae03e61e88b61940006..83329a11cdb57fea003a800fe9ca73791da9f6da 100644
--- a/deploy/pdserving/README.md
+++ b/deploy/pdserving/README.md
@@ -136,7 +136,7 @@ The recognition model is the same.
2. Run the following command to start the service.
   ```
   # Start the service and save the running log in log.txt
-   python3 web_service.py &>log.txt &
+   python3 web_service.py --config=config.yml &>log.txt &
   ```
   After the service is successfully started, a log similar to the following will be printed in log.txt
   ![](./imgs/start_server.png)
@@ -217,7 +217,7 @@ The C++ service deployment is the same as python in the environment setup and da
2. Run the following command to start the service.
   ```
   # Start the service and save the running log in log.txt
-   python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 9293 &>log.txt &
+   python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt &
   ```
   After the service is successfully started, a log similar to the following will be printed in log.txt
   ![](./imgs/start_server.png)
diff --git a/deploy/pdserving/README_CN.md b/deploy/pdserving/README_CN.md
index 0891611db5f39d322473354f7d988b10afa78cbd..ab05b766e32bc8435d2c0568e848190bf6761173 100644
--- a/deploy/pdserving/README_CN.md
+++ b/deploy/pdserving/README_CN.md
@@ -135,7 +135,7 @@ python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv3_rec_infer/ \
2. 
启动服务可运行如下命令: ``` # 启动服务,运行日志保存在log.txt - python3 web_service.py &>log.txt & + python3 web_service.py --config=config.yml &>log.txt & ``` 成功启动服务后,log.txt中会打印类似如下日志 ![](./imgs/start_server.png) @@ -230,7 +230,7 @@ cp -rf general_detection_op.cpp Serving/core/general-server/op ``` # 启动服务,运行日志保存在log.txt - python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 9293 &>log.txt & + python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt & ``` 成功启动服务后,log.txt中会打印类似如下日志 ![](./imgs/start_server.png) diff --git a/deploy/pdserving/ocr_cpp_client.py b/deploy/pdserving/ocr_cpp_client.py index 7f9333dd858aad5440ff256d501cf1e5d2f5fb1f..3aaf03155953ce2129fd548deef15033e91e9a09 100755 --- a/deploy/pdserving/ocr_cpp_client.py +++ b/deploy/pdserving/ocr_cpp_client.py @@ -22,15 +22,16 @@ import cv2 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor from paddle_serving_app.reader import Div, Normalize, Transpose from ocr_reader import OCRReader +import codecs client = Client() # TODO:load_client need to load more than one client model. # this need to figure out some details. client.load_client_config(sys.argv[1:]) -client.connect(["127.0.0.1:9293"]) +client.connect(["127.0.0.1:8181"]) import paddle -test_img_dir = "../../doc/imgs/" +test_img_dir = "../../doc/imgs/1.jpg" ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt") @@ -40,14 +41,43 @@ def cv2_to_base64(image): 'utf8') #data.tostring()).decode('utf8') -for img_file in os.listdir(test_img_dir): - with open(os.path.join(test_img_dir, img_file), 'rb') as file: +def _check_image_file(path): + img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif'} + return any([path.lower().endswith(e) for e in img_end]) + + +test_img_list = [] +if os.path.isfile(test_img_dir) and _check_image_file(test_img_dir): + test_img_list.append(test_img_dir) +elif os.path.isdir(test_img_dir): + for single_file in os.listdir(test_img_dir): + file_path = os.path.join(test_img_dir, single_file) + if os.path.isfile(file_path) and _check_image_file(file_path): + test_img_list.append(file_path) +if len(test_img_list) == 0: + raise Exception("not found any img file in {}".format(test_img_dir)) + +for img_file in test_img_list: + with open(img_file, 'rb') as file: image_data = file.read() image = cv2_to_base64(image_data) res_list = [] fetch_map = client.predict(feed={"x": image}, fetch=[], batch=True) - one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True) - for res in one_batch_res: - res_list.append(res[0]) - res = {"res": str(res_list)} - print(res) + if fetch_map is None: + print('no results') + else: + if "text" in fetch_map: + for x in fetch_map["text"]: + x = codecs.encode(x) + words = base64.b64decode(x).decode('utf-8') + res_list.append(words) + else: + try: + one_batch_res = ocr_reader.postprocess( + fetch_map, with_score=True) + for res in one_batch_res: + res_list.append(res[0]) + except: + print('no results') + res = {"res": str(res_list)} + print(res) diff --git a/deploy/pdserving/ocr_reader.py b/deploy/pdserving/ocr_reader.py index 75f0f3d5c3aea488f82ec01a72e20310663d565b..d488cc0920391eded6c08945597b5c938b7c7a42 100644 --- a/deploy/pdserving/ocr_reader.py +++ b/deploy/pdserving/ocr_reader.py @@ -339,7 +339,7 @@ class CharacterOps(object): class OCRReader(object): def __init__(self, algorithm="CRNN", - image_shape=[3, 32, 320], + 
image_shape=[3, 48, 320], char_type="ch", batch_num=1, char_dict_path="./ppocr_keys_v1.txt"): @@ -356,7 +356,7 @@ class OCRReader(object): def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape if self.character_type == "ch": - imgW = int(32 * max_wh_ratio) + imgW = int(imgH * max_wh_ratio) h = img.shape[0] w = img.shape[1] ratio = w / float(h) @@ -377,7 +377,7 @@ class OCRReader(object): def preprocess(self, img_list): img_num = len(img_list) norm_img_batch = [] - max_wh_ratio = 0 + max_wh_ratio = 320/48. for ino in range(img_num): h, w = img_list[ino].shape[0:2] wh_ratio = w * 1.0 / h diff --git a/deploy/pdserving/pipeline_http_client.py b/deploy/pdserving/pipeline_http_client.py index 7bc4d882e5039640e138f3e634b2c33fc6a8e48c..0a86a6398701ef6715afdd9643cb9cb8ccbb58b2 100644 --- a/deploy/pdserving/pipeline_http_client.py +++ b/deploy/pdserving/pipeline_http_client.py @@ -19,8 +19,16 @@ import base64 import os import argparse + + +def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser(description="args for paddleserving") parser.add_argument("--image_dir", type=str, default="../../doc/imgs/") +parser.add_argument("--det", type=str2bool, default=True) +parser.add_argument("--rec", type=str2bool, default=True) args = parser.parse_args() @@ -28,11 +36,27 @@ def cv2_to_base64(image): return base64.b64encode(image).decode('utf8') +def _check_image_file(path): + img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif'} + return any([path.lower().endswith(e) for e in img_end]) + + url = "http://127.0.0.1:9998/ocr/prediction" test_img_dir = args.image_dir -for idx, img_file in enumerate(os.listdir(test_img_dir)): - with open(os.path.join(test_img_dir, img_file), 'rb') as file: +test_img_list = [] +if os.path.isfile(test_img_dir) and _check_image_file(test_img_dir): + test_img_list.append(test_img_dir) +elif os.path.isdir(test_img_dir): + for single_file in os.listdir(test_img_dir): + file_path = os.path.join(test_img_dir, single_file) + if os.path.isfile(file_path) and _check_image_file(file_path): + test_img_list.append(file_path) +if len(test_img_list) == 0: + raise Exception("not found any img file in {}".format(test_img_dir)) + +for idx, img_file in enumerate(test_img_list): + with open(img_file, 'rb') as file: image_data1 = file.read() # print file name print('{}{}{}'.format('*' * 10, img_file, '*' * 10)) @@ -46,16 +70,20 @@ for idx, img_file in enumerate(os.listdir(test_img_dir)): # check success if result["err_no"] == 0: ocr_result = result["value"][0] - try: - for item in eval(ocr_result): - # return transcription and points - print("{}, {}".format(item[0], item[1])) - except Exception as e: - print("No results") - continue + if not args.det: + print(ocr_result) + else: + try: + for item in eval(ocr_result): + # return transcription and points + print("{}, {}".format(item[0], item[1])) + except Exception as e: + print(ocr_result) + print("No results") + continue else: print( "For details about error message, see PipelineServingLogs/pipeline.log" ) -print("==> total number of test imgs: ", len(os.listdir(test_img_dir))) +print("==> total number of test imgs: ", len(test_img_list)) diff --git a/deploy/pdserving/serving_client_conf.prototxt b/deploy/pdserving/serving_client_conf.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..33960540a2ea1a7a1e37bda07ab62197a54e272d --- /dev/null +++ b/deploy/pdserving/serving_client_conf.prototxt @@ -0,0 +1,16 @@ +feed_var { + name: "x" + 
alias_name: "x"
+  is_lod_tensor: false
+  feed_type: 20
+  shape: 1
+}
+fetch_var {
+  name: "save_infer_model/scale_0.tmp_1"
+  alias_name: "save_infer_model/scale_0.tmp_1"
+  is_lod_tensor: false
+  fetch_type: 1
+  shape: 1
+  shape: 640
+  shape: 640
+}
diff --git a/deploy/pdserving/web_service.py b/deploy/pdserving/web_service.py
index f05806ce030238144568a3ca137798a9132027e4..b6fadb91d59d6a27c6ba8ba80459f5acc6aa3516 100644
--- a/deploy/pdserving/web_service.py
+++ b/deploy/pdserving/web_service.py
@@ -19,7 +19,7 @@ import copy
import cv2
import base64
# from paddle_serving_app.reader import OCRReader
-from ocr_reader import OCRReader, DetResizeForTest
+from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
@@ -63,7 +63,6 @@ class DetOp(Op):
        dt_boxes_list = self.post_func(det_out, [ratio_list])
        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
        out_dict = {"dt_boxes": dt_boxes, "image": self.raw_im}
-
        return out_dict, None, ""
@@ -86,7 +85,7 @@ class RecOp(Op):
        dt_boxes = copy.deepcopy(self.dt_list)
        feed_list = []
        img_list = []
-        max_wh_ratio = 0
+        max_wh_ratio = 320 / 48.
        ## Many mini-batchs, the type of feed_data is list.
        max_batch_size = 6  # len(dt_boxes)
@@ -150,7 +149,8 @@ class RecOp(Op):
        for i in range(dt_num):
            text = rec_list[i]
            dt_box = self.dt_list[i]
-            result_list.append([text, dt_box.tolist()])
+            if text[1] >= 0.5:
+                result_list.append([text, dt_box.tolist()])
        res = {"result": str(result_list)}
        return res, None, ""
@@ -163,5 +163,6 @@ class OcrService(WebService):
uci_service = OcrService(name="ocr")
-uci_service.prepare_pipeline_config("config.yml")
+FLAGS = ArgsParser().parse_args()
+uci_service.prepare_pipeline_config(yml_dict=FLAGS.conf_dict)
uci_service.run_service()
diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md
index 24f8a3e92be1c003ec7c37b74d14f4ae4117086a..a4437b8b783ca28fed60bb0a9648191e7ec4d0db 100644
--- a/doc/doc_ch/FAQ.md
+++ b/doc/doc_ch/FAQ.md
@@ -682,7 +682,7 @@ lr:

#### Q: 关于dygraph分支中,文本识别模型训练,要使用数据增强应该如何设置?

-**A**:可以参考[配置文件](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)在Train['dataset']['transforms']添加RecAug字段,使数据增强生效。可以通过添加对aug_prob设置,表示每种数据增强采用的概率。aug_prob默认是0.4.由于tia数据增强特殊性,默认不采用,可以通过添加use_tia设置,使tia数据增强生效。详细设置可以参考[ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744)。
+**A**:可以参考[配置文件](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)在Train['dataset']['transforms']添加RecAug字段,使数据增强生效。可以通过添加对aug_prob设置,表示每种数据增强采用的概率。aug_prob默认是0.4。详细设置可以参考[ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744)。

#### Q: 训练过程中,训练程序意外退出/挂起,应该如何解决?

@@ -720,6 +720,13 @@ C++TensorRT预测需要使用支持TRT的预测库并在编译时打开[-DWITH_T

注:建议使用TensorRT大于等于6.1.0.5以上的版本。

+#### Q: 为什么识别模型做预测的时候,预测图片的数量还会影响预测的精度?
+**A**: 推理时识别模型默认的batch_size=6,如预测图片长度变化大,可能影响预测效果。如果出现上述问题可在推理的时候设置识别bs=1,命令如下:
+
+```
+python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words/ch/word_4.jpg" --rec_model_dir="./ch_PP-OCRv3_rec_infer/" --rec_batch_num=1
+```
+

### 2.13 推理部署

diff --git a/doc/doc_ch/PPOCRv3_det_train.md b/doc/doc_ch/PPOCRv3_det_train.md
new file mode 100644
index 0000000000000000000000000000000000000000..601acddee1ba68c90d9a768c16376496080bd711
--- /dev/null
+++ b/doc/doc_ch/PPOCRv3_det_train.md
@@ -0,0 +1,252 @@
+
+# PP-OCRv3 文本检测模型训练
+
+- [1. 简介](#1)
+- [2. PPOCRv3检测训练](#2)
+- [3. 
基于PPOCRv3检测的finetune训练](#3) + + +## 1. 简介 + +PP-OCRv3在PP-OCRv2的基础上进一步升级。本节介绍PP-OCRv3检测模型的训练步骤。有关PPOCRv3策略介绍参考[文档](./PP-OCRv3_introduction.md)。 + + + +## 2. 检测训练 + +PP-OCRv3检测模型是对PP-OCRv2中的[CML](https://arxiv.org/pdf/2109.03144.pdf)(Collaborative Mutual Learning) 协同互学习文本检测蒸馏策略进行了升级。PP-OCRv3分别针对检测教师模型和学生模型进行进一步效果优化。其中,在对教师模型优化时,提出了大感受野的PAN结构LK-PAN和引入了DML(Deep Mutual Learning)蒸馏策略;在对学生模型优化时,提出了残差注意力机制的FPN结构RSE-FPN。 + +PP-OCRv3检测训练包括两个步骤: +- 步骤1:采用DML蒸馏方法训练检测教师模型 +- 步骤2:使用步骤1得到的教师模型采用CML方法训练出轻量学生模型 + + +### 2.1 准备数据和运行环境 + +训练数据采用icdar2015数据,准备训练集步骤参考[ocr_dataset](./dataset/ocr_datasets.md). + +运行环境准备参考[文档](./installation.md)。 + + +### 2.2 训练教师模型 + +教师模型训练的配置文件是[ch_PP-OCRv3_det_dml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml)。教师模型模型结构的Backbone、Neck、Head分别为Resnet50, LKPAN, DBHead,采用DML的蒸馏方法训练。有关配置文件的详细介绍参考[文档](./knowledge_distillation)。 + + +下载ImageNet预训练模型: +``` +# 下载ResNet50_vd的预训练模型 +wget -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams +``` + +**启动训练** +``` +# 单卡训练 +python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \ + -o Architecture.Models.Student.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \ + Architecture.Models.Student2.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \ + Global.save_model_dir=./output/ +# 如果要使用多GPU分布式训练,请使用如下命令: +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \ + -o Architecture.Models.Student.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \ + Architecture.Models.Student2.pretrained=./pretrain_models/ResNet50_vd_ssld_pretrained \ + Global.save_model_dir=./output/ +``` + +训练过程中保存的模型在output目录下,包含以下文件: +``` +best_accuracy.states +best_accuracy.pdparams # 默认保存最优精度的模型参数 +best_accuracy.pdopt # 默认保存最优精度的优化器相关参数 +latest.states +latest.pdparams # 默认保存的最新模型参数 +latest.pdopt # 默认保存的最新模型的优化器相关参数 +``` +其中,best_accuracy是保存的精度最高的模型参数,可以直接使用该模型评估。 + +模型评估命令如下: +``` +python3 tools/eval.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml -o Global.checkpoints=./output/best_accuracy +``` + +训练的教师模型结构更大,精度更高,用于提升学生模型的精度。 + +**提取教师模型参数** +best_accuracy包含两个模型的参数,分别对应配置文件中的Student,Student2。提取Student的参数方法如下: + +``` +import paddle +# 加载预训练模型 +all_params = paddle.load("output/best_accuracy.pdparams") +# 查看权重参数的keys +print(all_params.keys()) +# 模型的权重提取 +s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." 
in key} +# 查看模型权重参数的keys +print(s_params.keys()) +# 保存 +paddle.save(s_params, "./pretrain_models/dml_teacher.pdparams") +``` + +提取出来的模型参数可以用于模型进一步的finetune训练或者蒸馏训练。 + +### 2.3 训练学生模型 + +训练学生模型的配置文件是[ch_PP-OCRv3_det_cml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) +上一节训练得到的教师模型作为监督,采用CML方式训练得到轻量的学生模型。 + +下载学生模型的ImageNet预训练模型: +``` +# 下载MobileNetV3的预训练模型 +wget -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams +``` + +**启动训练** + +``` +# 单卡训练 +python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \ + -o Architecture.Models.Student.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \ + Architecture.Models.Student2.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \ + Architecture.Models.Teacher.pretrained=./pretrain_models/dml_teacher \ + Global.save_model_dir=./output/ +# 如果要使用多GPU分布式训练,请使用如下命令: +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \ + -o Architecture.Models.Student.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \ + Architecture.Models.Student2.pretrained=./pretrain_models/MobileNetV3_large_x0_5_pretrained \ + Architecture.Models.Teacher.pretrained=./pretrain_models/dml_teacher \ + Global.save_model_dir=./output/ +``` + +训练过程中保存的模型在output目录下, +模型评估命令如下: +``` +python3 tools/eval.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o Global.checkpoints=./output/best_accuracy +``` + +best_accuracy包含三个模型的参数,分别对应配置文件中的Student,Student2,Teacher。提取Student参数的方法如下: + +``` +import paddle +# 加载预训练模型 +all_params = paddle.load("output/best_accuracy.pdparams") +# 查看权重参数的keys +print(all_params.keys()) +# 模型的权重提取 +s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." in key} +# 查看模型权重参数的keys +print(s_params.keys()) +# 保存 +paddle.save(s_params, "./pretrain_models/cml_student.pdparams") +``` + +提取出来的Student的参数可用于模型部署或者做进一步的finetune训练。 + + + + +## 3. 
基于PPOCRv3检测finetune训练 + +本节介绍如何使用PPOCRv3检测模型在其他场景上的finetune训练。 + +finetune训练适用于三种场景: +- 基于CML蒸馏方法的finetune训练,适用于教师模型在使用场景上精度高于PPOCRv3检测模型,且希望得到一个轻量检测模型。 +- 基于PPOCRv3轻量检测模型的finetune训练,无需训练教师模型,希望在PPOCRv3检测模型基础上提升使用场景上的精度。 +- 基于DML蒸馏方法的finetune训练,适用于采用DML方法进一步提升精度的场景。 + + +**基于CML蒸馏方法的finetune训练** + +下载PPOCRv3训练模型: +``` +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar +tar xf ch_PP-OCRv3_det_distill_train.tar +``` +ch_PP-OCRv3_det_distill_train/best_accuracy.pdparams包含CML配置文件中Student、Student2、Teacher模型的参数。 + +启动训练: + +``` +# 单卡训练 +python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \ + -o Global.pretrained_model=./ch_PP-OCRv3_det_distill_train/best_accuracy \ + Global.save_model_dir=./output/ +# 如果要使用多GPU分布式训练,请使用如下命令: +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \ + -o Global.pretrained_model=./ch_PP-OCRv3_det_distill_train/best_accuracy \ + Global.save_model_dir=./output/ +``` + +**基于PPOCRv3轻量检测模型的finetune训练** + + +下载PPOCRv3训练模型,并提取Student结构的模型参数: +``` +wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar +tar xf ch_PP-OCRv3_det_distill_train.tar +``` + +提取Student参数的方法如下: + +``` +import paddle +# 加载预训练模型 +all_params = paddle.load("output/best_accuracy.pdparams") +# 查看权重参数的keys +print(all_params.keys()) +# 模型的权重提取 +s_params = {key[len("Student."):]: all_params[key] for key in all_params if "Student." in key} +# 查看模型权重参数的keys +print(s_params.keys()) +# 保存 +paddle.save(s_params, "./student.pdparams") +``` + +使用配置文件[ch_PP-OCRv3_det_student.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release%2F2.5/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml)训练。 + +**启动训练** + +``` +# 单卡训练 +python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml \ + -o Global.pretrained_model=./student \ + Global.save_model_dir=./output/ +# 如果要使用多GPU分布式训练,请使用如下命令: +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml \ + -o Global.pretrained_model=./student \ + Global.save_model_dir=./output/ +``` + + +**基于DML蒸馏方法的finetune训练** + +以ch_PP-OCRv3_det_distill_train中的Teacher模型为例,首先提取Teacher结构的参数,方法如下: +``` +import paddle +# 加载预训练模型 +all_params = paddle.load("ch_PP-OCRv3_det_distill_train/best_accuracy.pdparams") +# 查看权重参数的keys +print(all_params.keys()) +# 模型的权重提取 +s_params = {key[len("Teacher."):]: all_params[key] for key in all_params if "Teacher." 
in key} +# 查看模型权重参数的keys +print(s_params.keys()) +# 保存 +paddle.save(s_params, "./teacher.pdparams") +``` + +**启动训练** +``` +# 单卡训练 +python3 tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \ + -o Architecture.Models.Student.pretrained=./teacher \ + Architecture.Models.Student2.pretrained=./teacher \ + Global.save_model_dir=./output/ +# 如果要使用多GPU分布式训练,请使用如下命令: +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml \ + -o Architecture.Models.Student.pretrained=./teacher \ + Architecture.Models.Student2.pretrained=./teacher \ + Global.save_model_dir=./output/ +``` + + diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index 6227a21498eda7d8527e21e7f2567995251d9e47..eb81e4cd6dae2542dd07d0e25fe543419f798c9b 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -66,6 +66,8 @@ - [x] [SAR](./algorithm_rec_sar.md) - [x] [SEED](./algorithm_rec_seed.md) - [x] [SVTR](./algorithm_rec_svtr.md) +- [x] [ViTSTR](./algorithm_rec_vitstr.md) +- [x] [ABINet](./algorithm_rec_abinet.md) 参考[DTRB](https://arxiv.org/abs/1904.01906)[3]文字识别训练和评估流程,使用MJSynth和SynthText两个文字识别数据集训练,在IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE数据集上进行评估,算法效果如下: @@ -84,7 +86,8 @@ |SAR|Resnet31| 87.20% | rec_r31_sar | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) | |SEED|Aster_Resnet| 85.35% | rec_resnet_stn_bilstm_att | [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) | |SVTR|SVTR-Tiny| 89.25% | rec_svtr_tiny_none_ctc_en | [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/rec_svtr_tiny_none_ctc_en_train.tar) | - +|ViTSTR|ViTSTR| 79.82% | rec_vitstr_none_ce_en | [训练模型](https://paddleocr.bj.bcebos.com/rec_vitstr_none_ce_train.tar) | +|ABINet|Resnet45| 90.75% | rec_r45_abinet_en | [训练模型](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar) | diff --git a/doc/doc_ch/algorithm_rec_abinet.md b/doc/doc_ch/algorithm_rec_abinet.md new file mode 100644 index 0000000000000000000000000000000000000000..47507c36c7295411e4b7c1662d2bde385b0a95b8 --- /dev/null +++ b/doc/doc_ch/algorithm_rec_abinet.md @@ -0,0 +1,155 @@ +# 场景文本识别算法-ABINet + +- [1. 算法简介](#1) +- [2. 环境配置](#2) +- [3. 模型训练、评估、预测](#3) + - [3.1 训练](#3-1) + - [3.2 评估](#3-2) + - [3.3 预测](#3-3) +- [4. 推理部署](#4) + - [4.1 Python推理](#4-1) + - [4.2 C++推理](#4-2) + - [4.3 Serving服务化部署](#4-3) + - [4.4 更多推理部署](#4-4) +- [5. FAQ](#5) + + +## 1. 算法简介 + +论文信息: +> [ABINet: Read Like Humans: Autonomous, Bidirectional and Iterative Language Modeling for Scene Text Recognition](https://openaccess.thecvf.com/content/CVPR2021/papers/Fang_Read_Like_Humans_Autonomous_Bidirectional_and_Iterative_Language_Modeling_for_CVPR_2021_paper.pdf) +> Shancheng Fang and Hongtao Xie and Yuxin Wang and Zhendong Mao and Yongdong Zhang +> CVPR, 2021 + + + +`ABINet`使用MJSynth和SynthText两个文字识别数据集训练,在IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE数据集上进行评估,算法复现效果如下: + +|模型|骨干网络|配置文件|Acc|下载链接| +| --- | --- | --- | --- | --- | +|ABINet|ResNet45|[rec_r45_abinet.yml](../../configs/rec/rec_r45_abinet.yml)|90.75%|[预训练、训练模型](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar)| + + +## 2. 环境配置 +请先参考[《运行环境准备》](./environment.md)配置PaddleOCR运行环境,参考[《项目克隆》](./clone.md)克隆项目代码。 + + + +## 3. 
模型训练、评估、预测 + + +### 3.1 模型训练 + +请参考[文本识别训练教程](./recognition.md)。PaddleOCR对代码进行了模块化,训练`ABINet`识别模型时需要**更换配置文件**为`ABINet`的[配置文件](../../configs/rec/rec_r45_abinet.yml)。 + +#### 启动训练 + + +具体地,在完成数据准备后,便可以启动训练,训练命令如下: +```shell +#单卡训练(训练周期长,不建议) +python3 tools/train.py -c configs/rec/rec_r45_abinet.yml + +#多卡训练,通过--gpus参数指定卡号 +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_r45_abinet.yml +``` + + +### 3.2 评估 + +可下载已训练完成的[模型文件](#model),使用如下命令进行评估: + +```shell +# 注意将pretrained_model的路径设置为本地路径。 +python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec_r45_abinet.yml -o Global.pretrained_model=./rec_r45_abinet_train/best_accuracy +``` + + +### 3.3 预测 + +使用如下命令进行单张图片预测: +```shell +# 注意将pretrained_model的路径设置为本地路径。 +python3 tools/infer_rec.py -c configs/rec/rec_r45_abinet.yml -o Global.infer_img='./doc/imgs_words_en/word_10.png' Global.pretrained_model=./rec_r45_abinet_train/best_accuracy +# 预测文件夹下所有图像时,可修改infer_img为文件夹,如 Global.infer_img='./doc/imgs_words_en/'。 +``` + + + +## 4. 推理部署 + + +### 4.1 Python推理 +首先将训练得到best模型,转换成inference model。这里以训练完成的模型为例([模型下载地址](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar) ),可以使用如下命令进行转换: + +```shell +# 注意将pretrained_model的路径设置为本地路径。 +python3 tools/export_model.py -c configs/rec/rec_r45_abinet.yml -o Global.pretrained_model=./rec_r45_abinet_train/best_accuracy Global.save_inference_dir=./inference/rec_r45_abinet/ +``` +**注意:** +- 如果您是在自己的数据集上训练的模型,并且调整了字典文件,请注意修改配置文件中的`character_dict_path`是否是所需要的字典文件。 +- 如果您修改了训练时的输入大小,请修改`tools/export_model.py`文件中的对应ABINet的`infer_shape`。 + +转换成功后,在目录下有三个文件: +``` +/inference/rec_r45_abinet/ + ├── inference.pdiparams # 识别inference模型的参数文件 + ├── inference.pdiparams.info # 识别inference模型的参数信息,可忽略 + └── inference.pdmodel # 识别inference模型的program文件 +``` + +执行如下命令进行模型推理: + +```shell +python3 tools/infer/predict_rec.py --image_dir='./doc/imgs_words_en/word_10.png' --rec_model_dir='./inference/rec_r45_abinet/' --rec_algorithm='ABINet' --rec_image_shape='3,32,128' --rec_char_dict_path='./ppocr/utils/ic15_dict.txt' +# 预测文件夹下所有图像时,可修改image_dir为文件夹,如 --image_dir='./doc/imgs_words_en/'。 +``` + +![](../imgs_words_en/word_10.png) + +执行命令后,上面图像的预测结果(识别的文本和得分)会打印到屏幕上,示例如下: +结果如下: +```shell +Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9999995231628418) +``` + +**注意**: + +- 训练上述模型采用的图像分辨率是[3,32,128],需要通过参数`rec_image_shape`设置为您训练时的识别图像形状。 +- 在推理时需要设置参数`rec_char_dict_path`指定字典,如果您修改了字典,请修改该参数为您的字典文件。 +- 如果您修改了预处理方法,需修改`tools/infer/predict_rec.py`中ABINet的预处理为您的预处理方法。 + + + +### 4.2 C++推理部署 + +由于C++预处理后处理还未支持ABINet,所以暂未支持 + + +### 4.3 Serving服务化部署 + +暂不支持 + + +### 4.4 更多推理部署 + +暂不支持 + + +## 5. FAQ + +1. MJSynth和SynthText两种数据集来自于[ABINet源repo](https://github.com/FangShancheng/ABINet) 。 +2. 我们使用ABINet作者提供的预训练模型进行finetune训练。 + +## 引用 + +```bibtex +@article{Fang2021ABINet, + title = {ABINet: Read Like Humans: Autonomous, Bidirectional and Iterative Language Modeling for Scene Text Recognition}, + author = {Shancheng Fang and Hongtao Xie and Yuxin Wang and Zhendong Mao and Yongdong Zhang}, + booktitle = {CVPR}, + year = {2021}, + url = {https://arxiv.org/abs/2103.06495}, + pages = {7098-7107} +} +``` diff --git a/doc/doc_ch/algorithm_rec_nrtr.md b/doc/doc_ch/algorithm_rec_nrtr.md index d3b626d0241a6c36797cba20a58f007e383c7aee..c619ac1dbc23dcf33e1405c95ab9dd075a24b347 100644 --- a/doc/doc_ch/algorithm_rec_nrtr.md +++ b/doc/doc_ch/algorithm_rec_nrtr.md @@ -12,6 +12,7 @@ - [4.3 Serving服务化部署](#4-3) - [4.4 更多推理部署](#4-4) - [5. FAQ](#5) +- [6. 发行公告](#6) ## 1. 
算法简介 @@ -110,7 +111,7 @@ python3 tools/infer/predict_rec.py --image_dir='./doc/imgs_words_en/word_10.png' 执行命令后,上面图像的预测结果(识别的文本和得分)会打印到屏幕上,示例如下: 结果如下: ```shell -Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9265879392623901) +Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9465042352676392) ``` **注意**: @@ -140,12 +141,147 @@ Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9265879392623901) 1. `NRTR`论文中使用Beam搜索进行解码字符,但是速度较慢,这里默认未使用Beam搜索,以贪婪搜索进行解码字符。 + +## 6. 发行公告 + +1. release/2.6更新NRTR代码结构,新版NRTR可加载旧版(release/2.5及之前)模型参数,使用下面示例代码将旧版模型参数转换为新版模型参数: + +```python + + params = paddle.load('path/' + '.pdparams') # 旧版本参数 + state_dict = model.state_dict() # 新版模型参数 + new_state_dict = {} + + for k1, v1 in state_dict.items(): + + k = k1 + if 'encoder' in k and 'self_attn' in k and 'qkv' in k and 'weight' in k: + + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')].transpose((1, 0, 2, 3)) + k = params[k_para.replace('qkv', 'conv2')].transpose((1, 0, 2, 3)) + v = params[k_para.replace('qkv', 'conv3')].transpose((1, 0, 2, 3)) + + new_state_dict[k1] = np.concatenate([q[:, :, 0, 0], k[:, :, 0, 0], v[:, :, 0, 0]], -1) + + elif 'encoder' in k and 'self_attn' in k and 'qkv' in k and 'bias' in k: + + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')] + k = params[k_para.replace('qkv', 'conv2')] + v = params[k_para.replace('qkv', 'conv3')] + + new_state_dict[k1] = np.concatenate([q, k, v], -1) + + elif 'encoder' in k and 'self_attn' in k and 'out_proj' in k: + + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + + elif 'encoder' in k and 'norm3' in k: + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para.replace('norm3', 'norm2')] + + elif 'encoder' in k and 'norm1' in k: + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + + + elif 'decoder' in k and 'self_attn' in k and 'qkv' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')].transpose((1, 0, 2, 3)) + k = params[k_para.replace('qkv', 'conv2')].transpose((1, 0, 2, 3)) + v = params[k_para.replace('qkv', 'conv3')].transpose((1, 0, 2, 3)) + new_state_dict[k1] = np.concatenate([q[:, :, 0, 0], k[:, :, 0, 0], v[:, :, 0, 0]], -1) + + elif 'decoder' in k and 'self_attn' in k and 'qkv' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')] + k = params[k_para.replace('qkv', 'conv2')] + v = params[k_para.replace('qkv', 'conv3')] + new_state_dict[k1] = np.concatenate([q, k, v], -1) + + elif 'decoder' in k and 'self_attn' in k and 'out_proj' in k: + + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + + elif 'decoder' in k and 'cross_attn' in k and 'q' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + q = params[k_para.replace('q', 'conv1')].transpose((1, 0, 2, 3)) + new_state_dict[k1] = q[:, :, 0, 0] + + elif 'decoder' in k and 'cross_attn' in k and 'q' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + q = params[k_para.replace('q', 'conv1')] + new_state_dict[k1] = q + + elif 'decoder' in k and 'cross_attn' in k and 'kv' in k and 'weight' in k: + k_para = k[:13] + 'layers.' 
+ k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + k = params[k_para.replace('kv', 'conv2')].transpose((1, 0, 2, 3)) + v = params[k_para.replace('kv', 'conv3')].transpose((1, 0, 2, 3)) + new_state_dict[k1] = np.concatenate([k[:, :, 0, 0], v[:, :, 0, 0]], -1) + + elif 'decoder' in k and 'cross_attn' in k and 'kv' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + k = params[k_para.replace('kv', 'conv2')] + v = params[k_para.replace('kv', 'conv3')] + new_state_dict[k1] = np.concatenate([k, v], -1) + + elif 'decoder' in k and 'cross_attn' in k and 'out_proj' in k: + + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + new_state_dict[k1] = params[k_para] + elif 'decoder' in k and 'norm' in k: + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + elif 'mlp' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('fc', 'conv') + k_para = k_para.replace('mlp.', '') + w = params[k_para].transpose((1, 0, 2, 3)) + new_state_dict[k1] = w[:, :, 0, 0] + elif 'mlp' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('fc', 'conv') + k_para = k_para.replace('mlp.', '') + w = params[k_para] + new_state_dict[k1] = w + + else: + new_state_dict[k1] = params[k1] + + if list(new_state_dict[k1].shape) != list(v1.shape): + print(k1) + + + for k, v1 in state_dict.items(): + if k not in new_state_dict.keys(): + print(1, k) + elif list(new_state_dict[k].shape) != list(v1.shape): + print(2, k) + + + + model.set_state_dict(new_state_dict) + paddle.save(model.state_dict(), 'nrtrnew_from_old_params.pdparams') + +``` + +2. 新版相比与旧版,代码结构简洁,推理速度有所提高。 + + ## 引用 ```bibtex @article{Sheng2019NRTR, title = {NRTR: A No-Recurrence Sequence-to-Sequence Model For Scene Text Recognition}, - author = {Fenfen Sheng and Zhineng Chen andBo Xu}, + author = {Fenfen Sheng and Zhineng Chen and Bo Xu}, booktitle = {ICDAR}, year = {2019}, url = {http://arxiv.org/abs/1806.00926}, diff --git a/doc/doc_ch/algorithm_rec_svtr.md b/doc/doc_ch/algorithm_rec_svtr.md index 41a22ca65c00d0668298cdcf1486c1c3afdef919..c0e26433e92d8de722b80951ce8ccf17d28d19c3 100644 --- a/doc/doc_ch/algorithm_rec_svtr.md +++ b/doc/doc_ch/algorithm_rec_svtr.md @@ -111,7 +111,6 @@ python3 tools/export_model.py -c ./rec_svtr_tiny_none_ctc_en_train/rec_svtr_tiny **注意:** - 如果您是在自己的数据集上训练的模型,并且调整了字典文件,请注意修改配置文件中的`character_dict_path`是否为所正确的字典文件。 -- 如果您修改了训练时的输入大小,请修改`tools/export_model.py`文件中的对应SVTR的`infer_shape`。 转换成功后,在目录下有三个文件: ``` diff --git a/doc/doc_ch/algorithm_rec_vitstr.md b/doc/doc_ch/algorithm_rec_vitstr.md new file mode 100644 index 0000000000000000000000000000000000000000..ab12be720234119f5b084fb52de9c2a392171bcd --- /dev/null +++ b/doc/doc_ch/algorithm_rec_vitstr.md @@ -0,0 +1,154 @@ +# 场景文本识别算法-ViTSTR + +- [1. 算法简介](#1) +- [2. 环境配置](#2) +- [3. 模型训练、评估、预测](#3) + - [3.1 训练](#3-1) + - [3.2 评估](#3-2) + - [3.3 预测](#3-3) +- [4. 推理部署](#4) + - [4.1 Python推理](#4-1) + - [4.2 C++推理](#4-2) + - [4.3 Serving服务化部署](#4-3) + - [4.4 更多推理部署](#4-4) +- [5. FAQ](#5) + + +## 1. 
算法简介 + +论文信息: +> [Vision Transformer for Fast and Efficient Scene Text Recognition](https://arxiv.org/abs/2105.08582) +> Rowel Atienza +> ICDAR, 2021 + + + +`ViTSTR`使用MJSynth和SynthText两个文字识别数据集训练,在IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE数据集上进行评估,算法复现效果如下: + +|模型|骨干网络|配置文件|Acc|下载链接| +| --- | --- | --- | --- | --- | +|ViTSTR|ViTSTR|[rec_vitstr_none_ce.yml](../../configs/rec/rec_vitstr_none_ce.yml)|79.82%|[训练模型](https://paddleocr.bj.bcebos.com/rec_vitstr_none_ce_train.tar)| + + +## 2. 环境配置 +请先参考[《运行环境准备》](./environment.md)配置PaddleOCR运行环境,参考[《项目克隆》](./clone.md)克隆项目代码。 + + + +## 3. 模型训练、评估、预测 + + +### 3.1 模型训练 + +请参考[文本识别训练教程](./recognition.md)。PaddleOCR对代码进行了模块化,训练`ViTSTR`识别模型时需要**更换配置文件**为`ViTSTR`的[配置文件](../../configs/rec/rec_vitstr_none_ce.yml)。 + +#### 启动训练 + + +具体地,在完成数据准备后,便可以启动训练,训练命令如下: +```shell +#单卡训练(训练周期长,不建议) +python3 tools/train.py -c configs/rec/rec_vitstr_none_ce.yml + +#多卡训练,通过--gpus参数指定卡号 +python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_vitstr_none_ce.yml +``` + + +### 3.2 评估 + +可下载已训练完成的[模型文件](#model),使用如下命令进行评估: + +```shell +# 注意将pretrained_model的路径设置为本地路径。 +python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec_vitstr_none_ce.yml -o Global.pretrained_model=./rec_vitstr_none_ce_train/best_accuracy +``` + + +### 3.3 预测 + +使用如下命令进行单张图片预测: +```shell +# 注意将pretrained_model的路径设置为本地路径。 +python3 tools/infer_rec.py -c configs/rec/rec_vitstr_none_ce.yml -o Global.infer_img='./doc/imgs_words_en/word_10.png' Global.pretrained_model=./rec_vitstr_none_ce_train/best_accuracy +# 预测文件夹下所有图像时,可修改infer_img为文件夹,如 Global.infer_img='./doc/imgs_words_en/'。 +``` + + + +## 4. 推理部署 + + +### 4.1 Python推理 +首先将训练得到best模型,转换成inference model。这里以训练完成的模型为例([模型下载地址](https://paddleocr.bj.bcebos.com/rec_vitstr_none_ce_train.tar) ),可以使用如下命令进行转换: + +```shell +# 注意将pretrained_model的路径设置为本地路径。 +python3 tools/export_model.py -c configs/rec/rec_vitstr_none_ce.yml -o Global.pretrained_model=./rec_vitstr_none_ce_train/best_accuracy Global.save_inference_dir=./inference/rec_vitstr/ +``` +**注意:** +- 如果您是在自己的数据集上训练的模型,并且调整了字典文件,请注意修改配置文件中的`character_dict_path`是否是所需要的字典文件。 +- 如果您修改了训练时的输入大小,请修改`tools/export_model.py`文件中的对应ViTSTR的`infer_shape`。 + +转换成功后,在目录下有三个文件: +``` +/inference/rec_vitstr/ + ├── inference.pdiparams # 识别inference模型的参数文件 + ├── inference.pdiparams.info # 识别inference模型的参数信息,可忽略 + └── inference.pdmodel # 识别inference模型的program文件 +``` + +执行如下命令进行模型推理: + +```shell +python3 tools/infer/predict_rec.py --image_dir='./doc/imgs_words_en/word_10.png' --rec_model_dir='./inference/rec_vitstr/' --rec_algorithm='ViTSTR' --rec_image_shape='1,224,224' --rec_char_dict_path='./ppocr/utils/EN_symbol_dict.txt' +# 预测文件夹下所有图像时,可修改image_dir为文件夹,如 --image_dir='./doc/imgs_words_en/'。 +``` + +![](../imgs_words_en/word_10.png) + +执行命令后,上面图像的预测结果(识别的文本和得分)会打印到屏幕上,示例如下: +结果如下: +```shell +Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9998350143432617) +``` + +**注意**: + +- 训练上述模型采用的图像分辨率是[1,224,224],需要通过参数`rec_image_shape`设置为您训练时的识别图像形状。 +- 在推理时需要设置参数`rec_char_dict_path`指定字典,如果您修改了字典,请修改该参数为您的字典文件。 +- 如果您修改了预处理方法,需修改`tools/infer/predict_rec.py`中ViTSTR的预处理为您的预处理方法。 + + + +### 4.2 C++推理部署 + +由于C++预处理后处理还未支持ViTSTR,所以暂未支持 + + +### 4.3 Serving服务化部署 + +暂不支持 + + +### 4.4 更多推理部署 + +暂不支持 + + +## 5. FAQ + +1. 在`ViTSTR`论文中,使用在ImageNet1k上的预训练权重进行初始化训练,我们在训练未采用预训练权重,最终精度没有变化甚至有所提高。 +2. 
我们仅仅复现了`ViTSTR`中的tiny版本,如果需要使用small、base版本,可将[ViTSTR源repo](https://github.com/roatienza/deep-text-recognition-benchmark) 中的预训练权重转为Paddle权重使用。 + +## 引用 + +```bibtex +@article{Atienza2021ViTSTR, + title = {Vision Transformer for Fast and Efficient Scene Text Recognition}, + author = {Rowel Atienza}, + booktitle = {ICDAR}, + year = {2021}, + url = {https://arxiv.org/abs/2105.08582} +} +``` diff --git a/doc/doc_ch/application.md b/doc/doc_ch/application.md new file mode 100644 index 0000000000000000000000000000000000000000..5135dfac10b0b49d1c91eb07202bd3929c7bbb7c --- /dev/null +++ b/doc/doc_ch/application.md @@ -0,0 +1,41 @@ +# 场景应用 + +PaddleOCR场景应用覆盖通用,制造、金融、交通行业的主要OCR垂类应用,在PP-OCR、PP-Structure的通用能力基础之上,以notebook的形式展示利用场景数据微调、模型优化方法、数据增广等内容,为开发者快速落地OCR应用提供示范与启发。 + +> 如需下载全部垂类模型,可以扫描下方二维码,关注公众号填写问卷后,加入PaddleOCR官方交流群获取20G OCR学习大礼包(内含《动手学OCR》电子书、课程回放视频、前沿论文等重磅资料) + +
+ +
+
+
+> 如果您是企业开发者且未在下述场景中找到合适的方案,可以填写[OCR应用合作调研问卷](https://paddle.wjx.cn/vj/QwF7GKw.aspx),免费与官方团队展开不同层次的合作,包括但不限于问题抽象、确定技术方案、项目答疑、共同研发等。如果您已经使用PaddleOCR落地项目,也可以填写此问卷,与飞桨平台共同宣传推广,提升企业技术品宣。期待您的提交!
+
+## 通用
+
+| 类别 | 亮点 | 类别 | 亮点 |
+| ---------------------- | -------- | ---------- | ------------ |
+| 高精度中文识别模型SVTR | 新增模型 | 手写体识别 | 新增字形支持 |
+
+## 制造
+
+| 类别 | 亮点 | 类别 | 亮点 |
+| -------------- | ------------------------------ | -------------- | -------------------- |
+| 数码管识别 | 数码管数据合成、漏识别调优 | 电表识别 | 大分辨率图像检测调优 |
+| 液晶屏读数识别 | 检测模型蒸馏、Serving部署 | PCB文字识别 | 小尺寸文本检测与识别 |
+| 包装生产日期 | 点阵字符合成、过曝过暗文字识别 | 液晶屏缺陷检测 | 非文字形态识别 |
+
+## 金融
+
+| 类别 | 亮点 | 类别 | 亮点 |
+| -------------- | ------------------------ | ------------ | --------------------- |
+| 表单VQA | 多模态通用表单结构化提取 | 通用卡证识别 | 通用结构化提取 |
+| 增值税发票 | 敬请期待 | 身份证识别 | 结构化提取、图像阴影 |
+| 印章检测与识别 | 端到端弯曲文本识别 | 合同比对 | 密集文本检测、NLP串联 |
+
+## 交通
+
+| 类别 | 亮点 | 类别 | 亮点 |
+| ----------------- | ------------------------------ | ---------- | -------- |
+| 车牌识别 | 多角度图像、轻量模型、端侧部署 | 快递单识别 | 敬请期待 |
+| 驾驶证/行驶证识别 | 敬请期待 | | |
\ No newline at end of file
diff --git a/doc/doc_ch/dataset/layout_datasets.md b/doc/doc_ch/dataset/layout_datasets.md
index e7055b4e607aae358a9ec1e93f3640b2b68ea4a1..728a9be5fdd33a78482adb1e705afea7117a3037 100644
--- a/doc/doc_ch/dataset/layout_datasets.md
+++ b/doc/doc_ch/dataset/layout_datasets.md
@@ -15,8 +15,8 @@

- **数据简介**:publaynet数据集的训练集合中包含35万张图像,验证集合中包含1.1万张图像。总共包含5个类别,分别是: `text, title, list, table, figure`。部分图像以及标注框可视化如下所示。
- **下载地址**:https://developer.ibm.com/exchanges/data/all/publaynet/

@@ -30,8 +30,8 @@

- **数据简介**:CDLA数据集的训练集合中包含5000张图像,验证集合中包含1000张图像。总共包含10个类别,分别是: `Text, Title, Figure, Figure caption, Table, Table caption, Header, Footer, Reference, Equation`。部分图像以及标注框可视化如下所示。
- **下载地址**:https://github.com/buptlihang/CDLA

@@ -45,8 +45,8 @@

- **数据简介**:TableBank数据集包含Latex(训练集187199张,验证集7265张,测试集5719张)与Word(训练集73383张,验证集2735张,测试集2281张)两种类别的文档。仅包含`Table` 1个类别。部分图像以及标注框可视化如下所示。
- **下载地址**:https://doc-analysis.github.io/tablebank-page/index.html diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md index 8a71b75c249b794e7ecda0ad14dc8cd2f07447e0..2cf0732219ac9cd2309ae24896d7de1499986461 100644 --- a/doc/doc_ch/detection.md +++ b/doc/doc_ch/detection.md @@ -13,6 +13,7 @@ - [2.5 分布式训练](#25-分布式训练) - [2.6 知识蒸馏训练](#26-知识蒸馏训练) - [2.7 其他训练环境](#27-其他训练环境) + - [2.8 模型微调](#28-模型微调) - [3. 模型评估与预测](#3-模型评估与预测) - [3.1 指标评估](#31-指标评估) - [3.2 测试检测效果](#32-测试检测效果) @@ -141,7 +142,8 @@ python3 tools/train.py -c configs/det/det_mv3_db.yml \ Global.use_amp=True Global.scale_loss=1024.0 Global.use_dynamic_loss_scaling=True ``` - + + ## 2.5 分布式训练 多机多卡训练时,通过 `--ips` 参数设置使用的机器IP地址,通过 `--gpus` 参数设置使用的GPU ID: @@ -151,7 +153,7 @@ python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1 -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained ``` -**注意:** 采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通。另外,训练时需要在多个机器上分别启动命令。查看机器ip地址的命令为`ifconfig`。 +**注意:** (1)采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通;(2)训练时需要在多个机器上分别启动命令。查看机器ip地址的命令为`ifconfig`;(3)更多关于分布式训练的性能优势等信息,请参考:[分布式训练教程](./distributed_training.md)。 @@ -177,6 +179,13 @@ Windows平台只支持`单卡`的训练与预测,指定GPU进行训练`set CUD - Linux DCU DCU设备上运行需要设置环境变量 `export HIP_VISIBLE_DEVICES=0,1,2,3`,其余训练评估预测命令与Linux GPU完全相同。 + + +## 2.8 模型微调 + +实际使用过程中,建议加载官方提供的预训练模型,在自己的数据集中进行微调,关于检测模型的微调方法,请参考:[模型微调教程](./finetune.md)。 + + # 3. 模型评估与预测 @@ -196,6 +205,7 @@ python3 tools/eval.py -c configs/det/det_mv3_db.yml -o Global.checkpoints="{pat ## 3.2 测试检测效果 测试单张图像的检测效果: + ```shell python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" ``` @@ -226,14 +236,19 @@ python3 tools/export_model.py -c configs/det/det_mv3_db.yml -o Global.pretrained ``` DB检测模型inference 模型预测: + ```shell python3 tools/infer/predict_det.py --det_algorithm="DB" --det_model_dir="./output/det_db_inference/" --image_dir="./doc/imgs/" --use_gpu=True ``` 如果是其他检测,比如EAST模型,det_algorithm参数需要修改为EAST,默认为DB算法: + ```shell python3 tools/infer/predict_det.py --det_algorithm="EAST" --det_model_dir="./output/det_db_inference/" --image_dir="./doc/imgs/" --use_gpu=True ``` +更多关于推理超参数的配置与解释,请参考:[模型推理超参数解释教程](./inference_args.md)。 + + # 5. 
FAQ

diff --git a/doc/doc_ch/distributed_training.md b/doc/doc_ch/distributed_training.md
index e0251b21ea1157084e4e1b1d77429264d452aa20..6afa4a5b9f77ce238cb18fcb4160e49f7b465369 100644
--- a/doc/doc_ch/distributed_training.md
+++ b/doc/doc_ch/distributed_training.md
@@ -41,11 +41,16 @@ python3 -m paddle.distributed.launch \

## 性能效果测试

-* 基于单机8卡P40,和2机8卡P40,在26W公开识别数据集(LSVT, RCTW, MTWI)上进行训练,最终耗时如下。
+* 在2机8卡P40的机器上,基于26W公开识别数据集(LSVT, RCTW, MTWI)进行训练,最终耗时如下。

-| 模型 | 配置文件 | 机器数量 | 每台机器的GPU数量 | 训练时间 | 识别Acc | 加速比 |
-| :----------------------: | :------------: | :------------: | :---------------: | :----------: | :-----------: | :-----------: |
-| CRNN | configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml | 1 | 8 | 60h | 66.7% | - |
-| CRNN | configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml | 2 | 8 | 40h | 67.0% | 150% |
+| 模型 | 配置 | 精度 | 单机8卡耗时 | 2机8卡耗时 | 加速比 |
+|------|-----|--------|--------|--------|-----|
+| CRNN | [rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml) | 67.0% | 2.50d | 1.67d | **1.5** |

-可以看出,精度没有下降的情况下,训练时间由60h缩短为了40h,加速比可以达到60h/40h=150%,效率为60h/(40h*2)=75%。
+
+* 在4机8卡V100的机器上,基于全量数据训练,最终耗时如下。
+
+| 模型 | 配置 | 精度 | 单机8卡耗时 | 4机8卡耗时 | 加速比 |
+|------|-----|--------|--------|--------|-----|
+| SVTR | [ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml) | 74.0% | 10d | 2.84d | **3.5** |

diff --git a/doc/doc_ch/inference_args.md b/doc/doc_ch/inference_args.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa188ab7c800eaabae8a4ff54413af162dd60e43
--- /dev/null
+++ b/doc/doc_ch/inference_args.md
@@ -0,0 +1,120 @@
+# PaddleOCR模型推理参数解释
+
+在使用PaddleOCR进行模型推理时,可以自定义修改参数,来修改模型、数据、预处理、后处理等内容(参数文件:[utility.py](../../tools/infer/utility.py)),详细的参数解释如下所示。
+
+* 全局信息
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| image_dir | str | 无,必须显式指定 | 图像或者文件夹路径 |
+| vis_font_path | str | "./doc/fonts/simfang.ttf" | 用于可视化的字体路径 |
+| drop_score | float | 0.5 | 识别得分小于该值的结果会被丢弃,不会作为返回结果 |
+| use_pdserving | bool | False | 是否使用Paddle Serving进行预测 |
+| warmup | bool | False | 是否开启warmup,在统计预测耗时的时候,可以使用这种方法 |
+| draw_img_save_dir | str | "./inference_results" | 系统串联预测OCR结果的保存文件夹 |
+| save_crop_res | bool | False | 是否保存OCR的识别文本图像 |
+| crop_res_save_dir | str | "./output" | 保存OCR识别出来的文本图像路径 |
+| use_mp | bool | False | 是否开启多进程预测 |
+| total_process_num | int | 6 | 开启的进程数,`use_mp`为`True`时生效 |
+| process_id | int | 0 | 当前进程的id号,无需自己修改 |
+| benchmark | bool | False | 是否开启benchmark,对预测速度、显存占用等进行统计 |
+| save_log_path | str | "./log_output/" | 开启`benchmark`时,日志结果的保存文件夹 |
+| show_log | bool | True | 是否显示预测中的日志信息 |
+| use_onnx | bool | False | 是否开启onnx预测 |
+
+
+* 预测引擎相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| use_gpu | bool | True | 是否使用GPU进行预测 |
+| ir_optim | bool | True | 是否对计算图进行分析与优化,开启后可以加速预测过程 |
+| use_tensorrt | bool | False | 是否开启tensorrt |
+| min_subgraph_size | int | 15 | tensorrt中最小子图size,当子图的size大于该值时,才会尝试对该子图使用trt engine计算 |
+| precision | str | fp32 | 预测的精度,支持`fp32`, `fp16`, `int8` 3种输入 |
+| enable_mkldnn | bool | True | 是否开启mkldnn |
+| cpu_threads | int | 10 | 开启mkldnn时,cpu预测的线程数 |
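As a worked illustration of these engine switches, the sketch below turns on TensorRT FP16 inference for text detection; the model directory is a placeholder and a TensorRT-enabled prediction library is assumed:

```
# Sketch: GPU + TensorRT FP16 detection; requires a TRT-enabled paddle build
python3 tools/infer/predict_det.py --image_dir="./doc/imgs/1.jpg" \
    --det_model_dir="./ch_PP-OCRv3_det_infer/" \
    --use_gpu=True --use_tensorrt=True --precision=fp16
```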
+
+* 文本检测模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_algorithm | str | "DB" | 文本检测算法名称,目前支持`DB`, `EAST`, `SAST`, `PSE` |
+| det_model_dir | str | xx | 检测inference模型路径 |
+| det_limit_side_len | int | 960 | 检测的图像边长限制 |
+| det_limit_type | str | "max" | 检测的边长限制类型,目前支持`min`, `max`,`min`表示保证图像最短边不小于`det_limit_side_len`,`max`表示保证图像最长边不大于`det_limit_side_len` |
+
+其中,DB算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_db_thresh | float | 0.3 | DB输出的概率图中,得分大于该阈值的像素点才会被认为是文字像素点 |
+| det_db_box_thresh | float | 0.6 | 检测结果边框内,所有像素点的平均得分大于该阈值时,该结果会被认为是文字区域 |
+| det_db_unclip_ratio | float | 1.5 | `Vatti clipping`算法的扩张系数,使用该方法对文字区域进行扩张 |
+| max_batch_size | int | 10 | 预测的batch size |
+| use_dilation | bool | False | 是否对分割结果进行膨胀以获取更优检测效果 |
+| det_db_score_mode | str | "fast" | DB的检测结果得分计算方法,支持`fast`和`slow`,`fast`是根据polygon的外接矩形边框内的所有像素计算平均得分,`slow`是根据原始polygon内的所有像素计算平均得分,计算速度相对较慢一些,但是更加准确一些。 |
+
+EAST算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_east_score_thresh | float | 0.8 | EAST后处理中score map的阈值 |
+| det_east_cover_thresh | float | 0.1 | EAST后处理中文本框的平均得分阈值 |
+| det_east_nms_thresh | float | 0.2 | EAST后处理中nms的阈值 |
+
+SAST算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_sast_score_thresh | float | 0.5 | SAST后处理中的得分阈值 |
+| det_sast_nms_thresh | float | 0.5 | SAST后处理中nms的阈值 |
+| det_sast_polygon | bool | False | 是否多边形检测,弯曲文本场景(如Total-Text)设置为True |
+
+PSE算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_pse_thresh | float | 0.0 | 对输出图做二值化的阈值 |
+| det_pse_box_thresh | float | 0.85 | 对box进行过滤的阈值,低于此阈值的丢弃 |
+| det_pse_min_area | float | 16 | box的最小面积,低于此阈值的丢弃 |
+| det_pse_box_type | str | "box" | 返回框的类型,box:四点坐标,poly: 弯曲文本的所有点坐标 |
+| det_pse_scale | int | 1 | 输入图像相对于进入后处理的图的比例,如`640*640`的图像,网络输出为`160*160`,scale为2的情况下,进入后处理的图片shape为`320*320`。这个值调大可以加快后处理速度,但是会带来精度的下降 |
+
+* 文本识别模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| rec_algorithm | str | "CRNN" | 文本识别算法名称,目前支持`CRNN`, `SRN`, `RARE`, `NRTR`, `SAR` |
+| rec_model_dir | str | 无,如果使用识别模型,该项是必填项 | 识别inference模型路径 |
+| rec_image_shape | list | [3, 32, 320] | 识别时的图像尺寸 |
+| rec_batch_num | int | 6 | 识别的batch size |
+| max_text_length | int | 25 | 识别结果最大长度,在`SRN`中有效 |
+| rec_char_dict_path | str | "./ppocr/utils/ppocr_keys_v1.txt" | 识别的字符字典文件 |
+| use_space_char | bool | True | 是否包含空格,如果为`True`,则会在最后字符字典中补充`空格`字符 |
+
+
+* 端到端文本检测与识别模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| e2e_algorithm | str | "PGNet" | 端到端算法名称,目前支持`PGNet` |
+| e2e_model_dir | str | 无,如果使用端到端模型,该项是必填项 | 端到端模型inference模型路径 |
+| e2e_limit_side_len | int | 768 | 端到端的输入图像边长限制 |
+| e2e_limit_type | str | "max" | 端到端的边长限制类型,目前支持`min`, `max`,`min`表示保证图像最短边不小于`e2e_limit_side_len`,`max`表示保证图像最长边不大于`e2e_limit_side_len` |
+| e2e_pgnet_score_thresh | float | 0.5 | 端到端得分阈值,小于该阈值的结果会被丢弃 |
+| e2e_char_dict_path | str | "./ppocr/utils/ic15_dict.txt" | 识别的字典文件路径 |
+| e2e_pgnet_valid_set | str | "totaltext" | 验证集名称,目前支持`totaltext`, `partvgg`,不同数据集对应的后处理方式不同,与训练过程保持一致即可 |
+| e2e_pgnet_mode | str | "fast" | PGNet的检测结果得分计算方法,支持`fast`和`slow`,`fast`是根据polygon的外接矩形边框内的所有像素计算平均得分,`slow`是根据原始polygon内的所有像素计算平均得分,计算速度相对较慢一些,但是更加准确一些。 |
+
+
+* 方向分类器模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| use_angle_cls | bool | False | 是否使用方向分类器 |
+| cls_model_dir | str | 无,如果需要使用,则必须显式指定路径 | 方向分类器inference模型路径 |
+| cls_image_shape | list | [3, 48, 192] | 预测尺度 |
+| label_list | list | ['0', '180'] | class id对应的角度值 |
+| cls_batch_num | int | 6 | 方向分类器预测的batch size |
+| cls_thresh | float | 0.9 | 预测阈值,模型预测结果为180度,且得分大于该阈值时,认为最终预测结果为180度,需要翻转 |
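Putting several of the above arguments together, a system-level call might look like the following sketch; it assumes the standard tools/infer/predict_system.py entry point, and the model directories are placeholders:

```
# Sketch: det + cls + rec on CPU with MKL-DNN; all paths are placeholders
python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" \
    --det_model_dir="./ch_PP-OCRv3_det_infer/" \
    --rec_model_dir="./ch_PP-OCRv3_rec_infer/" \
    --use_angle_cls=True --cls_model_dir="./ch_ppocr_mobile_v2.0_cls_infer/" \
    --use_gpu=False --enable_mkldnn=True --drop_score=0.5
```

diff --git a/doc/doc_ch/knowledge_distillation.md b/doc/doc_ch/knowledge_distillation.md
index 76ab5e62e0136bb8707a51537bf6693fc5687047..79c4418d535cc7989360bc581974dc2d55cf1890 100644
--- 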
a/doc/doc_ch/knowledge_distillation.md +++ b/doc/doc_ch/knowledge_distillation.md @@ -424,9 +424,9 @@ Architecture: ``` -如果是采用DML,即两个小模型互相学习的方法,上述配置文件里的Teacher网络结构需要设置为Student模型一样的配置,具体参考配置文件[ch_PP-OCRv3_det_dml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.4/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml)。 - -下面介绍[ch_PP-OCRv3_det_cml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.4/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)的配置文件参数: +如果是采用DML,即两个小模型互相学习的方法,上述配置文件里的Teacher网络结构需要设置为Student模型一样的配置,具体参考配置文件[ch_PP-OCRv3_det_dml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml)。 + +下面介绍[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)的配置文件参数: ``` Architecture: diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md index 318d5874f5e01390976723ccdb98012b95a6eb7f..c36cac037e1f90a41da24bc64cacbbb860e04c6b 100644 --- a/doc/doc_ch/models_list.md +++ b/doc/doc_ch/models_list.md @@ -22,7 +22,7 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训 |模型类型|模型格式|简介| |--- | --- | --- | -|推理模型|inference.pdmodel、inference.pdiparams|用于预测引擎推理,[详情](./inference.md)| +|推理模型|inference.pdmodel、inference.pdiparams|用于预测引擎推理,[详情](./inference_ppocr.md)| |训练模型、预训练模型|\*.pdparams、\*.pdopt、\*.states |训练过程中保存的模型的参数、优化器状态和训练中间信息,多用于模型指标评估和恢复训练| |nb模型|\*.nb|经过飞桨Paddle-Lite工具优化后的模型,适用于移动端/IoT端等端侧部署场景(需使用飞桨Paddle Lite部署)。| @@ -114,7 +114,7 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训 | ka_PP-OCRv3_rec | ppocr/utils/dict/ka_dict.txt |卡纳达文识别|[ka_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml)|9.9M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ka_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ka_PP-OCRv3_rec_train.tar) | | ta_PP-OCRv3_rec | ppocr/utils/dict/ta_dict.txt |泰米尔文识别|[ta_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml)|9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ta_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ta_PP-OCRv3_rec_train.tar) | | latin_PP-OCRv3_rec | ppocr/utils/dict/latin_dict.txt | 拉丁文识别 | [latin_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml) |9.7M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_PP-OCRv3_rec_train.tar) | -| arabic_PP-OCRv3_rec | ppocr/utils/dict/arabic_dict.txt | 阿拉伯字母 | [arabic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/rec_arabic_lite_train.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_train.tar) | +| arabic_PP-OCRv3_rec | ppocr/utils/dict/arabic_dict.txt | 阿拉伯字母 | [arabic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/arabic_PP-OCRv3_rec.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_train.tar) | | cyrillic_PP-OCRv3_rec | ppocr/utils/dict/cyrillic_dict.txt | 斯拉夫字母 | [cyrillic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml) |9.6M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_train.tar) | | devanagari_PP-OCRv3_rec | 
ppocr/utils/dict/devanagari_dict.txt |梵文字母 | [devanagari_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml) |9.9M|[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_train.tar) | diff --git a/doc/doc_ch/multi_languages.md b/doc/doc_ch/multi_languages.md index 499fdd9881563b3a784b5f4ba4feace54f1a3a6a..1f337bdf49f4c6f90b8b35af85bc1316dae308a4 100644 --- a/doc/doc_ch/multi_languages.md +++ b/doc/doc_ch/multi_languages.md @@ -238,10 +238,10 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs ## 4 预测部署 除了安装whl包进行快速预测,ppocr 也提供了多种预测部署方式,如有需求可阅读相关文档: -- [基于Python脚本预测引擎推理](./inference.md) -- [基于C++预测引擎推理](../../deploy/cpp_infer/readme.md) +- [基于Python脚本预测引擎推理](./inference_ppocr.md) +- [基于C++预测引擎推理](../../deploy/cpp_infer/readme_ch.md) - [服务化部署](../../deploy/hubserving/readme.md) -- [端侧部署](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md) +- [端侧部署](../../deploy/lite/readme_ch.md) - [Benchmark](./benchmark.md) diff --git a/doc/doc_ch/ppocr_introduction.md b/doc/doc_ch/ppocr_introduction.md index 59de124e2ab855d0b4abb90d0a356aefd6db586d..bd62087c8b212098cba6b0ee1cbaf413ab23015f 100644 --- a/doc/doc_ch/ppocr_introduction.md +++ b/doc/doc_ch/ppocr_introduction.md @@ -30,11 +30,11 @@ PP-OCR系统pipeline如下: PP-OCR系统在持续迭代优化,目前已发布PP-OCR和PP-OCRv2两个版本: -PP-OCR从骨干网络选择和调整、预测头部的设计、数据增强、学习率变换策略、正则化参数选择、预训练模型使用以及模型自动裁剪量化8个方面,采用19个有效策略,对各个模块的模型进行效果调优和瘦身(如绿框所示),最终得到整体大小为3.5M的超轻量中英文OCR和2.8M的英文数字OCR。更多细节请参考PP-OCR技术方案 https://arxiv.org/abs/2009.09941 +PP-OCR从骨干网络选择和调整、预测头部的设计、数据增强、学习率变换策略、正则化参数选择、预训练模型使用以及模型自动裁剪量化8个方面,采用19个有效策略,对各个模块的模型进行效果调优和瘦身(如绿框所示),最终得到整体大小为3.5M的超轻量中英文OCR和2.8M的英文数字OCR。更多细节请参考[PP-OCR技术报告](https://arxiv.org/abs/2009.09941)。 #### PP-OCRv2 -PP-OCRv2在PP-OCR的基础上,进一步在5个方面重点优化,检测模型采用CML协同互学习知识蒸馏策略和CopyPaste数据增广策略;识别模型采用LCNet轻量级骨干网络、UDML 改进知识蒸馏策略和[Enhanced CTC loss](./enhanced_ctc_loss.md)损失函数改进(如上图红框所示),进一步在推理速度和预测效果上取得明显提升。更多细节请参考PP-OCRv2[技术报告](https://arxiv.org/abs/2109.03144)。 +PP-OCRv2在PP-OCR的基础上,进一步在5个方面重点优化,检测模型采用CML协同互学习知识蒸馏策略和CopyPaste数据增广策略;识别模型采用LCNet轻量级骨干网络、UDML 改进知识蒸馏策略和[Enhanced CTC loss](./enhanced_ctc_loss.md)损失函数改进(如上图红框所示),进一步在推理速度和预测效果上取得明显提升。更多细节请参考[PP-OCRv2技术报告](https://arxiv.org/abs/2109.03144)。 #### PP-OCRv3 @@ -48,7 +48,7 @@ PP-OCRv3系统pipeline如下: -更多细节请参考PP-OCRv3[技术报告](./PP-OCRv3_introduction.md)。 +更多细节请参考[PP-OCRv3技术报告](https://arxiv.org/abs/2206.03001v2) 👉[中文简洁版](./PP-OCRv3_introduction.md) diff --git a/doc/doc_ch/quickstart.md b/doc/doc_ch/quickstart.md index 29ca48fa838be4a60f08d31d5031180b951e33bc..e425cdd8a87d320554e61c72e05001875d022e43 100644 --- a/doc/doc_ch/quickstart.md +++ b/doc/doc_ch/quickstart.md @@ -101,8 +101,17 @@ cd /path/to/ppocr_img ['韩国小馆', 0.994467] ``` +**版本说明** +paddleocr默认使用PP-OCRv3模型(`--ocr_version PP-OCRv3`),如需使用其他版本可通过设置参数`--ocr_version`,具体版本说明如下: +| 版本名称 | 版本说明 | +| --- | --- | +| PP-OCRv3 | 支持中、英文检测和识别,方向分类器,支持多语种识别 | +| PP-OCRv2 | 支持中英文的检测和识别,方向分类器,多语言暂未更新 | +| PP-OCR | 支持中、英文检测和识别,方向分类器,支持多语种识别 | -如需使用2.0模型,请指定参数`--ocr_version PP-OCR`,paddleocr默认使用PP-OCRv3模型(`--ocr_version PP-OCRv3`)。更多whl包使用可参考[whl包文档](./whl.md) +如需新增自己训练的模型,可以在[paddleocr](../../paddleocr.py)中增加模型链接和字段,重新编译即可。 + +更多whl包使用可参考[whl包文档](./whl.md) diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index 8457df69ff4c09b196b0f0f91271a92344217d75..acf09f7bdecf41adfeee26efde6afaff8db7a41e 100644 --- a/doc/doc_ch/recognition.md +++ 
b/doc/doc_ch/recognition.md @@ -18,6 +18,7 @@ - [2.6. 知识蒸馏训练](#26-知识蒸馏训练) - [2.7. 多语言模型训练](#27-多语言模型训练) - [2.8. 其他训练环境](#28-其他训练环境) + - [2.9. 模型微调](#29-模型微调) - [3. 模型评估与预测](#3-模型评估与预测) - [3.1. 指标评估](#31-指标评估) - [3.2. 测试识别效果](#32-测试识别效果) @@ -217,6 +218,30 @@ python3 tools/train.py -c configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml -o Global.pre python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml -o Global.pretrained_model=./pretrain_models/en_PP-OCRv3_rec_train/best_accuracy ``` +正常启动训练后,会看到以下log输出: + +``` +[2022/02/22 07:58:05] root INFO: epoch: [1/800], iter: 10, lr: 0.000000, loss: 0.754281, acc: 0.000000, norm_edit_dis: 0.000008, reader_cost: 0.55541 s, batch_cost: 0.91654 s, samples: 1408, ips: 153.62133 +[2022/02/22 07:58:13] root INFO: epoch: [1/800], iter: 20, lr: 0.000001, loss: 0.924677, acc: 0.000000, norm_edit_dis: 0.000008, reader_cost: 0.00236 s, batch_cost: 0.28528 s, samples: 1280, ips: 448.68599 +[2022/02/22 07:58:23] root INFO: epoch: [1/800], iter: 30, lr: 0.000002, loss: 0.967231, acc: 0.000000, norm_edit_dis: 0.000008, reader_cost: 0.14527 s, batch_cost: 0.42714 s, samples: 1280, ips: 299.66507 +[2022/02/22 07:58:31] root INFO: epoch: [1/800], iter: 40, lr: 0.000003, loss: 0.895318, acc: 0.000000, norm_edit_dis: 0.000008, reader_cost: 0.00173 s, batch_cost: 0.27719 s, samples: 1280, ips: 461.77252 +``` + +log 中自动打印如下信息: + +| 字段 | 含义 | +| :----: | :------: | +| epoch | 当前迭代轮次 | +| iter | 当前迭代次数 | +| lr | 当前学习率 | +| loss | 当前损失函数 | +| acc | 当前batch的准确率 | +| norm_edit_dis | 当前 batch 的编辑距离 | +| reader_cost | 当前 batch 数据处理耗时 | +| batch_cost | 当前 batch 总耗时 | +| samples | 当前 batch 内的样本数 | +| ips | 每秒处理图片的数量 | + PaddleOCR支持训练和评估交替进行, 可以在 `configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml` 中修改 `eval_batch_step` 设置评估频率,默认每500个iter评估一次。评估过程中默认将最佳acc模型,保存为 `output/en_PP-OCRv3_rec/best_accuracy` 。 @@ -363,7 +388,7 @@ python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1 -o Global.pretrained_model=./pretrain_models/en_PP-OCRv3_rec_train/best_accuracy ``` -**注意:** 采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通。另外,训练时需要在多个机器上分别启动命令。查看机器ip地址的命令为`ifconfig`。 +**注意:** (1)采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通;(2)训练时需要在多个机器上分别启动命令。查看机器ip地址的命令为`ifconfig`;(3)更多关于分布式训练的性能优势等信息,请参考:[分布式训练教程](./distributed_training.md)。 ## 2.6. 知识蒸馏训练 @@ -438,6 +463,11 @@ Windows平台只支持`单卡`的训练与预测,指定GPU进行训练`set CUD - Linux DCU DCU设备上运行需要设置环境变量 `export HIP_VISIBLE_DEVICES=0,1,2,3`,其余训练评估预测命令与Linux GPU完全相同。 +## 2.9 模型微调 + +实际使用过程中,建议加载官方提供的预训练模型,在自己的数据集中进行微调,关于识别模型的微调方法,请参考:[模型微调教程](./finetune.md)。 + + # 3. 模型评估与预测 ## 3.1. 指标评估 @@ -540,12 +570,13 @@ inference/en_PP-OCRv3_rec/ - 自定义模型推理 - 如果训练时修改了文本的字典,在使用inference模型预测时,需要通过`--rec_char_dict_path`指定使用的字典路径 + 如果训练时修改了文本的字典,在使用inference模型预测时,需要通过`--rec_char_dict_path`指定使用的字典路径,更多关于推理超参数的配置与解释,请参考:[模型推理超参数解释教程](./inference_args.md)。 ``` python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words_en/word_336.png" --rec_model_dir="./your inference model" --rec_image_shape="3, 48, 320" --rec_char_dict_path="your text dict path" ``` + # 5. FAQ Q1: 训练模型转inference 模型之后预测效果不一致? 
diff --git a/doc/doc_ch/serving_inference.md b/doc/doc_ch/serving_inference.md deleted file mode 100644 index 30ea7ee7c11692ba02e8314036d74a21c2f090e5..0000000000000000000000000000000000000000 --- a/doc/doc_ch/serving_inference.md +++ /dev/null @@ -1,238 +0,0 @@ -# 使用Paddle Serving预测推理 - -阅读本文档之前,请先阅读文档 [基于Python预测引擎推理](./inference.md) - -同本地执行预测一样,我们需要保存一份可以用于Paddle Serving的模型。 - -接下来首先介绍如何将训练的模型转换成Paddle Serving模型,然后将依次介绍文本检测、文本识别以及两者串联基于预测引擎推理。 - -### 一、 准备环境 -我们先安装Paddle Serving相关组件 -我们推荐用户使用GPU来做Paddle Serving的OCR服务部署 - -**CUDA版本:9.X/10.X** - -**CUDNN版本:7.X** - -**操作系统版本:Linux/Windows** - -**Python版本: 2.7/3.5/3.6/3.7** - -**Python操作指南:** - -目前Serving用于OCR的部分功能还在测试当中,因此在这里我们给出[Servnig latest package](https://github.com/PaddlePaddle/Serving/blob/develop/doc/Latest_Packages_CN.md) -大家根据自己的环境选择需要安装的whl包即可,例如以Python 3.5为例,执行下列命令 -``` -#CPU/GPU版本选择一个 -#GPU版本服务端 -#CUDA 9 -python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py3-none-any.whl -#CUDA 10 -python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py3-none-any.whl -#CPU版本服务端 -python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.0.0-py3-none-any.whl -#客户端和App包使用以下链接(CPU,GPU通用) -python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.0.0-cp36-none-any.whl https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.0.0-py3-none-any.whl -``` - -## 二、训练模型转Serving模型 - -在前序文档 [基于Python预测引擎推理](./inference.md) 中,我们提供了如何把训练的checkpoint转换成Paddle模型。Paddle模型通常由一个文件夹构成,内含模型结构描述文件`model`和模型参数文件`params`。Serving模型由两个文件夹构成,用于存放客户端和服务端的配置。 - -我们以`ch_rec_r34_vd_crnn`模型作为例子,下载链接在: - -``` -wget --no-check-certificate https://paddleocr.bj.bcebos.com/ch_models/ch_rec_r34_vd_crnn_infer.tar -tar xf ch_rec_r34_vd_crnn_infer.tar -``` -因此我们按照Serving模型转换教程,运行下列python文件。 -``` -python tools/inference_to_serving.py --model_dir ch_rec_r34_vd_crnn -``` -最终会在`serving_client_dir`和`serving_server_dir`生成客户端和服务端的模型配置。其中`serving_server_dir`和`serving_client_dir`的名字可以自定义。最终文件结构如下 - -``` -/ch_rec_r34_vd_crnn/ -├── serving_client_dir # 客户端配置文件夹 -└── serving_server_dir # 服务端配置文件夹 -``` - -## 三、文本检测模型Serving推理 - -启动服务可以根据实际需求选择启动`标准版`或者`快速版`,两种方式的对比如下表: - -|版本|特点|适用场景| -|-|-|-| -|标准版|稳定性高,分布式部署|适用于吞吐量大,需要跨机房部署的情况| -|快速版|部署方便,预测速度快|适用于对预测速度要求高,迭代速度快的场景,Windows用户只能选择快速版| - -接下来的命令中,我们会指定快速版和标准版的命令。需要说明的是,标准版只能用Linux平台,快速版可以支持Linux/Windows。 -文本检测模型推理,默认使用DB模型的配置参数,识别默认为CRNN。 - -配置文件在`params.py`中,我们贴出配置部分,如果需要做改动,也在这个文件内部进行修改。 - -``` -def read_params(): - cfg = Config() - #use gpu - cfg.use_gpu = False # 是否使用GPU - cfg.use_pdserving = True # 是否使用paddleserving,必须为True - - #params for text detector - cfg.det_algorithm = "DB" # 检测算法, DB/EAST等 - cfg.det_model_dir = "./det_mv_server/" # 检测算法模型路径 - cfg.det_max_side_len = 960 - - #DB params - cfg.det_db_thresh =0.3 - cfg.det_db_box_thresh =0.5 - cfg.det_db_unclip_ratio =2.0 - - #EAST params - cfg.det_east_score_thresh = 0.8 - cfg.det_east_cover_thresh = 0.1 - cfg.det_east_nms_thresh = 0.2 - - #params for text recognizer - cfg.rec_algorithm = "CRNN" # 识别算法, CRNN/RARE等 - cfg.rec_model_dir = "./ocr_rec_server/" # 识别算法模型路径 - - cfg.rec_image_shape = "3, 32, 320" - cfg.rec_batch_num = 30 - cfg.max_text_length = 25 - - cfg.rec_char_dict_path = "./ppocr_keys_v1.txt" # 识别算法字典文件 - cfg.use_space_char = True - - #params for text classifier - cfg.use_angle_cls = True # 是否启用分类算法 - cfg.cls_model_dir = "./ocr_clas_server/" # 分类算法模型路径 - cfg.cls_image_shape = "3, 48, 192" - cfg.label_list = 
['0', '180'] - cfg.cls_batch_num = 30 - cfg.cls_thresh = 0.9 - - return cfg -``` -与本地预测不同的是,Serving预测需要一个客户端和一个服务端,因此接下来的教程都是两行代码。 - -在正式执行服务端启动命令之前,先export PYTHONPATH到工程主目录下。 -``` -export PYTHONPATH=$PWD:$PYTHONPATH -cd deploy/pdserving -``` -为了方便用户复现Demo程序,我们提供了Chinese and English ultra-lightweight OCR model (8.1M)版本的Serving模型 -``` -wget --no-check-certificate https://paddleocr.bj.bcebos.com/deploy/pdserving/ocr_pdserving_suite.tar.gz -tar xf ocr_pdserving_suite.tar.gz -``` - -### 1. 超轻量中文检测模型推理 - -超轻量中文检测模型推理,可以执行如下命令启动服务端: - -``` -#根据环境只需要启动其中一个就可以 -python det_rpc_server.py #标准版,Linux用户 -python det_local_server.py #快速版,Windows/Linux用户 -``` - -客户端 - -``` -python det_web_client.py -``` - - -Serving的推测和本地预测不同点在于,客户端发送请求到服务端,服务端需要检测到文字框之后返回框的坐标,此处没有后处理的图片,只能看到坐标值。 - -## 四、文本识别模型Serving推理 - -下面将介绍超轻量中文识别模型推理、基于CTC损失的识别模型推理和基于Attention损失的识别模型推理。对于中文文本识别,建议优先选择基于CTC损失的识别模型,实践中也发现基于Attention损失的效果不如基于CTC损失的识别模型。此外,如果训练时修改了文本的字典,请参考下面的自定义文本识别字典的推理。 - -### 1. 超轻量中文识别模型推理 - -超轻量中文识别模型推理,可以执行如下命令启动服务端: -需要注意params.py中的`--use_gpu`的值 -``` -#根据环境只需要启动其中一个就可以 -python rec_rpc_server.py #标准版,Linux用户 -python rec_local_server.py #快速版,Windows/Linux用户 -``` -如果需要使用CPU版本,还需增加 `--use_gpu False`。 - -客户端 - -``` -python rec_web_client.py -``` - -![](../imgs_words/ch/word_4.jpg) - -执行命令后,上面图像的预测结果(识别的文本和得分)会打印到屏幕上,示例如下: - -``` -{u'result': {u'score': [u'0.89547354'], u'pred_text': ['实力活力']}} -``` - - - -## 五、方向分类模型推理 - -下面将介绍方向分类模型推理。 - - - -### 1. 方向分类模型推理 - -方向分类模型推理, 可以执行如下命令启动服务端: -需要注意params.py中的`--use_gpu`的值 -``` -#根据环境只需要启动其中一个就可以 -python clas_rpc_server.py #标准版,Linux用户 -python clas_local_server.py #快速版,Windows/Linux用户 -``` - -客户端 - -``` -python rec_web_client.py -``` - -![](../imgs_words/ch/word_4.jpg) - -执行命令后,上面图像的预测结果(分类的方向和得分)会打印到屏幕上,示例如下: - -``` -{u'result': {u'direction': [u'0'], u'score': [u'0.9999963']}} -``` - - -## 六、文本检测、方向分类和文字识别串联Serving推理 - -### 1. 
超轻量中文OCR模型推理 - -在执行预测时,需要通过参数`image_dir`指定单张图像或者图像集合的路径、参数`det_model_dir`,`cls_model_dir`和`rec_model_dir`分别指定检测,方向分类和识别的inference模型路径。参数`use_angle_cls`用于控制是否启用方向分类模型。与本地预测不同的是,为了减少网络传输耗时,可视化识别结果目前不做处理,用户收到的是推理得到的文字字段。 - -执行如下命令启动服务端: -需要注意params.py中的`--use_gpu`的值 -``` -#标准版,Linux用户 -#GPU用户 -python -m paddle_serving_server_gpu.serve --model det_infer_server --port 9293 --gpu_id 0 -python -m paddle_serving_server_gpu.serve --model cls_infer_server --port 9294 --gpu_id 0 -python ocr_rpc_server.py -#CPU用户 -python -m paddle_serving_server.serve --model det_infer_server --port 9293 -python -m paddle_serving_server.serve --model cls_infer_server --port 9294 -python ocr_rpc_server.py - -#快速版,Windows/Linux用户 -python ocr_local_server.py -``` - -客户端 - -``` -python rec_web_client.py -``` diff --git a/doc/doc_ch/update.md b/doc/doc_ch/update.md index 9927a228d270c0e4c5ddbd5ab2a0d35911c6a75a..07591ea126f5b168389657141673142c084e67ad 100644 --- a/doc/doc_ch/update.md +++ b/doc/doc_ch/update.md @@ -1,9 +1,9 @@ # 更新 - 2022.5.9 发布PaddleOCR v2.5。发布内容包括: - - [PP-OCRv3](./doc/doc_ch/ppocr_introduction.md#pp-ocrv3),速度可比情况下,中文场景效果相比于PP-OCRv2再提升5%,英文场景提升11%,80语种多语言模型平均识别准确率提升5%以上; - - 半自动标注工具[PPOCRLabelv2](./PPOCRLabel):新增表格文字图像、图像关键信息抽取任务和不规则文字图像的标注功能; + - [PP-OCRv3](./ppocr_introduction.md#pp-ocrv3),速度可比情况下,中文场景效果相比于PP-OCRv2再提升5%,英文场景提升11%,80语种多语言模型平均识别准确率提升5%以上; + - 半自动标注工具[PPOCRLabelv2](../../PPOCRLabel):新增表格文字图像、图像关键信息抽取任务和不规则文字图像的标注功能; - OCR产业落地工具集:打通22种训练部署软硬件环境与方式,覆盖企业90%的训练部署环境需求 - - 交互式OCR开源电子书[《动手学OCR》](./doc/doc_ch/ocr_book.md),覆盖OCR全栈技术的前沿理论与代码实践,并配套教学视频。 + - 交互式OCR开源电子书[《动手学OCR》](./ocr_book.md),覆盖OCR全栈技术的前沿理论与代码实践,并配套教学视频。 - 2022.5.7 添加对[Weights & Biases](https://docs.wandb.ai/)训练日志记录工具的支持。 - 2021.12.21 《OCR十讲》课程开讲,12月21日起每晚八点半线上授课! 【免费】报名地址:https://aistudio.baidu.com/aistudio/course/introduce/25207 - 2021.12.21 发布PaddleOCR v2.4。OCR算法新增1种文本检测算法(PSENet),3种文本识别算法(NRTR、SEED、SAR);文档结构化算法新增1种关键信息提取算法(SDMGR),3种DocVQA算法(LayoutLM、LayoutLMv2,LayoutXLM)。 diff --git a/doc/doc_en/PP-OCRv3_introduction_en.md b/doc/doc_en/PP-OCRv3_introduction_en.md index 07894340c2de9fe2fe2d677167439ec581aa43e8..481e0b8174b1e5ebce84eb1745c49dccd2c565f5 100644 --- a/doc/doc_en/PP-OCRv3_introduction_en.md +++ b/doc/doc_en/PP-OCRv3_introduction_en.md @@ -76,7 +76,7 @@ LK-PAN (Large Kernel PAN) is a lightweight [PAN](https://arxiv.org/pdf/1803.0153 **(2) DML: Deep Mutual Learning Strategy for Teacher Model** -[DML](https://arxiv.org/abs/1706.00384)(Collaborative Mutual Learning), as shown in the figure below, can effectively improve the accuracy of the text detection model by learning from each other with two models with the same structure. The DML strategy is adopted in the teacher model training, and the hmean is increased from 85% to 86%. By updating the teacher model of CML in PP-OCRv2 to the above-mentioned higher-precision one, the hmean of the student model can be further improved from 83.2% to 84.3%. +[DML](https://arxiv.org/abs/1706.00384)(Deep Mutual Learning), as shown in the figure below, can effectively improve the accuracy of the text detection model by learning from each other with two models with the same structure. The DML strategy is adopted in the teacher model training, and the hmean is increased from 85% to 86%. By updating the teacher model of CML in PP-OCRv2 to the above-mentioned higher-precision one, the hmean of the student model can be further improved from 83.2% to 84.3%.
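The mutual-learning objective behind DML is compact enough to sketch. The following is an editorial illustration in Paddle of the generic DML loss from the paper (cross-entropy for each peer plus a symmetric KL term between the two, written here as a single joint loss). It is not the actual PaddleOCR distillation code, which is configured through the distillation YAML files and the `ppocr` loss modules, and the classification-style tensors stand in for the real detection/recognition outputs.

```python
import paddle
import paddle.nn.functional as F

def dml_loss(logits_a, logits_b, labels):
    """Deep Mutual Learning, joint form: each peer fits the labels and mimics the other."""
    ce = F.cross_entropy(logits_a, labels) + F.cross_entropy(logits_b, labels)
    pa, pb = F.softmax(logits_a, axis=-1), F.softmax(logits_b, axis=-1)
    log_pa, log_pb = F.log_softmax(logits_a, axis=-1), F.log_softmax(logits_b, axis=-1)
    # Symmetric KL: KL(pa || pb) + KL(pb || pa), averaged over the batch.
    kl = ((pa * (log_pa - log_pb)).sum(-1) + (pb * (log_pb - log_pa)).sum(-1)).mean()
    return ce + kl

logits_a = paddle.randn([8, 10])    # peer A: batch of 8, 10 classes
logits_b = paddle.randn([8, 10])    # peer B: same architecture, separate weights
labels = paddle.randint(0, 10, [8])
print(dml_loss(logits_a, logits_b, labels))
```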
@@ -100,7 +100,7 @@ Considering that the features of some channels will be suppressed if the convolu

 The recognition module of PP-OCRv3 is optimized based on the text recognition algorithm [SVTR](https://arxiv.org/abs/2205.00159). RNN is abandoned in SVTR, and the context information of the text line image is more effectively mined by introducing the Transformers structure, thereby improving the text recognition ability.

-The recognition accuracy of SVTR_inty outperforms PP-OCRv2 recognition model by 5.3%, while the prediction speed nearly 11 times slower. It takes nearly 100ms to predict a text line on CPU. Therefore, as shown in the figure below, PP-OCRv3 adopts the following six optimization strategies to accelerate the recognition model.
+The recognition accuracy of SVTR_tiny outperforms the PP-OCRv2 recognition model by 5.3%, but the prediction speed is nearly 11 times slower: it takes nearly 100ms to predict a text line on CPU. Therefore, as shown in the figure below, PP-OCRv3 adopts the following six optimization strategies to accelerate the recognition model.
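If you want to sanity-check a per-text-line CPU latency figure like the 100ms quoted above on your own machine, a rough editorial sketch with the `paddleocr` whl package follows. The keyword arguments mirror the whl quick start but should be treated as assumptions to verify against your installed version, and absolute timings will vary with hardware.

```python
import time
from paddleocr import PaddleOCR

# Recognition-only pipeline forced onto CPU; the constructor arguments are
# assumptions taken from the whl quick-start docs.
ocr = PaddleOCR(ocr_version='PP-OCRv3', lang='en', use_gpu=False, use_angle_cls=False)

img = './doc/imgs_words_en/word_10.png'
ocr.ocr(img, det=False, cls=False)  # warm-up run, excluded from timing

runs = 50
start = time.perf_counter()
for _ in range(runs):
    result = ocr.ocr(img, det=False, cls=False)
elapsed_ms = (time.perf_counter() - start) / runs * 1e3
print(result, 'avg per-line latency: %.1f ms' % elapsed_ms)
```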
diff --git a/doc/doc_en/algorithm_det_east_en.md b/doc/doc_en/algorithm_det_east_en.md index 3955809a49a595aa59717bafcfbb23146ae96bd2..07c434a9b162d9d373f5f357522cbd752be1afc1 100644 --- a/doc/doc_en/algorithm_det_east_en.md +++ b/doc/doc_en/algorithm_det_east_en.md @@ -40,7 +40,7 @@ Please prepare your environment referring to [prepare the environment](./environ The above EAST model is trained using the ICDAR2015 text detection public dataset. For the download of the dataset, please refer to [ocr_datasets](./dataset/ocr_datasets_en.md). -After the data download is complete, please refer to [Text Detection Training Tutorial](./detection.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. +After the data download is complete, please refer to [Text Detection Training Tutorial](./detection_en.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. diff --git a/doc/doc_en/algorithm_det_fcenet_en.md b/doc/doc_en/algorithm_det_fcenet_en.md index e15fb9a07ede3296d3de83c134457194d4639a1c..f3c51a91a486342b86828167de5c1b386b42cc66 100644 --- a/doc/doc_en/algorithm_det_fcenet_en.md +++ b/doc/doc_en/algorithm_det_fcenet_en.md @@ -37,7 +37,7 @@ Please prepare your environment referring to [prepare the environment](./environ The above FCE model is trained using the CTW1500 text detection public dataset. For the download of the dataset, please refer to [ocr_datasets](./dataset/ocr_datasets_en.md). -After the data download is complete, please refer to [Text Detection Training Tutorial](./detection.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. +After the data download is complete, please refer to [Text Detection Training Tutorial](./detection_en.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. ## 4. Inference and Deployment diff --git a/doc/doc_en/algorithm_det_psenet_en.md b/doc/doc_en/algorithm_det_psenet_en.md index d4cb3ea7d1e82a3f9c261c6e44cd6df6b0f6bf1e..3977a156ace3beb899e105bc381e27af6e825d6a 100644 --- a/doc/doc_en/algorithm_det_psenet_en.md +++ b/doc/doc_en/algorithm_det_psenet_en.md @@ -39,7 +39,7 @@ Please prepare your environment referring to [prepare the environment](./environ The above PSE model is trained using the ICDAR2015 text detection public dataset. For the download of the dataset, please refer to [ocr_datasets](./dataset/ocr_datasets_en.md). -After the data download is complete, please refer to [Text Detection Training Tutorial](./detection.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. +After the data download is complete, please refer to [Text Detection Training Tutorial](./detection_en.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. ## 4. 
Inference and Deployment diff --git a/doc/doc_en/algorithm_e2e_pgnet_en.md b/doc/doc_en/algorithm_e2e_pgnet_en.md index c7cb3221ccfd897e2fd9062a828c2fe0ceb42024..ab74c57bc3d4d97852641cd708a2dceea5732ba7 100644 --- a/doc/doc_en/algorithm_e2e_pgnet_en.md +++ b/doc/doc_en/algorithm_e2e_pgnet_en.md @@ -36,7 +36,7 @@ The results of detection and recognition are as follows: ## 2. Environment Configuration -Please refer to [Operation Environment Preparation](./environment_en.md) to configure PaddleOCR operating environment first, refer to [PaddleOCR Overview and Project Clone](./paddleOCR_overview_en.md) to clone the project +Please refer to [Operation Environment Preparation](./environment_en.md) to configure PaddleOCR operating environment first, refer to [Project Clone](./clone_en.md) to clone the project ## 3. Quick Use diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 18c9cd7d51bdf0129245afca8a759afab5d9d589..28aca7c0d171008156104fbcc786707538fd49ef 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -41,6 +41,12 @@ On Total-Text dataset, the text detection result is as follows: | --- | --- | --- | --- | --- | --- | |SAST|ResNet50_vd|89.63%|78.44%|83.66%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)| +On CTW1500 dataset, the text detection result is as follows: + +|Model|Backbone|Precision|Recall|Hmean| Download link| +| --- | --- | --- | --- | --- |---| +|FCE|ResNet50_dcn|88.39%|82.18%|85.27%| [trained model](https://paddleocr.bj.bcebos.com/contribution/det_r50_dcn_fce_ctw_v2.0_train.tar) | + **Note:** Additional data, like icdar2013, icdar2017, COCO-Text, ArT, was added to the model training of SAST. Download English public dataset in organized format used by PaddleOCR from: * [Baidu Drive](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (download code: 2bpi). 
* [Google Drive](https://drive.google.com/drive/folders/1ll2-XEVyCQLpJjawLDiRlvo_i4BqHCJe?usp=sharing) @@ -59,6 +65,8 @@ Supported text recognition algorithms (Click the link to get the tutorial): - [x] [SAR](./algorithm_rec_sar_en.md) - [x] [SEED](./algorithm_rec_seed_en.md) - [x] [SVTR](./algorithm_rec_svtr_en.md) +- [x] [ViTSTR](./algorithm_rec_vitstr_en.md) +- [x] [ABINet](./algorithm_rec_abinet_en.md) Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation result of these above text recognition (using MJSynth and SynthText for training, evaluate on IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE) is as follow: @@ -77,7 +85,8 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |SAR|Resnet31| 87.20% | rec_r31_sar | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar) | |SEED|Aster_Resnet| 85.35% | rec_resnet_stn_bilstm_att | [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_resnet_stn_bilstm_att.tar) | |SVTR|SVTR-Tiny| 89.25% | rec_svtr_tiny_none_ctc_en | [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/rec_svtr_tiny_none_ctc_en_train.tar) | - +|ViTSTR|ViTSTR| 79.82% | rec_vitstr_none_ce_en | [trained model](https://paddleocr.bj.bcebos.com/rec_vitstr_none_none_train.tar) | +|ABINet|Resnet45| 90.75% | rec_r45_abinet_en | [trained model](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar) | diff --git a/doc/doc_en/algorithm_rec_abinet_en.md b/doc/doc_en/algorithm_rec_abinet_en.md new file mode 100644 index 0000000000000000000000000000000000000000..767ca65f6411a7bc071ccafacc09d12bc160e6b6 --- /dev/null +++ b/doc/doc_en/algorithm_rec_abinet_en.md @@ -0,0 +1,136 @@ +# ABINet + +- [1. Introduction](#1) +- [2. Environment](#2) +- [3. Model Training / Evaluation / Prediction](#3) + - [3.1 Training](#3-1) + - [3.2 Evaluation](#3-2) + - [3.3 Prediction](#3-3) +- [4. Inference and Deployment](#4) + - [4.1 Python Inference](#4-1) + - [4.2 C++ Inference](#4-2) + - [4.3 Serving](#4-3) + - [4.4 More](#4-4) +- [5. FAQ](#5) + + +## 1. Introduction + +Paper: +> [ABINet: Read Like Humans: Autonomous, Bidirectional and Iterative Language Modeling for Scene Text Recognition](https://openaccess.thecvf.com/content/CVPR2021/papers/Fang_Read_Like_Humans_Autonomous_Bidirectional_and_Iterative_Language_Modeling_for_CVPR_2021_paper.pdf) +> Shancheng Fang and Hongtao Xie and Yuxin Wang and Zhendong Mao and Yongdong Zhang +> CVPR, 2021 + +Using MJSynth and SynthText two text recognition datasets for training, and evaluating on IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE datasets, the algorithm reproduction effect is as follows: + +|Model|Backbone|config|Acc|Download link| +| --- | --- | --- | --- | --- | +|ABINet|ResNet45|[rec_r45_abinet.yml](../../configs/rec/rec_r45_abinet.yml)|90.75%|[pretrained & trained model](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar)| + + +## 2. Environment +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. + + + +## 3. Model Training / Evaluation / Prediction + +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. + +Training: + +Specifically, after the data preparation is completed, the training can be started. 
The training command is as follows:
+
+```
+#Single GPU training (long training period, not recommended)
+python3 tools/train.py -c configs/rec/rec_r45_abinet.yml
+
+#Multi GPU training, specify the gpu number through the --gpus parameter
+python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_r45_abinet.yml
+```
+
+Evaluation:
+
+```
+# GPU evaluation
+python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec_r45_abinet.yml -o Global.pretrained_model={path/to/weights}/best_accuracy
+```
+
+Prediction:
+
+```
+# The configuration file used for prediction must match the training
+python3 tools/infer_rec.py -c configs/rec/rec_r45_abinet.yml -o Global.infer_img='./doc/imgs_words_en/word_10.png' Global.pretrained_model=./rec_r45_abinet_train/best_accuracy
+```
+
+
+## 4. Inference and Deployment
+
+
+### 4.1 Python Inference
+First, convert the model saved during the ABINet text recognition training process into an inference model ([model download link](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar)). You can use the following command to convert it:
+
+```
+python3 tools/export_model.py -c configs/rec/rec_r45_abinet.yml -o Global.pretrained_model=./rec_r45_abinet_train/best_accuracy Global.save_inference_dir=./inference/rec_r45_abinet
+```
+
+**Note:**
+- If you are training the model on your own dataset and have modified the dictionary file, please pay attention to modify the `character_dict_path` in the configuration file to the modified dictionary file.
+- If you modified the input size during training, please modify the `infer_shape` corresponding to ABINet in the `tools/export_model.py` file.
+
+After the conversion is successful, there are three files in the directory:
+```
+/inference/rec_r45_abinet/
+    ├── inference.pdiparams
+    ├── inference.pdiparams.info
+    └── inference.pdmodel
+```
+
+
+For ABINet text recognition model inference, the following commands can be executed:
+
+```
+python3 tools/infer/predict_rec.py --image_dir='./doc/imgs_words_en/word_10.png' --rec_model_dir='./inference/rec_r45_abinet/' --rec_algorithm='ABINet' --rec_image_shape='3,32,128' --rec_char_dict_path='./ppocr/utils/ic15_dict.txt'
+```
+
+![](../imgs_words_en/word_10.png)
+
+After executing the command, the prediction result (recognized text and score) of the image above is printed to the screen. An example is as follows:
+
+```shell
+Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9999995231628418)
+```
+
+
+### 4.2 C++ Inference
+
+Not supported
+
+
+### 4.3 Serving
+
+Not supported
+
+
+### 4.4 More
+
+Not supported
+
+
+## 5. FAQ
+
+1. Note that the MJSynth and SynthText datasets come from [ABINet repo](https://github.com/FangShancheng/ABINet).
+2. We use the pre-trained model provided by the ABINet authors for finetune training.
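As a supplement to the `predict_rec.py` command in section 4.1 of the ABINet page above, the exported inference model can also be driven directly through the Paddle Inference Python API. This is an editorial sketch: the resize-and-normalize pre-processing is a plausible simplification for the `3,32,128` input shape used above, not the exact transform from `tools/infer/predict_rec.py`, and the raw probabilities still need to be decoded against the character dictionary.

```python
import cv2
import numpy as np
from paddle.inference import Config, create_predictor

# Load the exported model from the directory produced by export_model.py.
config = Config('./inference/rec_r45_abinet/inference.pdmodel',
                './inference/rec_r45_abinet/inference.pdiparams')
config.disable_gpu()
predictor = create_predictor(config)

# Simplified pre-processing for the 3,32,128 input shape used above; the
# [-1, 1] normalization is an assumption, not the exact predict_rec.py code.
img = cv2.imread('./doc/imgs_words_en/word_10.png')
img = cv2.resize(img, (128, 32)).astype('float32')
img = (img / 255.0 - 0.5) / 0.5
img = np.ascontiguousarray(img.transpose(2, 0, 1)[np.newaxis, :])  # NCHW

input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(img)
predictor.run()
probs = predictor.get_output_handle(predictor.get_output_names()[0]).copy_to_cpu()
print(probs.shape)  # per-step character probabilities, to decode with the dict
```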
+ +## Citation + +```bibtex +@article{Fang2021ABINet, + title = {ABINet: Read Like Humans: Autonomous, Bidirectional and Iterative Language Modeling for Scene Text Recognition}, + author = {Shancheng Fang and Hongtao Xie and Yuxin Wang and Zhendong Mao and Yongdong Zhang}, + booktitle = {CVPR}, + year = {2021}, + url = {https://arxiv.org/abs/2103.06495}, + pages = {7098-7107} +} +``` diff --git a/doc/doc_en/algorithm_rec_aster_en.md b/doc/doc_en/algorithm_rec_aster_en.md index 1540681a19f94160e221c37173510395d0fd407f..b949cb5b37c985cc55a2aa01ea5ae4096946bb05 100644 --- a/doc/doc_en/algorithm_rec_aster_en.md +++ b/doc/doc_en/algorithm_rec_aster_en.md @@ -33,13 +33,13 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: diff --git a/doc/doc_en/algorithm_rec_crnn_en.md b/doc/doc_en/algorithm_rec_crnn_en.md index 571569ee445d756ca7bdfeea6d5f960187a5a666..8548c2fa625b713d7e7e278506ff5c46713303ed 100644 --- a/doc/doc_en/algorithm_rec_crnn_en.md +++ b/doc/doc_en/algorithm_rec_crnn_en.md @@ -33,13 +33,13 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: diff --git a/doc/doc_en/algorithm_rec_nrtr_en.md b/doc/doc_en/algorithm_rec_nrtr_en.md index 3f8fd0adee900cf889d70e8b78fb1122d54c7d08..309d7ab123065f15a599a1220ab3f39afffb9b60 100644 --- a/doc/doc_en/algorithm_rec_nrtr_en.md +++ b/doc/doc_en/algorithm_rec_nrtr_en.md @@ -12,6 +12,7 @@ - [4.3 Serving](#4-3) - [4.4 More](#4-4) - [5. FAQ](#5) +- [6. Release Note](#6) ## 1. 
Introduction @@ -25,17 +26,17 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval |Model|Backbone|config|Acc|Download link| | --- | --- | --- | --- | --- | -|NRTR|MTB|[rec_mtb_nrtr.yml](../../configs/rec/rec_mtb_nrtr.yml)|84.21%|[train model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar)| +|NRTR|MTB|[rec_mtb_nrtr.yml](../../configs/rec/rec_mtb_nrtr.yml)|84.21%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar)| ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: @@ -98,7 +99,7 @@ python3 tools/infer/predict_rec.py --image_dir='./doc/imgs_words_en/word_10.png' After executing the command, the prediction result (recognized text and score) of the image above is printed to the screen, an example is as follows: The result is as follows: ```shell -Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9265879392623901) +Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9465042352676392) ``` @@ -121,12 +122,146 @@ Not supported 1. In the `NRTR` paper, Beam search is used to decode characters, but the speed is slow. Beam search is not used by default here, and greedy search is used to decode characters. + +## 6. Release Note + +1. The release/2.6 version updates the NRTR code structure. The new version of NRTR can load the model parameters of the old version (release/2.5 and before), and you may use the following code to convert the old version model parameters to the new version model parameters: + +```python + + params = paddle.load('path/' + '.pdparams') # the old version parameters + state_dict = model.state_dict() # the new version model parameters + new_state_dict = {} + + for k1, v1 in state_dict.items(): + + k = k1 + if 'encoder' in k and 'self_attn' in k and 'qkv' in k and 'weight' in k: + + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')].transpose((1, 0, 2, 3)) + k = params[k_para.replace('qkv', 'conv2')].transpose((1, 0, 2, 3)) + v = params[k_para.replace('qkv', 'conv3')].transpose((1, 0, 2, 3)) + + new_state_dict[k1] = np.concatenate([q[:, :, 0, 0], k[:, :, 0, 0], v[:, :, 0, 0]], -1) + + elif 'encoder' in k and 'self_attn' in k and 'qkv' in k and 'bias' in k: + + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')] + k = params[k_para.replace('qkv', 'conv2')] + v = params[k_para.replace('qkv', 'conv3')] + + new_state_dict[k1] = np.concatenate([q, k, v], -1) + + elif 'encoder' in k and 'self_attn' in k and 'out_proj' in k: + + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + + elif 'encoder' in k and 'norm3' in k: + k_para = k[:13] + 'layers.' 
+ k[13:] + new_state_dict[k1] = params[k_para.replace('norm3', 'norm2')] + + elif 'encoder' in k and 'norm1' in k: + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + + + elif 'decoder' in k and 'self_attn' in k and 'qkv' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')].transpose((1, 0, 2, 3)) + k = params[k_para.replace('qkv', 'conv2')].transpose((1, 0, 2, 3)) + v = params[k_para.replace('qkv', 'conv3')].transpose((1, 0, 2, 3)) + new_state_dict[k1] = np.concatenate([q[:, :, 0, 0], k[:, :, 0, 0], v[:, :, 0, 0]], -1) + + elif 'decoder' in k and 'self_attn' in k and 'qkv' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + q = params[k_para.replace('qkv', 'conv1')] + k = params[k_para.replace('qkv', 'conv2')] + v = params[k_para.replace('qkv', 'conv3')] + new_state_dict[k1] = np.concatenate([q, k, v], -1) + + elif 'decoder' in k and 'self_attn' in k and 'out_proj' in k: + + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + + elif 'decoder' in k and 'cross_attn' in k and 'q' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + q = params[k_para.replace('q', 'conv1')].transpose((1, 0, 2, 3)) + new_state_dict[k1] = q[:, :, 0, 0] + + elif 'decoder' in k and 'cross_attn' in k and 'q' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + q = params[k_para.replace('q', 'conv1')] + new_state_dict[k1] = q + + elif 'decoder' in k and 'cross_attn' in k and 'kv' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + k = params[k_para.replace('kv', 'conv2')].transpose((1, 0, 2, 3)) + v = params[k_para.replace('kv', 'conv3')].transpose((1, 0, 2, 3)) + new_state_dict[k1] = np.concatenate([k[:, :, 0, 0], v[:, :, 0, 0]], -1) + + elif 'decoder' in k and 'cross_attn' in k and 'kv' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + k = params[k_para.replace('kv', 'conv2')] + v = params[k_para.replace('kv', 'conv3')] + new_state_dict[k1] = np.concatenate([k, v], -1) + + elif 'decoder' in k and 'cross_attn' in k and 'out_proj' in k: + + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('cross_attn', 'multihead_attn') + new_state_dict[k1] = params[k_para] + elif 'decoder' in k and 'norm' in k: + k_para = k[:13] + 'layers.' + k[13:] + new_state_dict[k1] = params[k_para] + elif 'mlp' in k and 'weight' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('fc', 'conv') + k_para = k_para.replace('mlp.', '') + w = params[k_para].transpose((1, 0, 2, 3)) + new_state_dict[k1] = w[:, :, 0, 0] + elif 'mlp' in k and 'bias' in k: + k_para = k[:13] + 'layers.' + k[13:] + k_para = k_para.replace('fc', 'conv') + k_para = k_para.replace('mlp.', '') + w = params[k_para] + new_state_dict[k1] = w + + else: + new_state_dict[k1] = params[k1] + + if list(new_state_dict[k1].shape) != list(v1.shape): + print(k1) + + + for k, v1 in state_dict.items(): + if k not in new_state_dict.keys(): + print(1, k) + elif list(new_state_dict[k].shape) != list(v1.shape): + print(2, k) + + + + model.set_state_dict(new_state_dict) + paddle.save(model.state_dict(), 'nrtrnew_from_old_params.pdparams') + +``` + +2. The new version has a clean code structure and improved inference speed compared with the old version. 
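As a follow-up to the conversion script in release note 1, a quick sanity check (an editorial addition that reuses the same `model` object and output path as the snippet above) can confirm that the converted checkpoint covers the new state dict key-for-key and shape-for-shape:

```python
import paddle

# Reuses `model` from the conversion snippet above: the converted parameter
# file should match the new architecture exactly before it is used.
converted = paddle.load('nrtrnew_from_old_params.pdparams')
reference = model.state_dict()

missing = [k for k in reference if k not in converted]
unexpected = [k for k in converted if k not in reference]
bad_shape = [k for k in reference
             if k in converted and list(converted[k].shape) != list(reference[k].shape)]
assert not (missing or unexpected or bad_shape), (missing, unexpected, bad_shape)
```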
+ ## Citation ```bibtex @article{Sheng2019NRTR, title = {NRTR: A No-Recurrence Sequence-to-Sequence Model For Scene Text Recognition}, - author = {Fenfen Sheng and Zhineng Chen andBo Xu}, + author = {Fenfen Sheng and Zhineng Chen and Bo Xu}, booktitle = {ICDAR}, year = {2019}, url = {http://arxiv.org/abs/1806.00926}, diff --git a/doc/doc_en/algorithm_rec_sar_en.md b/doc/doc_en/algorithm_rec_sar_en.md index 8c1e6dbbfa5cc05da4d7423a535c6db74cf8f4c3..24b87c10c3b2839909392bf3de0e0c850112fcdc 100644 --- a/doc/doc_en/algorithm_rec_sar_en.md +++ b/doc/doc_en/algorithm_rec_sar_en.md @@ -31,13 +31,13 @@ Note:In addition to using the two text recognition datasets MJSynth and SynthTex ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: diff --git a/doc/doc_en/algorithm_rec_seed_en.md b/doc/doc_en/algorithm_rec_seed_en.md index 21679f42fd6302228804db49d731f9b69ec692b2..f8d7ae6d3f34ab8a4f510c88002b22dbce7a10e8 100644 --- a/doc/doc_en/algorithm_rec_seed_en.md +++ b/doc/doc_en/algorithm_rec_seed_en.md @@ -31,13 +31,13 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: diff --git a/doc/doc_en/algorithm_rec_srn_en.md b/doc/doc_en/algorithm_rec_srn_en.md index c022a81f9e5797c531c79de7e793d44d9a22552c..1d7fc07dc29e0de021165fc5656cbca704b45284 100644 --- a/doc/doc_en/algorithm_rec_srn_en.md +++ b/doc/doc_en/algorithm_rec_srn_en.md @@ -30,13 +30,13 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). 
PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: diff --git a/doc/doc_en/algorithm_rec_starnet.md b/doc/doc_en/algorithm_rec_starnet.md new file mode 100644 index 0000000000000000000000000000000000000000..dbb53a9c737c16fa249483fa97b0b49cf25b2137 --- /dev/null +++ b/doc/doc_en/algorithm_rec_starnet.md @@ -0,0 +1,139 @@ +# STAR-Net + +- [1. Introduction](#1) +- [2. Environment](#2) +- [3. Model Training / Evaluation / Prediction](#3) + - [3.1 Training](#3-1) + - [3.2 Evaluation](#3-2) + - [3.3 Prediction](#3-3) +- [4. Inference and Deployment](#4) + - [4.1 Python Inference](#4-1) + - [4.2 C++ Inference](#4-2) + - [4.3 Serving](#4-3) + - [4.4 More](#4-4) +- [5. FAQ](#5) + + +## 1. Introduction + +Paper information: +> [STAR-Net: a spatial attention residue network for scene text recognition.](http://www.bmva.org/bmvc/2016/papers/paper043/paper043.pdf) +> Wei Liu, Chaofeng Chen, Kwan-Yee K. Wong, Zhizhong Su and Junyu Han. +> BMVC, pages 43.1-43.13, 2016 + +Refer to [DTRB](https://arxiv.org/abs/1904.01906) text Recognition Training and Evaluation Process . Using MJSynth and SynthText two text recognition datasets for training, and evaluating on IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE datasets, the algorithm reproduction effect is as follows: + +|Models|Backbone Networks|Avg Accuracy|Configuration Files|Download Links| +| --- | --- | --- | --- | --- | +|StarNet|Resnet34_vd|84.44%|[configs/rec/rec_r34_vd_tps_bilstm_ctc.yml](../../configs/rec/rec_r34_vd_tps_bilstm_ctc.yml)|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| +|StarNet|MobileNetV3|81.42%|[configs/rec/rec_mv3_tps_bilstm_ctc.yml](../../configs/rec/rec_mv3_tps_bilstm_ctc.yml)|[ trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| + + + +## 2. Environment +Please refer to [Operating Environment Preparation](./environment_en.md) to configure the PaddleOCR operating environment, and refer to [Project Clone](./clone_en.md) to clone the project code. + + +## 3. Model Training / Evaluation / Prediction + +Please refer to [Text Recognition Training Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Take the backbone network based on Resnet34_vd as an example: + + +### 3.1 Training +After the data preparation is complete, the training can be started. 
The training command is as follows:
+
+```
+#Single card training (long training period, not recommended)
+python3 tools/train.py -c configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
+
+#Multi-card training, specify the card number through the --gpus parameter
+python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
+```
+
+
+### 3.2 Evaluation
+
+```
+# GPU evaluation, Global.pretrained_model is the model to be evaluated
+python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec_r34_vd_tps_bilstm_ctc.yml -o Global.pretrained_model={path/to/weights}/best_accuracy
+```
+
+
+### 3.3 Prediction
+
+```
+# The configuration file used for prediction must match the training
+python3 tools/infer_rec.py -c configs/rec/rec_r34_vd_tps_bilstm_ctc.yml -o Global.pretrained_model={path/to/weights}/best_accuracy Global.infer_img=doc/imgs_words/en/word_1.png
+```
+
+
+## 4. Inference
+
+
+### 4.1 Python Inference
+First, convert the model saved during the STAR-Net text recognition training process into an inference model. Take the model trained on the MJSynth and SynthText text recognition datasets with the Resnet34_vd backbone network as an example ([model download address](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar)); it can be converted using the following command:
+
+```shell
+python3 tools/export_model.py -c configs/rec/rec_r34_vd_tps_bilstm_ctc.yml -o Global.pretrained_model=./rec_r34_vd_tps_bilstm_ctc_v2.0_train/best_accuracy Global.save_inference_dir=./inference/rec_starnet
+```
+
+For STAR-Net text recognition model inference, you can execute the following commands:
+
+```shell
+python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words_en/word_336.png" --rec_model_dir="./inference/rec_starnet/" --rec_image_shape="3, 32, 100" --rec_char_dict_path="./ppocr/utils/ic15_dict.txt"
+```
+
+![](../imgs_words_en/word_336.png)
+
+The inference results are as follows:
+
+```bash
+Predicts of ./doc/imgs_words_en/word_336.png:('super', 0.9999073)
+```
+
+**Attention** Since the above model refers to the [DTRB](https://arxiv.org/abs/1904.01906) text recognition training and evaluation process, it differs from the ultra-lightweight Chinese recognition model training in two aspects:
+
+- The image resolutions used during training are different. The above models are trained with a resolution of [3, 32, 100], while Chinese models are trained with [3, 32, 320] to ensure good recognition of long texts. The prediction program defaults to the resolution used for training Chinese models, i.e. [3, 32, 320], so when running inference with the above English model, the shape of the recognized image needs to be set through the parameter `rec_image_shape`.
+
+- Character list: the experiment in the DTRB paper covers only the 26 lowercase English letters and the 10 digits, 36 characters in total. All uppercase letters are converted to lowercase, and characters outside this set are ignored and treated as spaces. Therefore no character dictionary is taken as input; instead, a dictionary is generated by the following command, and the parameter `rec_char_dict_path` needs to be set during inference to the English dictionary "./ppocr/utils/ic15_dict.txt".
+ +``` +self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz" +dict_character = list(self.character_str) + + + ``` + + +### 4.2 C++ Inference + +After preparing the inference model, refer to the [cpp infer](../../deploy/cpp_infer/) tutorial to operate. + + +### 4.3 Serving + +After preparing the inference model, refer to the [pdserving](../../deploy/pdserving/) tutorial for Serving deployment, including two modes: Python Serving and C++ Serving. + + +### 4.4 More + +The STAR-Net model also supports the following inference deployment methods: + +- Paddle2ONNX Inference: After preparing the inference model, refer to the [paddle2onnx](../../deploy/paddle2onnx/) tutorial. + + +## 5. FAQ + +## Quote + +```bibtex +@inproceedings{liu2016star, + title={STAR-Net: a spatial attention residue network for scene text recognition.}, + author={Liu, Wei and Chen, Chaofeng and Wong, Kwan-Yee K and Su, Zhizhong and Han, Junyu}, + booktitle={BMVC}, + volume={2}, + pages={7}, + year={2016} +} +``` + + diff --git a/doc/doc_en/algorithm_rec_svtr_en.md b/doc/doc_en/algorithm_rec_svtr_en.md index 2e7deb4c077ce508773c4789e2e76bdda7dfe8c8..37cd35f35a2025cbb55ff85fe27b50e5d6e556aa 100644 --- a/doc/doc_en/algorithm_rec_svtr_en.md +++ b/doc/doc_en/algorithm_rec_svtr_en.md @@ -34,7 +34,7 @@ The accuracy (%) and model files of SVTR on the public dataset of scene text rec ## 2. Environment -Please refer to ["Environment Preparation"](./environment.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone.md) to clone the project code. +Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code. #### Dataset Preparation @@ -44,7 +44,7 @@ Please refer to ["Environment Preparation"](./environment.md) to configure the P ## 3. Model Training / Evaluation / Prediction -Please refer to [Text Recognition Tutorial](./recognition.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. +Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**. Training: @@ -88,7 +88,6 @@ python3 tools/export_model.py -c configs/rec/rec_svtrnet.yml -o Global.pretraine **Note:** - If you are training the model on your own dataset and have modified the dictionary file, please pay attention to modify the `character_dict_path` in the configuration file to the modified dictionary file. -- If you modified the input size during training, please modify the `infer_shape` corresponding to SVTR in the `tools/export_model.py` file. After the conversion is successful, there are three files in the directory: ``` diff --git a/doc/doc_en/algorithm_rec_vitstr_en.md b/doc/doc_en/algorithm_rec_vitstr_en.md new file mode 100644 index 0000000000000000000000000000000000000000..a6f9e2f15df69e7949a4b9713274d9b83ff98f60 --- /dev/null +++ b/doc/doc_en/algorithm_rec_vitstr_en.md @@ -0,0 +1,134 @@ +# ViTSTR + +- [1. Introduction](#1) +- [2. Environment](#2) +- [3. Model Training / Evaluation / Prediction](#3) + - [3.1 Training](#3-1) + - [3.2 Evaluation](#3-2) + - [3.3 Prediction](#3-3) +- [4. Inference and Deployment](#4) + - [4.1 Python Inference](#4-1) + - [4.2 C++ Inference](#4-2) + - [4.3 Serving](#4-3) + - [4.4 More](#4-4) +- [5. FAQ](#5) + + +## 1. 
Introduction
+
+Paper:
+> [Vision Transformer for Fast and Efficient Scene Text Recognition](https://arxiv.org/abs/2105.08582)
+> Rowel Atienza
+> ICDAR, 2021
+
+Using MJSynth and SynthText two text recognition datasets for training, and evaluating on IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE datasets, the algorithm reproduction effect is as follows:
+
+|Model|Backbone|config|Acc|Download link|
+| --- | --- | --- | --- | --- |
+|ViTSTR|ViTSTR|[rec_vitstr_none_ce.yml](../../configs/rec/rec_vitstr_none_ce.yml)|79.82%|[trained model](https://paddleocr.bj.bcebos.com/rec_vitstr_none_none_train.tar)|
+
+## 2. Environment
+Please refer to ["Environment Preparation"](./environment_en.md) to configure the PaddleOCR environment, and refer to ["Project Clone"](./clone_en.md) to clone the project code.
+
+
+## 3. Model Training / Evaluation / Prediction
+
+Please refer to [Text Recognition Tutorial](./recognition_en.md). PaddleOCR modularizes the code, and training different recognition models only requires **changing the configuration file**.
+
+Training:
+
+Specifically, after the data preparation is completed, the training can be started. The training command is as follows:
+
+```
+#Single GPU training (long training period, not recommended)
+python3 tools/train.py -c configs/rec/rec_vitstr_none_ce.yml
+
+#Multi GPU training, specify the gpu number through the --gpus parameter
+python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_vitstr_none_ce.yml
+```
+
+Evaluation:
+
+```
+# GPU evaluation
+python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec_vitstr_none_ce.yml -o Global.pretrained_model={path/to/weights}/best_accuracy
+```
+
+Prediction:
+
+```
+# The configuration file used for prediction must match the training
+python3 tools/infer_rec.py -c configs/rec/rec_vitstr_none_ce.yml -o Global.infer_img='./doc/imgs_words_en/word_10.png' Global.pretrained_model=./rec_vitstr_none_ce_train/best_accuracy
+```
+
+
+## 4. Inference and Deployment
+
+
+### 4.1 Python Inference
+First, convert the model saved during the ViTSTR text recognition training process into an inference model ([model download link](https://paddleocr.bj.bcebos.com/rec_vitstr_none_none_train.tar)). You can use the following command to convert it:
+
+```
+python3 tools/export_model.py -c configs/rec/rec_vitstr_none_ce.yml -o Global.pretrained_model=./rec_vitstr_none_ce_train/best_accuracy Global.save_inference_dir=./inference/rec_vitstr
+```
+
+**Note:**
+- If you are training the model on your own dataset and have modified the dictionary file, please pay attention to modify the `character_dict_path` in the configuration file to the modified dictionary file.
+- If you modified the input size during training, please modify the `infer_shape` corresponding to ViTSTR in the `tools/export_model.py` file.
+
+After the conversion is successful, there are three files in the directory:
+```
+/inference/rec_vitstr/
+    ├── inference.pdiparams
+    ├── inference.pdiparams.info
+    └── inference.pdmodel
+```
+
+
+For ViTSTR text recognition model inference, the following commands can be executed:
+
+```
+python3 tools/infer/predict_rec.py --image_dir='./doc/imgs_words_en/word_10.png' --rec_model_dir='./inference/rec_vitstr/' --rec_algorithm='ViTSTR' --rec_image_shape='1,224,224' --rec_char_dict_path='./ppocr/utils/EN_symbol_dict.txt'
+```
+
+![](../imgs_words_en/word_10.png)
+
+After executing the command, the prediction result (recognized text and score) of the image above is printed to the screen. An example is as follows:
+
+```shell
+Predicts of ./doc/imgs_words_en/word_10.png:('pain', 0.9998350143432617)
+```
+
+
+### 4.2 C++ Inference
+
+Not supported
+
+
+### 4.3 Serving
+
+Not supported
+
+
+### 4.4 More
+
+Not supported
+
+
+## 5. FAQ
+
+1. In the `ViTSTR` paper, pre-trained weights on ImageNet1k are used for initialization. We did not use pre-trained weights in training, and the final accuracy was unchanged or even improved.
+
+## Citation
+
+```bibtex
+@article{Atienza2021ViTSTR,
+  title     = {Vision Transformer for Fast and Efficient Scene Text Recognition},
+  author    = {Rowel Atienza},
+  booktitle = {ICDAR},
+  year      = {2021},
+  url       = {https://arxiv.org/abs/2105.08582}
+}
+```
diff --git a/doc/doc_en/detection_en.md b/doc/doc_en/detection_en.md
index 76e0f8509b92dfaae62dce7ba2b4b73d39da1600..f85bf585cb66332d90de8d66ed315cb04ece7636 100644
--- a/doc/doc_en/detection_en.md
+++ b/doc/doc_en/detection_en.md
@@ -159,7 +159,7 @@ python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1
     -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 ```

-**Note:** When using multi-machine and multi-gpu training, you need to replace the ips value in the above command with the address of your machine, and the machines need to be able to ping each other. In addition, training needs to be launched separately on multiple machines. The command to view the ip address of the machine is `ifconfig`.
+**Note:** (1) When using multi-machine and multi-gpu training, you need to replace the ips value in the above command with the address of your machine, and the machines need to be able to ping each other. (2) Training needs to be launched separately on multiple machines. The command to view the ip address of the machine is `ifconfig`. (3) For more details about the distributed training speedup ratio, please refer to [Distributed Training Tutorial](./distributed_training_en.md).

 ### 2.6 Training with knowledge distillation
diff --git a/doc/doc_en/distributed_training.md b/doc/doc_en/distributed_training_en.md
similarity index 70%
rename from doc/doc_en/distributed_training.md
rename to doc/doc_en/distributed_training_en.md
index 2822ee5e4ea52720a458e4060d8a09be7b98846b..5a219ed2b494d6239096ff634dfdc702c4be9419 100644
--- a/doc/doc_en/distributed_training.md
+++ b/doc/doc_en/distributed_training_en.md
@@ -40,11 +40,17 @@ python3 -m paddle.distributed.launch \

 ## Performance comparison

-* Based on 26W public recognition dataset (LSVT, rctw, mtwi), training on single 8-card P40 and dual 8-card P40, the final time consumption is as follows.
+* On two 8-card P40 graphics cards, the final time consumption and speedup ratio for the public recognition dataset (LSVT, RCTW, MTWI) containing 260k images are as follows.
-| Model | Config file | Number of machines | Number of GPUs per machine | Training time | Recognition acc | Speedup ratio | -| :-------: | :------------: | :----------------: | :----------------------------: | :------------------: | :--------------: | :-----------: | -| CRNN | configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml | 1 | 8 | 60h | 66.7% | - | -| CRNN | configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml | 2 | 8 | 40h | 67.0% | 150% | -It can be seen that the training time is shortened from 60h to 40h, the speedup ratio can reach 150% (60h / 40h), and the efficiency is 75% (60h / (40h * 2)). +| Model | Config file | Recognition acc | single 8-card training time | two 8-card training time | Speedup ratio | +|------|-----|--------|--------|--------|-----| +| CRNN | [rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml) | 67.0% | 2.50d | 1.67d | **1.5** | + + +* On four 8-card V100 graphics cards, the final time consumption and speedup ratio for full data are as follows. + + +| Model | Config file | Recognition acc | single 8-card training time | four 8-card training time | Speedup ratio | +|------|-----|--------|--------|--------|-----| +| SVTR | [ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml) | 74.0% | 10d | 2.84d | **3.5** | diff --git a/doc/doc_en/knowledge_distillation_en.md b/doc/doc_en/knowledge_distillation_en.md index bd36907c98c6d556fe1dea85712ece0e717fe426..52725e5c0586b7f7b3e8fdc86d0c24ea38030d53 100755 --- a/doc/doc_en/knowledge_distillation_en.md +++ b/doc/doc_en/knowledge_distillation_en.md @@ -438,10 +438,10 @@ Architecture: ``` If DML is used, that is, the method of two small models learning from each other, the Teacher network structure in the above configuration file needs to be set to the same configuration as the Student model. -Refer to the configuration file for details. [ch_PP-OCRv3_det_dml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.4/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml) +Refer to the configuration file for details. 
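The speedup columns in the two tables above follow directly from the wall-clock columns, and dividing the speedup by the growth in machine count gives the scaling efficiency (about 0.75 for the P40 run, about 0.88 for the V100 run). A small editorial sketch of the arithmetic:

```python
def scaling(single_days, multi_days, machines):
    """Speedup and scaling efficiency relative to the single-machine baseline."""
    speedup = single_days / multi_days
    return speedup, speedup / machines

# Numbers taken from the tables above.
print(scaling(2.50, 1.67, 2))   # CRNN, two 8-card machines  -> (~1.5, ~0.75)
print(scaling(10.0, 2.84, 4))   # SVTR, four 8-card machines -> (~3.5, ~0.88)
```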
[ch_PP-OCRv3_det_dml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_dml.yml) -The following describes the configuration file parameters [ch_PP-OCRv3_det_cml.yml](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.4/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml): +The following describes the configuration file parameters [ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml): ``` Architecture: diff --git a/doc/doc_en/models_list_en.md b/doc/doc_en/models_list_en.md index 8e8c1f2fe11bcd0748d556d34fd184fed4b3a86f..c52f71dfe4124302b8cb308980a6228a89589bd6 100644 --- a/doc/doc_en/models_list_en.md +++ b/doc/doc_en/models_list_en.md @@ -20,7 +20,7 @@ The downloadable models provided by PaddleOCR include `inference model`, `traine |model type|model format|description| |--- | --- | --- | -|inference model|inference.pdmodel、inference.pdiparams|Used for inference based on Paddle inference engine,[detail](./inference_en.md)| +|inference model|inference.pdmodel、inference.pdiparams|Used for inference based on Paddle inference engine,[detail](./inference_ppocr_en.md)| |trained model, pre-trained model|\*.pdparams、\*.pdopt、\*.states |The checkpoints model saved in the training process, which stores the parameters of the model, mostly used for model evaluation and continuous training.| |nb model|\*.nb| Model optimized by Paddle-Lite, which is suitable for mobile-side deployment scenarios (Paddle-Lite is needed for nb model deployment). | @@ -37,7 +37,7 @@ Relationship of the above models is as follows. |model name|description|config|model size|download| | --- | --- | --- | --- | --- | -|ch_PP-OCRv3_det_slim| [New] slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 1.1M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/ch/ch_PP-OCRv3_det_slim_distill_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.nb)| +|ch_PP-OCRv3_det_slim| [New] slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 1.1M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_distill_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_slim_infer.nb)| |ch_PP-OCRv3_det| [New] Original lightweight model, supporting Chinese, English, multilingual text detection |[ch_PP-OCRv3_det_cml.yml](../../configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml)| 3.8M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar)| |ch_PP-OCRv2_det_slim| [New] slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection|[ch_PP-OCRv2_det_cml.yml](../../configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml)| 3M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_slim_quant_infer.tar)| |ch_PP-OCRv2_det| [New] Original lightweight model, supporting Chinese, English, multilingual text 
detection|[ch_PP-OCRv2_det_cml.yml](../../configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml)|3M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar)| @@ -75,7 +75,7 @@ Relationship of the above models is as follows. |model name|description|config|model size|download| | --- | --- | --- | --- | --- | -|ch_PP-OCRv3_rec_slim | [New] Slim qunatization with distillation lightweight model, supporting Chinese, English text recognition |[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 4.9M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/ch/ch_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.nb) | +|ch_PP-OCRv3_rec_slim | [New] Slim qunatization with distillation lightweight model, supporting Chinese, English text recognition |[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 4.9M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.nb) | |ch_PP-OCRv3_rec| [New] Original lightweight model, supporting Chinese, English, multilingual text recognition |[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)| 12.4M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar) | |ch_PP-OCRv2_rec_slim| Slim qunatization with distillation lightweight model, supporting Chinese, English text recognition|[ch_PP-OCRv2_rec.yml](../../configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec.yml)| 9M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_train.tar) | |ch_PP-OCRv2_rec| Original lightweight model, supporting Chinese, English, multilingual text recognition |[ch_PP-OCRv2_rec_distillation.yml](../../configs/rec/ch_PP-OCRv2/ch_PP-OCRv2_rec_distillation.yml)|8.5M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar) | @@ -91,7 +91,7 @@ Relationship of the above models is as follows. 
|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
-|en_PP-OCRv3_rec_slim | [New] Slim qunatization with distillation lightweight model, supporting english, English text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 3.2M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.nb) |
+|en_PP-OCRv3_rec_slim | [New] Slim quantization with distillation lightweight model, supporting English text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 3.2M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_train.tar) / [nb model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_slim_infer.nb) |
|en_PP-OCRv3_rec| [New] Original lightweight model, supporting english, English, multilingual text recognition |[en_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml)| 9.6M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_train.tar) |
|en_number_mobile_slim_v2.0_rec|Slim pruned and quantized lightweight model, supporting English and number recognition|[rec_en_number_lite_train.yml](../../configs/rec/multi_language/rec_en_number_lite_train.yml)| 2.7M | [inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/en_number_mobile_v2.0_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/en_number_mobile_v2.0_rec_slim_train.tar) |
|en_number_mobile_v2.0_rec|Original lightweight model, supporting English and number recognition|[rec_en_number_lite_train.yml](../../configs/rec/multi_language/rec_en_number_lite_train.yml)|2.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_train.tar) |
@@ -108,7 +108,7 @@ Relationship of the above models is as follows.
| ka_PP-OCRv3_rec | ppocr/utils/dict/ka_dict.txt | Lightweight model for Kannada recognition |[ka_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml)|9.9M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ka_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ka_PP-OCRv3_rec_train.tar) | | ta_PP-OCRv3_rec | ppocr/utils/dict/ta_dict.txt |Lightweight model for Tamil recognition|[ta_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml)|9.6M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ta_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/ta_PP-OCRv3_rec_train.tar) | | latin_PP-OCRv3_rec | ppocr/utils/dict/latin_dict.txt | Lightweight model for latin recognition | [latin_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml) |9.7M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/latin_PP-OCRv3_rec_train.tar) | -| arabic_PP-OCRv3_rec | ppocr/utils/dict/arabic_dict.txt | Lightweight model for arabic recognition | [arabic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/rec_arabic_lite_train.yml) |9.6M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_train.tar) | +| arabic_PP-OCRv3_rec | ppocr/utils/dict/arabic_dict.txt | Lightweight model for arabic recognition | [arabic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/arabic_PP-OCRv3_rec.yml) |9.6M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/arabic_PP-OCRv3_rec_train.tar) | | cyrillic_PP-OCRv3_rec | ppocr/utils/dict/cyrillic_dict.txt | Lightweight model for cyrillic recognition | [cyrillic_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml) |9.6M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/cyrillic_PP-OCRv3_rec_train.tar) | | devanagari_PP-OCRv3_rec | ppocr/utils/dict/devanagari_dict.txt | Lightweight model for devanagari recognition | [devanagari_PP-OCRv3_rec.yml](../../configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml) |9.9M|[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv3/multilingual/devanagari_PP-OCRv3_rec_train.tar) | diff --git a/doc/doc_en/multi_languages_en.md b/doc/doc_en/multi_languages_en.md index 4696a3e842242517d19bcac7d7bdef3b4c233b12..d9cb180f706eebdba4727f7909499487794545b9 100644 --- a/doc/doc_en/multi_languages_en.md +++ b/doc/doc_en/multi_languages_en.md @@ -187,10 +187,10 @@ In addition to installing the whl package for quick forecasting, PPOCR also provides a variety of forecasting deployment methods. 
If necessary, you can read related documents: -- [Python Inference](./inference_en.md) -- [C++ Inference](../../deploy/cpp_infer/readme_en.md) +- [Python Inference](./inference_ppocr_en.md) +- [C++ Inference](../../deploy/cpp_infer/readme.md) - [Serving](../../deploy/hubserving/readme_en.md) -- [Mobile](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme_en.md) +- [Mobile](../../deploy/lite/readme.md) - [Benchmark](./benchmark_en.md) diff --git a/doc/doc_en/ppocr_introduction_en.md b/doc/doc_en/ppocr_introduction_en.md index 8fe6bc683ac69bdff0e3b4297f2eaa95b934fa17..d28ccb3529a46bdf0d3fd1d1c81f14137d10f2ea 100644 --- a/doc/doc_en/ppocr_introduction_en.md +++ b/doc/doc_en/ppocr_introduction_en.md @@ -29,16 +29,16 @@ PP-OCR pipeline is as follows: PP-OCR system is in continuous optimization. At present, PP-OCR and PP-OCRv2 have been released: -PP-OCR adopts 19 effective strategies from 8 aspects including backbone network selection and adjustment, prediction head design, data augmentation, learning rate transformation strategy, regularization parameter selection, pre-training model use, and automatic model tailoring and quantization to optimize and slim down the models of each module (as shown in the green box above). The final results are an ultra-lightweight Chinese and English OCR model with an overall size of 3.5M and a 2.8M English digital OCR model. For more details, please refer to the PP-OCR technical article (https://arxiv.org/abs/2009.09941). +PP-OCR adopts 19 effective strategies from 8 aspects including backbone network selection and adjustment, prediction head design, data augmentation, learning rate transformation strategy, regularization parameter selection, pre-training model use, and automatic model tailoring and quantization to optimize and slim down the models of each module (as shown in the green box above). The final results are an ultra-lightweight Chinese and English OCR model with an overall size of 3.5M and a 2.8M English digital OCR model. For more details, please refer to [PP-OCR technical report](https://arxiv.org/abs/2009.09941). #### PP-OCRv2 -On the basis of PP-OCR, PP-OCRv2 is further optimized in five aspects. The detection model adopts CML(Collaborative Mutual Learning) knowledge distillation strategy and CopyPaste data expansion strategy. The recognition model adopts LCNet lightweight backbone network, U-DML knowledge distillation strategy and enhanced CTC loss function improvement (as shown in the red box above), which further improves the inference speed and prediction effect. For more details, please refer to the technical report of PP-OCRv2 (https://arxiv.org/abs/2109.03144). +On the basis of PP-OCR, PP-OCRv2 is further optimized in five aspects. The detection model adopts CML(Collaborative Mutual Learning) knowledge distillation strategy and CopyPaste data expansion strategy. The recognition model adopts LCNet lightweight backbone network, U-DML knowledge distillation strategy and enhanced CTC loss function improvement (as shown in the red box above), which further improves the inference speed and prediction effect. For more details, please refer to [PP-OCRv2 technical report](https://arxiv.org/abs/2109.03144). #### PP-OCRv3 PP-OCRv3 upgraded the detection model and recognition model in 9 aspects based on PP-OCRv2: - PP-OCRv3 detector upgrades the CML(Collaborative Mutual Learning) text detection strategy proposed in PP-OCRv2, and further optimizes the effect of teacher model and student model respectively. 
In the optimization of teacher model, a pan module with large receptive field named LK-PAN is proposed and the DML distillation strategy is adopted; In the optimization of student model, a FPN module with residual attention mechanism named RSE-FPN is proposed. -- PP-OCRv3 recognizer is optimized based on text recognition algorithm [SVTR](https://arxiv.org/abs/2205.00159). SVTR no longer adopts RNN by introducing transformers structure, which can mine the context information of text line image more effectively, so as to improve the ability of text recognition. PP-OCRv3 adopts lightweight text recognition network SVTR_LCNet, guided training of CTC loss by attention loss, data augmentation strategy TextConAug, better pre-trained model by self-supervised TextRotNet, UDML(Unified Deep Mutual Learning), and UIM (Unlabeled Images Mining) to accelerate the model and improve the effect. +- PP-OCRv3 recognizer is optimized based on text recognition algorithm [SVTR](https://arxiv.org/abs/2205.00159). SVTR no longer adopts RNN by introducing transformers structure, which can mine the context information of text line image more effectively, so as to improve the ability of text recognition. PP-OCRv3 adopts lightweight text recognition network SVTR_LCNet, guided training of CTC by attention, data augmentation strategy TextConAug, better pre-trained model by self-supervised TextRotNet, UDML(Unified Deep Mutual Learning), and UIM (Unlabeled Images Mining) to accelerate the model and improve the effect. PP-OCRv3 pipeline is as follows: @@ -46,7 +46,7 @@ PP-OCRv3 pipeline is as follows:
-For more details, please refer to [PP-OCRv3 technical report](./PP-OCRv3_introduction_en.md). +For more details, please refer to [PP-OCRv3 technical report](https://arxiv.org/abs/2206.03001v2). ## 2. Features diff --git a/doc/doc_en/quickstart_en.md b/doc/doc_en/quickstart_en.md index d7aeb7773021aa6cf8f4d71298588915e5938fab..c678dc47625f4289a93621144bf5577b059d52b3 100644 --- a/doc/doc_en/quickstart_en.md +++ b/doc/doc_en/quickstart_en.md @@ -119,7 +119,18 @@ If you do not use the provided test image, you can replace the following `--imag ['PAIN', 0.9934559464454651] ``` -If you need to use the 2.0 model, please specify the parameter `--ocr_version PP-OCR`, paddleocr uses the PP-OCRv3 model by default(`--ocr_version PP-OCRv3`). More whl package usage can be found in [whl package](./whl_en.md) +**Version** +paddleocr uses the PP-OCRv3 model by default(`--ocr_version PP-OCRv3`). If you want to use other versions, you can set the parameter `--ocr_version`, the specific version description is as follows: +| version name | description | +| --- | --- | +| PP-OCRv3 | support Chinese and English detection and recognition, direction classifier, support multilingual recognition | +| PP-OCRv2 | only supports Chinese and English detection and recognition, direction classifier, multilingual model is not updated | +| PP-OCR | support Chinese and English detection and recognition, direction classifier, support multilingual recognition | + +If you want to add your own trained model, you can add model links and keys in [paddleocr](../../paddleocr.py) and recompile. + +More whl package usage can be found in [whl package](./whl_en.md) + #### 2.1.2 Multi-language Model diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index 60b4a1b26b373adc562ab9624e55ffe59a775a35..7d31b0ffe28c59ad3397d06fa178bcf8cbb822e9 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -306,7 +306,7 @@ python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1 -o Global.pretrained_model=./pretrain_models/rec_mv3_none_bilstm_ctc_v2.0_train ``` -**Note:** When using multi-machine and multi-gpu training, you need to replace the ips value in the above command with the address of your machine, and the machines need to be able to ping each other. In addition, training needs to be launched separately on multiple machines. The command to view the ip address of the machine is `ifconfig`. +**Note:** (1) When using multi-machine and multi-gpu training, you need to replace the ips value in the above command with the address of your machine, and the machines need to be able to ping each other. (2) Training needs to be launched separately on multiple machines. The command to view the ip address of the machine is `ifconfig`. (3) For more details about the distributed training speedup ratio, please refer to [Distributed Training Tutorial](./distributed_training_en.md). 
### 2.6 Training with Knowledge Distillation diff --git a/doc/doc_en/update_en.md b/doc/doc_en/update_en.md index a900219b2462524425fc4303ea3bd571efcbab8f..a44dd0d70c611e1b5fb59d1e58b382704d0bbae8 100644 --- a/doc/doc_en/update_en.md +++ b/doc/doc_en/update_en.md @@ -1,8 +1,8 @@ # RECENT UPDATES - 2022.5.9 release PaddleOCR v2.5, including: - - [PP-OCRv3](./doc/doc_en/ppocr_introduction_en.md#pp-ocrv3): With comparable speed, the effect of Chinese scene is further improved by 5% compared with PP-OCRv2, the effect of English scene is improved by 11%, and the average recognition accuracy of 80 language multilingual models is improved by more than 5%. - - [PPOCRLabelv2](./PPOCRLabel): Add the annotation function for table recognition task, key information extraction task and irregular text image. - - Interactive e-book [*"Dive into OCR"*](./doc/doc_en/ocr_book_en.md), covers the cutting-edge theory and code practice of OCR full stack technology. + - [PP-OCRv3](./ppocr_introduction_en.md#pp-ocrv3): With comparable speed, the effect of Chinese scene is further improved by 5% compared with PP-OCRv2, the effect of English scene is improved by 11%, and the average recognition accuracy of 80 language multilingual models is improved by more than 5%. + - [PPOCRLabelv2](../../PPOCRLabel): Add the annotation function for table recognition task, key information extraction task and irregular text image. + - Interactive e-book [*"Dive into OCR"*](./ocr_book_en.md), covers the cutting-edge theory and code practice of OCR full stack technology. - 2022.5.7 Add support for metric and model logging during training to [Weights & Biases](https://docs.wandb.ai/). - 2021.12.21 OCR open source online course starts. The lesson starts at 8:30 every night and lasts for ten days. Free registration: https://aistudio.baidu.com/aistudio/course/introduce/25207 - 2021.12.21 release PaddleOCR v2.4, release 1 text detection algorithm (PSENet), 3 text recognition algorithms (NRTR、SEED、SAR), 1 key information extraction algorithm (SDMGR) and 3 DocVQA algorithms (LayoutLM、LayoutLMv2,LayoutXLM). 
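The `--ocr_version` switch documented in the quickstart hunk above is also exposed on the Python API. A minimal sketch, assuming the paddleocr whl package is installed; the image path is a placeholder:

```python
from paddleocr import PaddleOCR

# Request PP-OCRv2 explicitly instead of the PP-OCRv3 default;
# 'doc/imgs_en/img_12.jpg' is a placeholder test image.
ocr = PaddleOCR(ocr_version='PP-OCRv2', use_angle_cls=True, lang='ch')
result = ocr.ocr('doc/imgs_en/img_12.jpg', cls=True)
for line in result:
    print(line)  # each entry: [box points, (text, confidence)]
```

The `paddleocr.py` hunk below supports exactly this kind of call: it registers an angle-classifier (`cls`) download URL under the `PP-OCRv2` section of `MODEL_URLS`, so requesting the classifier with that version resolves directly instead of needing a fallback. Roughly, such a nested lookup with a version fallback behaves like the sketch below; the helper and constant names are assumptions, not the repository's exact code:

```python
# Illustrative only: nested MODEL_URLS-style lookup with version fallback.
DEFAULT_VERSION = 'PP-OCR'  # assumed fallback version for this sketch

def resolve_model_url(model_urls, version, model_type, lang):
    if model_type not in model_urls.get(version, {}):
        # e.g. ('PP-OCRv2', 'cls') had no entry before the hunk below
        version = DEFAULT_VERSION
    return model_urls[version][model_type][lang]['url']
```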
diff --git a/paddleocr.py b/paddleocr.py index a1265f79def7018a5586be954127e5b7fdba011e..470dc60da3b15195bcd401aff5e50be5a2cfd13e 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -154,7 +154,13 @@ MODEL_URLS = { 'https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar', 'dict_path': './ppocr/utils/ppocr_keys_v1.txt' } - } + }, + 'cls': { + 'ch': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar', + } + }, }, 'PP-OCR': { 'det': { diff --git a/ppocr/data/imaug/__init__.py b/ppocr/data/imaug/__init__.py index 548832fb0d116ba2de622bd97562b591d74501d8..63dfda91f8d0eb200d3c635fda43670039375784 100644 --- a/ppocr/data/imaug/__init__.py +++ b/ppocr/data/imaug/__init__.py @@ -22,8 +22,10 @@ from .make_shrink_map import MakeShrinkMap from .random_crop_data import EastRandomCropData, RandomCropImgMask from .make_pse_gt import MakePseGt + from .rec_img_aug import RecAug, RecConAug, RecResizeImg, ClsResizeImg, \ - SRNRecResizeImg, NRTRRecResizeImg, SARRecResizeImg, PRENResizeImg + SRNRecResizeImg, GrayRecResizeImg, SARRecResizeImg, PRENResizeImg, \ + ABINetRecResizeImg, SVTRRecResizeImg, ABINetRecAug from .ssl_img_aug import SSLRotateResize from .randaugment import RandAugment from .copy_paste import CopyPaste diff --git a/ppocr/data/imaug/abinet_aug.py b/ppocr/data/imaug/abinet_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..eefdc75d5a5c0ac3f7136bf22a2adb31129bd313 --- /dev/null +++ b/ppocr/data/imaug/abinet_aug.py @@ -0,0 +1,407 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This code is refer from: +https://github.com/FangShancheng/ABINet/blob/main/transforms.py +""" +import math +import numbers +import random + +import cv2 +import numpy as np +from paddle.vision.transforms import Compose, ColorJitter + + +def sample_asym(magnitude, size=None): + return np.random.beta(1, 4, size) * magnitude + + +def sample_sym(magnitude, size=None): + return (np.random.beta(4, 4, size=size) - 0.5) * 2 * magnitude + + +def sample_uniform(low, high, size=None): + return np.random.uniform(low, high, size=size) + + +def get_interpolation(type='random'): + if type == 'random': + choice = [ + cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA + ] + interpolation = choice[random.randint(0, len(choice) - 1)] + elif type == 'nearest': + interpolation = cv2.INTER_NEAREST + elif type == 'linear': + interpolation = cv2.INTER_LINEAR + elif type == 'cubic': + interpolation = cv2.INTER_CUBIC + elif type == 'area': + interpolation = cv2.INTER_AREA + else: + raise TypeError( + 'Interpolation types only nearest, linear, cubic, area are supported!' + ) + return interpolation + + +class CVRandomRotation(object): + def __init__(self, degrees=15): + assert isinstance(degrees, + numbers.Number), "degree should be a single number." + assert degrees >= 0, "degree must be positive." 
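+        # Note: the rotation angle is drawn in get_params() via sample_sym(),
+        # i.e. a Beta(4, 4) sample rescaled to [-degrees, degrees], so sampled
+        # angles cluster near zero rather than being uniform.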
+ self.degrees = degrees + + @staticmethod + def get_params(degrees): + return sample_sym(degrees) + + def __call__(self, img): + angle = self.get_params(self.degrees) + src_h, src_w = img.shape[:2] + M = cv2.getRotationMatrix2D( + center=(src_w / 2, src_h / 2), angle=angle, scale=1.0) + abs_cos, abs_sin = abs(M[0, 0]), abs(M[0, 1]) + dst_w = int(src_h * abs_sin + src_w * abs_cos) + dst_h = int(src_h * abs_cos + src_w * abs_sin) + M[0, 2] += (dst_w - src_w) / 2 + M[1, 2] += (dst_h - src_h) / 2 + + flags = get_interpolation() + return cv2.warpAffine( + img, + M, (dst_w, dst_h), + flags=flags, + borderMode=cv2.BORDER_REPLICATE) + + +class CVRandomAffine(object): + def __init__(self, degrees, translate=None, scale=None, shear=None): + assert isinstance(degrees, + numbers.Number), "degree should be a single number." + assert degrees >= 0, "degree must be positive." + self.degrees = degrees + + if translate is not None: + assert isinstance(translate, (tuple, list)) and len(translate) == 2, \ + "translate should be a list or tuple and it must be of length 2." + for t in translate: + if not (0.0 <= t <= 1.0): + raise ValueError( + "translation values should be between 0 and 1") + self.translate = translate + + if scale is not None: + assert isinstance(scale, (tuple, list)) and len(scale) == 2, \ + "scale should be a list or tuple and it must be of length 2." + for s in scale: + if s <= 0: + raise ValueError("scale values should be positive") + self.scale = scale + + if shear is not None: + if isinstance(shear, numbers.Number): + if shear < 0: + raise ValueError( + "If shear is a single number, it must be positive.") + self.shear = [shear] + else: + assert isinstance(shear, (tuple, list)) and (len(shear) == 2), \ + "shear should be a list or tuple and it must be of length 2." + self.shear = shear + else: + self.shear = shear + + def _get_inverse_affine_matrix(self, center, angle, translate, scale, + shear): + # https://github.com/pytorch/vision/blob/v0.4.0/torchvision/transforms/functional.py#L717 + from numpy import sin, cos, tan + + if isinstance(shear, numbers.Number): + shear = [shear, 0] + + if not isinstance(shear, (tuple, list)) and len(shear) == 2: + raise ValueError( + "Shear should be a single value or a tuple/list containing " + + "two values. 
Got {}".format(shear)) + + rot = math.radians(angle) + sx, sy = [math.radians(s) for s in shear] + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = cos(rot - sy) / cos(sy) + b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot) + c = sin(rot - sy) / cos(sy) + d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot) + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + M = [d, -b, 0, -c, a, 0] + M = [x / scale for x in M] + + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty) + M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + M[2] += cx + M[5] += cy + return M + + @staticmethod + def get_params(degrees, translate, scale_ranges, shears, height): + angle = sample_sym(degrees) + if translate is not None: + max_dx = translate[0] * height + max_dy = translate[1] * height + translations = (np.round(sample_sym(max_dx)), + np.round(sample_sym(max_dy))) + else: + translations = (0, 0) + + if scale_ranges is not None: + scale = sample_uniform(scale_ranges[0], scale_ranges[1]) + else: + scale = 1.0 + + if shears is not None: + if len(shears) == 1: + shear = [sample_sym(shears[0]), 0.] + elif len(shears) == 2: + shear = [sample_sym(shears[0]), sample_sym(shears[1])] + else: + shear = 0.0 + + return angle, translations, scale, shear + + def __call__(self, img): + src_h, src_w = img.shape[:2] + angle, translate, scale, shear = self.get_params( + self.degrees, self.translate, self.scale, self.shear, src_h) + + M = self._get_inverse_affine_matrix((src_w / 2, src_h / 2), angle, + (0, 0), scale, shear) + M = np.array(M).reshape(2, 3) + + startpoints = [(0, 0), (src_w - 1, 0), (src_w - 1, src_h - 1), + (0, src_h - 1)] + project = lambda x, y, a, b, c: int(a * x + b * y + c) + endpoints = [(project(x, y, *M[0]), project(x, y, *M[1])) + for x, y in startpoints] + + rect = cv2.minAreaRect(np.array(endpoints)) + bbox = cv2.boxPoints(rect).astype(dtype=np.int) + max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max() + min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min() + + dst_w = int(max_x - min_x) + dst_h = int(max_y - min_y) + M[0, 2] += (dst_w - src_w) / 2 + M[1, 2] += (dst_h - src_h) / 2 + + # add translate + dst_w += int(abs(translate[0])) + dst_h += int(abs(translate[1])) + if translate[0] < 0: M[0, 2] += abs(translate[0]) + if translate[1] < 0: M[1, 2] += abs(translate[1]) + + flags = get_interpolation() + return cv2.warpAffine( + img, + M, (dst_w, dst_h), + flags=flags, + borderMode=cv2.BORDER_REPLICATE) + + +class CVRandomPerspective(object): + def __init__(self, distortion=0.5): + self.distortion = distortion + + def get_params(self, width, height, distortion): + offset_h = sample_asym( + distortion * height / 2, size=4).astype(dtype=np.int) + offset_w = sample_asym( + distortion * width / 2, size=4).astype(dtype=np.int) + topleft = (offset_w[0], offset_h[0]) + topright = (width - 1 - offset_w[1], offset_h[1]) + botright = (width - 1 - offset_w[2], height - 1 - offset_h[2]) + botleft = (offset_w[3], height - 1 - offset_h[3]) + + startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), + (0, height - 1)] + endpoints = [topleft, topright, botright, botleft] + return np.array( + startpoints, dtype=np.float32), np.array( + endpoints, dtype=np.float32) + + def __call__(self, img): + height, width = img.shape[:2] + startpoints, endpoints = self.get_params(width, height, self.distortion) + M = 
cv2.getPerspectiveTransform(startpoints, endpoints) + + # TODO: more robust way to crop image + rect = cv2.minAreaRect(endpoints) + bbox = cv2.boxPoints(rect).astype(dtype=np.int) + max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max() + min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min() + min_x, min_y = max(min_x, 0), max(min_y, 0) + + flags = get_interpolation() + img = cv2.warpPerspective( + img, + M, (max_x, max_y), + flags=flags, + borderMode=cv2.BORDER_REPLICATE) + img = img[min_y:, min_x:] + return img + + +class CVRescale(object): + def __init__(self, factor=4, base_size=(128, 512)): + """ Define image scales using gaussian pyramid and rescale image to target scale. + + Args: + factor: the decayed factor from base size, factor=4 keeps target scale by default. + base_size: base size the build the bottom layer of pyramid + """ + if isinstance(factor, numbers.Number): + self.factor = round(sample_uniform(0, factor)) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + self.factor = round(sample_uniform(factor[0], factor[1])) + else: + raise Exception('factor must be number or list with length 2') + # assert factor is valid + self.base_h, self.base_w = base_size[:2] + + def __call__(self, img): + if self.factor == 0: return img + src_h, src_w = img.shape[:2] + cur_w, cur_h = self.base_w, self.base_h + scale_img = cv2.resize( + img, (cur_w, cur_h), interpolation=get_interpolation()) + for _ in range(self.factor): + scale_img = cv2.pyrDown(scale_img) + scale_img = cv2.resize( + scale_img, (src_w, src_h), interpolation=get_interpolation()) + return scale_img + + +class CVGaussianNoise(object): + def __init__(self, mean=0, var=20): + self.mean = mean + if isinstance(var, numbers.Number): + self.var = max(int(sample_asym(var)), 1) + elif isinstance(var, (tuple, list)) and len(var) == 2: + self.var = int(sample_uniform(var[0], var[1])) + else: + raise Exception('degree must be number or list with length 2') + + def __call__(self, img): + noise = np.random.normal(self.mean, self.var**0.5, img.shape) + img = np.clip(img + noise, 0, 255).astype(np.uint8) + return img + + +class CVMotionBlur(object): + def __init__(self, degrees=12, angle=90): + if isinstance(degrees, numbers.Number): + self.degree = max(int(sample_asym(degrees)), 1) + elif isinstance(degrees, (tuple, list)) and len(degrees) == 2: + self.degree = int(sample_uniform(degrees[0], degrees[1])) + else: + raise Exception('degree must be number or list with length 2') + self.angle = sample_uniform(-angle, angle) + + def __call__(self, img): + M = cv2.getRotationMatrix2D((self.degree // 2, self.degree // 2), + self.angle, 1) + motion_blur_kernel = np.zeros((self.degree, self.degree)) + motion_blur_kernel[self.degree // 2, :] = 1 + motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, + (self.degree, self.degree)) + motion_blur_kernel = motion_blur_kernel / self.degree + img = cv2.filter2D(img, -1, motion_blur_kernel) + img = np.clip(img, 0, 255).astype(np.uint8) + return img + + +class CVGeometry(object): + def __init__(self, + degrees=15, + translate=(0.3, 0.3), + scale=(0.5, 2.), + shear=(45, 15), + distortion=0.5, + p=0.5): + self.p = p + type_p = random.random() + if type_p < 0.33: + self.transforms = CVRandomRotation(degrees=degrees) + elif type_p < 0.66: + self.transforms = CVRandomAffine( + degrees=degrees, translate=translate, scale=scale, shear=shear) + else: + self.transforms = CVRandomPerspective(distortion=distortion) + + def __call__(self, img): + if random.random() < self.p: + return self.transforms(img) + 
else: + return img + + +class CVDeterioration(object): + def __init__(self, var, degrees, factor, p=0.5): + self.p = p + transforms = [] + if var is not None: + transforms.append(CVGaussianNoise(var=var)) + if degrees is not None: + transforms.append(CVMotionBlur(degrees=degrees)) + if factor is not None: + transforms.append(CVRescale(factor=factor)) + + random.shuffle(transforms) + transforms = Compose(transforms) + self.transforms = transforms + + def __call__(self, img): + if random.random() < self.p: + + return self.transforms(img) + else: + return img + + +class CVColorJitter(object): + def __init__(self, + brightness=0.5, + contrast=0.5, + saturation=0.5, + hue=0.1, + p=0.5): + self.p = p + self.transforms = ColorJitter( + brightness=brightness, + contrast=contrast, + saturation=saturation, + hue=hue) + + def __call__(self, img): + if random.random() < self.p: return self.transforms(img) + else: return img diff --git a/ppocr/data/imaug/fce_targets.py b/ppocr/data/imaug/fce_targets.py index 4d1903c0a7316989ca24d8c4a934d7b99a2a2ff6..8c64276e26665d2779d35154bf9cd77edddad580 100644 --- a/ppocr/data/imaug/fce_targets.py +++ b/ppocr/data/imaug/fce_targets.py @@ -107,17 +107,20 @@ class FCENetTargets: for i in range(1, n): current_line_len = i * delta_length - while current_line_len >= length_cumsum[current_edge_ind + 1]: + while current_edge_ind + 1 < len(length_cumsum) and current_line_len >= length_cumsum[current_edge_ind + 1]: current_edge_ind += 1 + current_edge_end_shift = current_line_len - length_cumsum[ current_edge_ind] + + if current_edge_ind >= len(length_list): + break end_shift_ratio = current_edge_end_shift / length_list[ current_edge_ind] current_point = line[current_edge_ind] + (line[current_edge_ind + 1] - line[current_edge_ind] ) * end_shift_ratio resampled_line.append(current_point) - resampled_line.append(line[-1]) resampled_line = np.array(resampled_line) @@ -328,6 +331,8 @@ class FCENetTargets: resampled_top_line, resampled_bot_line = self.resample_sidelines( top_line, bot_line, self.resample_step) resampled_bot_line = resampled_bot_line[::-1] + if len(resampled_top_line) != len(resampled_bot_line): + continue center_line = (resampled_top_line + resampled_bot_line) / 2 line_head_shrink_len = norm(resampled_top_line[0] - diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 02a5187dad27b76d04e866de45333d79383c1347..312d6dc9ad25bfa73aa9009f932fe6f3d3ca7644 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -23,7 +23,6 @@ import string from shapely.geometry import LineString, Point, Polygon import json import copy - from ppocr.utils.logging import get_logger @@ -74,9 +73,10 @@ class DetLabelEncode(object): s = pts.sum(axis=1) rect[0] = pts[np.argmin(s)] rect[2] = pts[np.argmax(s)] - diff = np.diff(pts, axis=1) - rect[1] = pts[np.argmin(diff)] - rect[3] = pts[np.argmax(diff)] + tmp = np.delete(pts, (np.argmin(s), np.argmax(s)), axis=0) + diff = np.diff(np.array(tmp), axis=1) + rect[1] = tmp[np.argmin(diff)] + rect[3] = tmp[np.argmax(diff)] return rect def expand_points_num(self, boxes): @@ -157,37 +157,6 @@ class BaseRecLabelEncode(object): return text_list -class NRTRLabelEncode(BaseRecLabelEncode): - """ Convert between text-label and text-index """ - - def __init__(self, - max_text_length, - character_dict_path=None, - use_space_char=False, - **kwargs): - - super(NRTRLabelEncode, self).__init__( - max_text_length, character_dict_path, use_space_char) - - def __call__(self, data): - text = data['label'] - text = 
self.encode(text) - if text is None: - return None - if len(text) >= self.max_text_len - 1: - return None - data['length'] = np.array(len(text)) - text.insert(0, 2) - text.append(3) - text = text + [0] * (self.max_text_len - len(text)) - data['label'] = np.array(text) - return data - - def add_special_char(self, dict_character): - dict_character = ['blank', '', '', ''] + dict_character - return dict_character - - class CTCLabelEncode(BaseRecLabelEncode): """ Convert between text-label and text-index """ @@ -438,12 +407,14 @@ class KieLabelEncode(object): texts.append(ann['transcription']) text_ind = [self.dict[c] for c in text if c in self.dict] text_inds.append(text_ind) - if 'label' in anno.keys(): + if 'label' in ann.keys(): labels.append(ann['label']) - elif 'key_cls' in anno.keys(): - labels.append(anno['key_cls']) + elif 'key_cls' in ann.keys(): + labels.append(ann['key_cls']) else: - raise ValueError("Cannot found 'key_cls' in ann.keys(), please check your training annotation.") + raise ValueError( + "Cannot found 'key_cls' in ann.keys(), please check your training annotation." + ) edges.append(ann.get('edge', 0)) ann_infos = dict( image=data['image'], @@ -1044,3 +1015,99 @@ class MultiLabelEncode(BaseRecLabelEncode): data_out['label_sar'] = sar['label'] data_out['length'] = ctc['length'] return data_out + + +class NRTRLabelEncode(BaseRecLabelEncode): + """ Convert between text-label and text-index """ + + def __init__(self, + max_text_length, + character_dict_path=None, + use_space_char=False, + **kwargs): + + super(NRTRLabelEncode, self).__init__( + max_text_length, character_dict_path, use_space_char) + + def __call__(self, data): + text = data['label'] + text = self.encode(text) + if text is None: + return None + if len(text) >= self.max_text_len - 1: + return None + data['length'] = np.array(len(text)) + text.insert(0, 2) + text.append(3) + text = text + [0] * (self.max_text_len - len(text)) + data['label'] = np.array(text) + return data + + def add_special_char(self, dict_character): + dict_character = ['blank', '', '', ''] + dict_character + return dict_character + + +class ViTSTRLabelEncode(BaseRecLabelEncode): + """ Convert between text-label and text-index """ + + def __init__(self, + max_text_length, + character_dict_path=None, + use_space_char=False, + ignore_index=0, + **kwargs): + + super(ViTSTRLabelEncode, self).__init__( + max_text_length, character_dict_path, use_space_char) + self.ignore_index = ignore_index + + def __call__(self, data): + text = data['label'] + text = self.encode(text) + if text is None: + return None + if len(text) >= self.max_text_len: + return None + data['length'] = np.array(len(text)) + text.insert(0, self.ignore_index) + text.append(1) + text = text + [self.ignore_index] * (self.max_text_len + 2 - len(text)) + data['label'] = np.array(text) + return data + + def add_special_char(self, dict_character): + dict_character = ['', ''] + dict_character + return dict_character + + +class ABINetLabelEncode(BaseRecLabelEncode): + """ Convert between text-label and text-index """ + + def __init__(self, + max_text_length, + character_dict_path=None, + use_space_char=False, + ignore_index=100, + **kwargs): + + super(ABINetLabelEncode, self).__init__( + max_text_length, character_dict_path, use_space_char) + self.ignore_index = ignore_index + + def __call__(self, data): + text = data['label'] + text = self.encode(text) + if text is None: + return None + if len(text) >= self.max_text_len: + return None + data['length'] = np.array(len(text)) + 
text.append(0) + text = text + [self.ignore_index] * (self.max_text_len + 1 - len(text)) + data['label'] = np.array(text) + return data + + def add_special_char(self, dict_character): + dict_character = [''] + dict_character + return dict_character diff --git a/ppocr/data/imaug/operators.py b/ppocr/data/imaug/operators.py index 09736515e7a388e191a12826e1e9e348e2fcde86..5397d71ccb466235e64f85e1eb9365ba03d2aa17 100644 --- a/ppocr/data/imaug/operators.py +++ b/ppocr/data/imaug/operators.py @@ -67,39 +67,6 @@ class DecodeImage(object): return data -class NRTRDecodeImage(object): - """ decode image """ - - def __init__(self, img_mode='RGB', channel_first=False, **kwargs): - self.img_mode = img_mode - self.channel_first = channel_first - - def __call__(self, data): - img = data['image'] - if six.PY2: - assert type(img) is str and len( - img) > 0, "invalid input 'img' in DecodeImage" - else: - assert type(img) is bytes and len( - img) > 0, "invalid input 'img' in DecodeImage" - img = np.frombuffer(img, dtype='uint8') - - img = cv2.imdecode(img, 1) - - if img is None: - return None - if self.img_mode == 'GRAY': - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - elif self.img_mode == 'RGB': - assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape) - img = img[:, :, ::-1] - img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - if self.channel_first: - img = img.transpose((2, 0, 1)) - data['image'] = img - return data - - class NormalizeImage(object): """ normalize image such as substract mean, divide std """ diff --git a/ppocr/data/imaug/rec_img_aug.py b/ppocr/data/imaug/rec_img_aug.py index 7483dffe5b6d9a0a2204702757fcb49762a1cc7a..26773d0a516dfb0877453c7a5c8c8a2b5da92045 100644 --- a/ppocr/data/imaug/rec_img_aug.py +++ b/ppocr/data/imaug/rec_img_aug.py @@ -19,16 +19,109 @@ import random import copy from PIL import Image from .text_image_aug import tia_perspective, tia_stretch, tia_distort +from .abinet_aug import CVGeometry, CVDeterioration, CVColorJitter +from paddle.vision.transforms import Compose class RecAug(object): - def __init__(self, use_tia=True, aug_prob=0.4, **kwargs): - self.use_tia = use_tia - self.aug_prob = aug_prob + def __init__(self, + tia_prob=0.4, + crop_prob=0.4, + reverse_prob=0.4, + noise_prob=0.4, + jitter_prob=0.4, + blur_prob=0.4, + hsv_aug_prob=0.4, + **kwargs): + self.tia_prob = tia_prob + self.bda = BaseDataAugmentation(crop_prob, reverse_prob, noise_prob, + jitter_prob, blur_prob, hsv_aug_prob) def __call__(self, data): img = data['image'] - img = warp(img, 10, self.use_tia, self.aug_prob) + h, w, _ = img.shape + + # tia + if random.random() <= self.tia_prob: + if h >= 20 and w >= 20: + img = tia_distort(img, random.randint(3, 6)) + img = tia_stretch(img, random.randint(3, 6)) + img = tia_perspective(img) + + # bda + data['image'] = img + data = self.bda(data) + return data + + +class BaseDataAugmentation(object): + def __init__(self, + crop_prob=0.4, + reverse_prob=0.4, + noise_prob=0.4, + jitter_prob=0.4, + blur_prob=0.4, + hsv_aug_prob=0.4, + **kwargs): + self.crop_prob = crop_prob + self.reverse_prob = reverse_prob + self.noise_prob = noise_prob + self.jitter_prob = jitter_prob + self.blur_prob = blur_prob + self.hsv_aug_prob = hsv_aug_prob + + def __call__(self, data): + img = data['image'] + h, w, _ = img.shape + + if random.random() <= self.crop_prob and h >= 20 and w >= 20: + img = get_crop(img) + + if random.random() <= self.blur_prob: + img = blur(img) + + if random.random() <= self.hsv_aug_prob: + img = hsv_aug(img) + + if random.random() <= 
self.jitter_prob: + img = jitter(img) + + if random.random() <= self.noise_prob: + img = add_gasuss_noise(img) + + if random.random() <= self.reverse_prob: + img = 255 - img + + data['image'] = img + return data + + +class ABINetRecAug(object): + def __init__(self, + geometry_p=0.5, + deterioration_p=0.25, + colorjitter_p=0.25, + **kwargs): + self.transforms = Compose([ + CVGeometry( + degrees=45, + translate=(0.0, 0.0), + scale=(0.5, 2.), + shear=(45, 15), + distortion=0.5, + p=geometry_p), CVDeterioration( + var=20, degrees=6, factor=4, p=deterioration_p), + CVColorJitter( + brightness=0.5, + contrast=0.5, + saturation=0.5, + hue=0.1, + p=colorjitter_p) + ]) + + def __call__(self, data): + img = data['image'] + img = self.transforms(img) data['image'] = img return data @@ -87,46 +180,6 @@ class ClsResizeImg(object): return data -class NRTRRecResizeImg(object): - def __init__(self, image_shape, resize_type, padding=False, **kwargs): - self.image_shape = image_shape - self.resize_type = resize_type - self.padding = padding - - def __call__(self, data): - img = data['image'] - img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - image_shape = self.image_shape - if self.padding: - imgC, imgH, imgW = image_shape - # todo: change to 0 and modified image shape - h = img.shape[0] - w = img.shape[1] - ratio = w / float(h) - if math.ceil(imgH * ratio) > imgW: - resized_w = imgW - else: - resized_w = int(math.ceil(imgH * ratio)) - resized_image = cv2.resize(img, (resized_w, imgH)) - norm_img = np.expand_dims(resized_image, -1) - norm_img = norm_img.transpose((2, 0, 1)) - resized_image = norm_img.astype(np.float32) / 128. - 1. - padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32) - padding_im[:, :, 0:resized_w] = resized_image - data['image'] = padding_im - return data - if self.resize_type == 'PIL': - image_pil = Image.fromarray(np.uint8(img)) - img = image_pil.resize(self.image_shape, Image.ANTIALIAS) - img = np.array(img) - if self.resize_type == 'OpenCV': - img = cv2.resize(img, self.image_shape) - norm_img = np.expand_dims(img, -1) - norm_img = norm_img.transpose((2, 0, 1)) - data['image'] = norm_img.astype(np.float32) / 128. - 1. - return data - - class RecResizeImg(object): def __init__(self, image_shape, @@ -207,6 +260,84 @@ class PRENResizeImg(object): return data +class GrayRecResizeImg(object): + def __init__(self, + image_shape, + resize_type, + inter_type='Image.ANTIALIAS', + scale=True, + padding=False, + **kwargs): + self.image_shape = image_shape + self.resize_type = resize_type + self.padding = padding + self.inter_type = eval(inter_type) + self.scale = scale + + def __call__(self, data): + img = data['image'] + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + image_shape = self.image_shape + if self.padding: + imgC, imgH, imgW = image_shape + # todo: change to 0 and modified image shape + h = img.shape[0] + w = img.shape[1] + ratio = w / float(h) + if math.ceil(imgH * ratio) > imgW: + resized_w = imgW + else: + resized_w = int(math.ceil(imgH * ratio)) + resized_image = cv2.resize(img, (resized_w, imgH)) + norm_img = np.expand_dims(resized_image, -1) + norm_img = norm_img.transpose((2, 0, 1)) + resized_image = norm_img.astype(np.float32) / 128. - 1. 
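+            # Pixel values are now mapped from [0, 255] to roughly [-1, 1]
+            # (x / 128 - 1), so the zero-filled canvas below pads with the
+            # equivalent of mid-gray (original pixel value 128).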
+ padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32) + padding_im[:, :, 0:resized_w] = resized_image + data['image'] = padding_im + return data + if self.resize_type == 'PIL': + image_pil = Image.fromarray(np.uint8(img)) + img = image_pil.resize(self.image_shape, self.inter_type) + img = np.array(img) + if self.resize_type == 'OpenCV': + img = cv2.resize(img, self.image_shape) + norm_img = np.expand_dims(img, -1) + norm_img = norm_img.transpose((2, 0, 1)) + if self.scale: + data['image'] = norm_img.astype(np.float32) / 128. - 1. + else: + data['image'] = norm_img.astype(np.float32) / 255. + return data + + +class ABINetRecResizeImg(object): + def __init__(self, image_shape, **kwargs): + self.image_shape = image_shape + + def __call__(self, data): + img = data['image'] + norm_img, valid_ratio = resize_norm_img_abinet(img, self.image_shape) + data['image'] = norm_img + data['valid_ratio'] = valid_ratio + return data + + +class SVTRRecResizeImg(object): + def __init__(self, image_shape, padding=True, **kwargs): + self.image_shape = image_shape + self.padding = padding + + def __call__(self, data): + img = data['image'] + + norm_img, valid_ratio = resize_norm_img(img, self.image_shape, + self.padding) + data['image'] = norm_img + data['valid_ratio'] = valid_ratio + return data + + def resize_norm_img_sar(img, image_shape, width_downsample_ratio=0.25): imgC, imgH, imgW_min, imgW_max = image_shape h = img.shape[0] @@ -325,6 +456,26 @@ def resize_norm_img_srn(img, image_shape): return np.reshape(img_black, (c, row, col)).astype(np.float32) +def resize_norm_img_abinet(img, image_shape): + imgC, imgH, imgW = image_shape + + resized_image = cv2.resize( + img, (imgW, imgH), interpolation=cv2.INTER_LINEAR) + resized_w = imgW + resized_image = resized_image.astype('float32') + resized_image = resized_image / 255. + + mean = np.array([0.485, 0.456, 0.406]) + std = np.array([0.229, 0.224, 0.225]) + resized_image = ( + resized_image - mean[None, None, ...]) / std[None, None, ...] 
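+    # (0.485, 0.456, 0.406) and (0.229, 0.224, 0.225) are the standard
+    # ImageNet channel statistics; the tensor is still HWC at this point
+    # and is transposed to CHW immediately below.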
+ resized_image = resized_image.transpose((2, 0, 1)) + resized_image = resized_image.astype('float32') + + valid_ratio = min(1.0, float(resized_w / imgW)) + return resized_image, valid_ratio + + def srn_other_inputs(image_shape, num_heads, max_text_length): imgC, imgH, imgW = image_shape @@ -359,7 +510,7 @@ def flag(): return 1 if random.random() > 0.5000001 else -1 -def cvtColor(img): +def hsv_aug(img): """ cvtColor """ @@ -427,50 +578,6 @@ def get_crop(image): return crop_img -class Config: - """ - Config - """ - - def __init__(self, use_tia): - self.anglex = random.random() * 30 - self.angley = random.random() * 15 - self.anglez = random.random() * 10 - self.fov = 42 - self.r = 0 - self.shearx = random.random() * 0.3 - self.sheary = random.random() * 0.05 - self.borderMode = cv2.BORDER_REPLICATE - self.use_tia = use_tia - - def make(self, w, h, ang): - """ - make - """ - self.anglex = random.random() * 5 * flag() - self.angley = random.random() * 5 * flag() - self.anglez = -1 * random.random() * int(ang) * flag() - self.fov = 42 - self.r = 0 - self.shearx = 0 - self.sheary = 0 - self.borderMode = cv2.BORDER_REPLICATE - self.w = w - self.h = h - - self.perspective = self.use_tia - self.stretch = self.use_tia - self.distort = self.use_tia - - self.crop = True - self.affine = False - self.reverse = True - self.noise = True - self.jitter = True - self.blur = True - self.color = True - - def rad(x): """ rad @@ -554,48 +661,3 @@ def get_warpAffine(config): rz = np.array([[np.cos(rad(anglez)), np.sin(rad(anglez)), 0], [-np.sin(rad(anglez)), np.cos(rad(anglez)), 0]], np.float32) return rz - - -def warp(img, ang, use_tia=True, prob=0.4): - """ - warp - """ - h, w, _ = img.shape - config = Config(use_tia=use_tia) - config.make(w, h, ang) - new_img = img - - if config.distort: - img_height, img_width = img.shape[0:2] - if random.random() <= prob and img_height >= 20 and img_width >= 20: - new_img = tia_distort(new_img, random.randint(3, 6)) - - if config.stretch: - img_height, img_width = img.shape[0:2] - if random.random() <= prob and img_height >= 20 and img_width >= 20: - new_img = tia_stretch(new_img, random.randint(3, 6)) - - if config.perspective: - if random.random() <= prob: - new_img = tia_perspective(new_img) - - if config.crop: - img_height, img_width = img.shape[0:2] - if random.random() <= prob and img_height >= 20 and img_width >= 20: - new_img = get_crop(new_img) - - if config.blur: - if random.random() <= prob: - new_img = blur(new_img) - if config.color: - if random.random() <= prob: - new_img = cvtColor(new_img) - if config.jitter: - new_img = jitter(new_img) - if config.noise: - if random.random() <= prob: - new_img = add_gasuss_noise(new_img) - if config.reverse: - if random.random() <= prob: - new_img = 255 - new_img - return new_img diff --git a/ppocr/data/simple_dataset.py b/ppocr/data/simple_dataset.py index b5da9b8898423facf888839f941dff01caa03643..402f1e38fed9e32722e2dd160f10f779028807a3 100644 --- a/ppocr/data/simple_dataset.py +++ b/ppocr/data/simple_dataset.py @@ -33,7 +33,7 @@ class SimpleDataSet(Dataset): self.delimiter = dataset_config.get('delimiter', '\t') label_file_list = dataset_config.pop('label_file_list') data_source_num = len(label_file_list) - ratio_list = dataset_config.get("ratio_list", [1.0]) + ratio_list = dataset_config.get("ratio_list", 1.0) if isinstance(ratio_list, (float, int)): ratio_list = [float(ratio_list)] * int(data_source_num) diff --git a/ppocr/losses/__init__.py b/ppocr/losses/__init__.py index 
de8419b7c1cf6a30ab7195a1cbcbb10a5e52642d..7bea87f62f335a9a47c881d4bc789ce34aaa734a 100755 --- a/ppocr/losses/__init__.py +++ b/ppocr/losses/__init__.py @@ -30,7 +30,7 @@ from .det_fce_loss import FCELoss from .rec_ctc_loss import CTCLoss from .rec_att_loss import AttentionLoss from .rec_srn_loss import SRNLoss -from .rec_nrtr_loss import NRTRLoss +from .rec_ce_loss import CELoss from .rec_sar_loss import SARLoss from .rec_aster_loss import AsterLoss from .rec_pren_loss import PRENLoss @@ -60,7 +60,7 @@ def build_loss(config): support_dict = [ 'DBLoss', 'PSELoss', 'EASTLoss', 'SASTLoss', 'FCELoss', 'CTCLoss', 'ClsLoss', 'AttentionLoss', 'SRNLoss', 'PGLoss', 'CombinedLoss', - 'NRTRLoss', 'TableAttentionLoss', 'SARLoss', 'AsterLoss', 'SDMGRLoss', + 'CELoss', 'TableAttentionLoss', 'SARLoss', 'AsterLoss', 'SDMGRLoss', 'VQASerTokenLayoutLMLoss', 'LossFromOutput', 'PRENLoss', 'MultiLoss' ] config = copy.deepcopy(config) diff --git a/ppocr/losses/rec_aster_loss.py b/ppocr/losses/rec_aster_loss.py index fbb99d29a638540b02649a8912051339c08b22dd..52605e46db35339cc22f7f1e6642456bfaf02f11 100644 --- a/ppocr/losses/rec_aster_loss.py +++ b/ppocr/losses/rec_aster_loss.py @@ -27,12 +27,12 @@ class CosineEmbeddingLoss(nn.Layer): self.epsilon = 1e-12 def forward(self, x1, x2, target): - similarity = paddle.fluid.layers.reduce_sum( + similarity = paddle.sum( x1 * x2, dim=-1) / (paddle.norm( x1, axis=-1) * paddle.norm( x2, axis=-1) + self.epsilon) one_list = paddle.full_like(target, fill_value=1) - out = paddle.fluid.layers.reduce_mean( + out = paddle.mean( paddle.where( paddle.equal(target, one_list), 1. - similarity, paddle.maximum( diff --git a/ppocr/losses/rec_ce_loss.py b/ppocr/losses/rec_ce_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..614384de863c15b106aef831f8e938b89dadc246 --- /dev/null +++ b/ppocr/losses/rec_ce_loss.py @@ -0,0 +1,66 @@ +import paddle +from paddle import nn +import paddle.nn.functional as F + + +class CELoss(nn.Layer): + def __init__(self, + smoothing=False, + with_all=False, + ignore_index=-1, + **kwargs): + super(CELoss, self).__init__() + if ignore_index >= 0: + self.loss_func = nn.CrossEntropyLoss( + reduction='mean', ignore_index=ignore_index) + else: + self.loss_func = nn.CrossEntropyLoss(reduction='mean') + self.smoothing = smoothing + self.with_all = with_all + + def forward(self, pred, batch): + + if isinstance(pred, dict): # for ABINet + loss = {} + loss_sum = [] + for name, logits in pred.items(): + if isinstance(logits, list): + logit_num = len(logits) + all_tgt = paddle.concat([batch[1]] * logit_num, 0) + all_logits = paddle.concat(logits, 0) + flt_logtis = all_logits.reshape([-1, all_logits.shape[2]]) + flt_tgt = all_tgt.reshape([-1]) + else: + flt_logtis = logits.reshape([-1, logits.shape[2]]) + flt_tgt = batch[1].reshape([-1]) + loss[name + '_loss'] = self.loss_func(flt_logtis, flt_tgt) + loss_sum.append(loss[name + '_loss']) + loss['loss'] = sum(loss_sum) + return loss + else: + if self.with_all: # for ViTSTR + tgt = batch[1] + pred = pred.reshape([-1, pred.shape[2]]) + tgt = tgt.reshape([-1]) + loss = self.loss_func(pred, tgt) + return {'loss': loss} + else: # for NRTR + max_len = batch[2].max() + tgt = batch[1][:, 1:2 + max_len] + pred = pred.reshape([-1, pred.shape[2]]) + tgt = tgt.reshape([-1]) + if self.smoothing: + eps = 0.1 + n_class = pred.shape[1] + one_hot = F.one_hot(tgt, pred.shape[1]) + one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / ( + n_class - 1) + log_prb = F.log_softmax(pred, axis=1) + non_pad_mask = 
paddle.not_equal( + tgt, paddle.zeros( + tgt.shape, dtype=tgt.dtype)) + loss = -(one_hot * log_prb).sum(axis=1) + loss = loss.masked_select(non_pad_mask).mean() + else: + loss = self.loss_func(pred, tgt) + return {'loss': loss} diff --git a/ppocr/losses/rec_nrtr_loss.py b/ppocr/losses/rec_nrtr_loss.py deleted file mode 100644 index 200a6d0486dbf6f76dd674eb58f641b31a70f31c..0000000000000000000000000000000000000000 --- a/ppocr/losses/rec_nrtr_loss.py +++ /dev/null @@ -1,30 +0,0 @@ -import paddle -from paddle import nn -import paddle.nn.functional as F - - -class NRTRLoss(nn.Layer): - def __init__(self, smoothing=True, **kwargs): - super(NRTRLoss, self).__init__() - self.loss_func = nn.CrossEntropyLoss(reduction='mean', ignore_index=0) - self.smoothing = smoothing - - def forward(self, pred, batch): - pred = pred.reshape([-1, pred.shape[2]]) - max_len = batch[2].max() - tgt = batch[1][:, 1:2 + max_len] - tgt = tgt.reshape([-1]) - if self.smoothing: - eps = 0.1 - n_class = pred.shape[1] - one_hot = F.one_hot(tgt, pred.shape[1]) - one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1) - log_prb = F.log_softmax(pred, axis=1) - non_pad_mask = paddle.not_equal( - tgt, paddle.zeros( - tgt.shape, dtype=tgt.dtype)) - loss = -(one_hot * log_prb).sum(axis=1) - loss = loss.masked_select(non_pad_mask).mean() - else: - loss = self.loss_func(pred, tgt) - return {'loss': loss} diff --git a/ppocr/losses/table_att_loss.py b/ppocr/losses/table_att_loss.py index d7fd99e6952aacc0182a482ca5ae5ddaf959a026..51377efa2b5e802fe9f9fc1973c74deb00fc4816 100644 --- a/ppocr/losses/table_att_loss.py +++ b/ppocr/losses/table_att_loss.py @@ -19,7 +19,6 @@ from __future__ import print_function import paddle from paddle import nn from paddle.nn import functional as F -from paddle import fluid class TableAttentionLoss(nn.Layer): def __init__(self, structure_weight, loc_weight, use_giou=False, giou_weight=1.0, **kwargs): @@ -36,13 +35,13 @@ class TableAttentionLoss(nn.Layer): :param bbox:[[x1,y1,x2,y2], [x1,y1,x2,y2],,,] :return: loss ''' - ix1 = fluid.layers.elementwise_max(preds[:, 0], bbox[:, 0]) - iy1 = fluid.layers.elementwise_max(preds[:, 1], bbox[:, 1]) - ix2 = fluid.layers.elementwise_min(preds[:, 2], bbox[:, 2]) - iy2 = fluid.layers.elementwise_min(preds[:, 3], bbox[:, 3]) + ix1 = paddle.maximum(preds[:, 0], bbox[:, 0]) + iy1 = paddle.maximum(preds[:, 1], bbox[:, 1]) + ix2 = paddle.minimum(preds[:, 2], bbox[:, 2]) + iy2 = paddle.minimum(preds[:, 3], bbox[:, 3]) - iw = fluid.layers.clip(ix2 - ix1 + 1e-3, 0., 1e10) - ih = fluid.layers.clip(iy2 - iy1 + 1e-3, 0., 1e10) + iw = paddle.clip(ix2 - ix1 + 1e-3, 0., 1e10) + ih = paddle.clip(iy2 - iy1 + 1e-3, 0., 1e10) # overlap inters = iw * ih @@ -55,12 +54,12 @@ class TableAttentionLoss(nn.Layer): # ious ious = inters / uni - ex1 = fluid.layers.elementwise_min(preds[:, 0], bbox[:, 0]) - ey1 = fluid.layers.elementwise_min(preds[:, 1], bbox[:, 1]) - ex2 = fluid.layers.elementwise_max(preds[:, 2], bbox[:, 2]) - ey2 = fluid.layers.elementwise_max(preds[:, 3], bbox[:, 3]) - ew = fluid.layers.clip(ex2 - ex1 + 1e-3, 0., 1e10) - eh = fluid.layers.clip(ey2 - ey1 + 1e-3, 0., 1e10) + ex1 = paddle.minimum(preds[:, 0], bbox[:, 0]) + ey1 = paddle.minimum(preds[:, 1], bbox[:, 1]) + ex2 = paddle.maximum(preds[:, 2], bbox[:, 2]) + ey2 = paddle.maximum(preds[:, 3], bbox[:, 3]) + ew = paddle.clip(ex2 - ex1 + 1e-3, 0., 1e10) + eh = paddle.clip(ey2 - ey1 + 1e-3, 0., 1e10) # enclose erea enclose = ew * eh + eps diff --git a/ppocr/modeling/architectures/__init__.py 
b/ppocr/modeling/architectures/__init__.py
index e9a01cf0281b91d29f2cce88375be3aaf43feb2e..1c955ef3abe9c38e816616cc9b5399c6832aa5f1 100755
--- a/ppocr/modeling/architectures/__init__.py
+++ b/ppocr/modeling/architectures/__init__.py
@@ -15,10 +15,13 @@ import copy
 import importlib

+from paddle.jit import to_static
+from paddle.static import InputSpec
+
 from .base_model import BaseModel
 from .distillation_model import DistillationModel

-__all__ = ['build_model']
+__all__ = ["build_model", "apply_to_static"]

 def build_model(config):
@@ -30,3 +33,36 @@ def build_model(config):
     mod = importlib.import_module(__name__)
     arch = getattr(mod, name)(config)
     return arch
+
+
+def apply_to_static(model, config, logger):
+    if config["Global"].get("to_static", False) is not True:
+        return model
+    assert "image_shape" in config[
+        "Global"], "image_shape must be assigned for static training mode..."
+    supported_list = ["DB", "SVTR"]
+    if config["Architecture"]["algorithm"] in ["Distillation"]:
+        algo = list(config["Architecture"]["Models"].values())[0]["algorithm"]
+    else:
+        algo = config["Architecture"]["algorithm"]
+    assert algo in supported_list, f"algorithms that support static training must be in {supported_list}, but got {algo}"
+
+    specs = [
+        InputSpec(
+            [None] + config["Global"]["image_shape"], dtype='float32')
+    ]
+
+    if algo == "SVTR":
+        specs.append([
+            InputSpec(
+                [None, config["Global"]["max_text_length"]],
+                dtype='int64'), InputSpec(
+                    [None, config["Global"]["max_text_length"]], dtype='int64'),
+            InputSpec(
+                [None], dtype='int64'), InputSpec(
+                    [None], dtype='float64')
+        ])
+
+    model = to_static(model, input_spec=specs)
+    logger.info("Successfully applied @to_static with specs: {}".format(specs))
+    return model
diff --git a/ppocr/modeling/backbones/__init__.py b/ppocr/modeling/backbones/__init__.py
index 072d6e0f84d4126d256c26aa5baf17c9dc4e63df..f8959e263ecffb301dff227ff22e5e913375f919 100755
--- a/ppocr/modeling/backbones/__init__.py
+++ b/ppocr/modeling/backbones/__init__.py
@@ -28,35 +28,37 @@ def build_backbone(config, model_type):
         from .rec_mv1_enhance import MobileNetV1Enhance
         from .rec_nrtr_mtb import MTB
         from .rec_resnet_31 import ResNet31
+        from .rec_resnet_45 import ResNet45
         from .rec_resnet_aster import ResNet_ASTER
         from .rec_micronet import MicroNet
         from .rec_efficientb3_pren import EfficientNetb3_PREN
         from .rec_svtrnet import SVTRNet
+        from .rec_vitstr import ViTSTR
         support_dict = [
             'MobileNetV1Enhance', 'MobileNetV3', 'ResNet', 'ResNetFPN', 'MTB',
-            "ResNet31", "ResNet_ASTER", 'MicroNet', 'EfficientNetb3_PREN',
-            'SVTRNet'
+            'ResNet31', 'ResNet45', 'ResNet_ASTER', 'MicroNet',
+            'EfficientNetb3_PREN', 'SVTRNet', 'ViTSTR'
         ]
-    elif model_type == "e2e":
+    elif model_type == 'e2e':
         from .e2e_resnet_vd_pg import ResNet
         support_dict = ['ResNet']
     elif model_type == 'kie':
         from .kie_unet_sdmgr import Kie_backbone
         support_dict = ['Kie_backbone']
-    elif model_type == "table":
+    elif model_type == 'table':
         from .table_resnet_vd import ResNet
         from .table_mobilenet_v3 import MobileNetV3
-        support_dict = ["ResNet", "MobileNetV3"]
+        support_dict = ['ResNet', 'MobileNetV3']
     elif model_type == 'vqa':
         from .vqa_layoutlm import LayoutLMForSer, LayoutLMv2ForSer, LayoutLMv2ForRe, LayoutXLMForSer, LayoutXLMForRe
         support_dict = [
-            "LayoutLMForSer", "LayoutLMv2ForSer", 'LayoutLMv2ForRe',
-            "LayoutXLMForSer", 'LayoutXLMForRe'
+            'LayoutLMForSer', 'LayoutLMv2ForSer', 'LayoutLMv2ForRe',
+            'LayoutXLMForSer', 'LayoutXLMForRe'
         ]
     else:
         raise NotImplementedError

-    module_name = config.pop("name")
+
module_name = config.pop('name') assert module_name in support_dict, Exception( "when model typs is {}, backbone only support {}".format(model_type, support_dict)) diff --git a/ppocr/modeling/backbones/kie_unet_sdmgr.py b/ppocr/modeling/backbones/kie_unet_sdmgr.py index 545e4e7511e58c3d8220e9ec0be35474deba8806..4b1bd8030060b26acb9e60bd671a5b23d936347b 100644 --- a/ppocr/modeling/backbones/kie_unet_sdmgr.py +++ b/ppocr/modeling/backbones/kie_unet_sdmgr.py @@ -175,12 +175,7 @@ class Kie_backbone(nn.Layer): img, relations, texts, gt_bboxes, tag, img_size) x = self.img_feat(img) boxes, rois_num = self.bbox2roi(gt_bboxes) - feats = paddle.fluid.layers.roi_align( - x, - boxes, - spatial_scale=1.0, - pooled_height=7, - pooled_width=7, - rois_num=rois_num) + feats = paddle.vision.ops.roi_align( + x, boxes, spatial_scale=1.0, output_size=7, boxes_num=rois_num) feats = self.maxpool(feats).squeeze(-1).squeeze(-1) return [relations, texts, feats] diff --git a/ppocr/modeling/backbones/rec_resnet_45.py b/ppocr/modeling/backbones/rec_resnet_45.py new file mode 100644 index 0000000000000000000000000000000000000000..9093d0bc99b78806d36662dec36b6cfbdd4ae493 --- /dev/null +++ b/ppocr/modeling/backbones/rec_resnet_45.py @@ -0,0 +1,147 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
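> An aside before the `ResNet45` listing: the `roi_align` hunk above maps the legacy `paddle.fluid.layers.roi_align` arguments onto `paddle.vision.ops.roi_align`, with `pooled_height`/`pooled_width` collapsing into `output_size` and `rois_num` renamed to `boxes_num`. A minimal sketch on random tensors, not part of the patch, with purely illustrative shapes:

```python
import paddle

x = paddle.rand([2, 64, 32, 32])                     # NCHW feature map
boxes = paddle.to_tensor(
    [[4., 4., 28., 28.], [0., 0., 16., 16.]])        # one RoI per image
boxes_num = paddle.to_tensor([1, 1], dtype='int32')

# pooled_height=7, pooled_width=7 in the old API become output_size=7
feats = paddle.vision.ops.roi_align(
    x, boxes, boxes_num=boxes_num, output_size=7, spatial_scale=1.0)
print(feats.shape)  # [2, 64, 7, 7]: one pooled feature per box
```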
+""" +This code is refer from: +https://github.com/FangShancheng/ABINet/tree/main/modules +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +from paddle import ParamAttr +from paddle.nn.initializer import KaimingNormal +import paddle.nn as nn +import paddle.nn.functional as F +import numpy as np +import math + +__all__ = ["ResNet45"] + + +def conv1x1(in_planes, out_planes, stride=1): + return nn.Conv2D( + in_planes, + out_planes, + kernel_size=1, + stride=1, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + + +def conv3x3(in_channel, out_channel, stride=1): + return nn.Conv2D( + in_channel, + out_channel, + kernel_size=3, + stride=stride, + padding=1, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + + +class BasicBlock(nn.Layer): + expansion = 1 + + def __init__(self, in_channels, channels, stride=1, downsample=None): + super().__init__() + self.conv1 = conv1x1(in_channels, channels) + self.bn1 = nn.BatchNorm2D(channels) + self.relu = nn.ReLU() + self.conv2 = conv3x3(channels, channels, stride) + self.bn2 = nn.BatchNorm2D(channels) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + out += residual + out = self.relu(out) + + return out + + +class ResNet45(nn.Layer): + def __init__(self, block=BasicBlock, layers=[3, 4, 6, 6, 3], in_channels=3): + self.inplanes = 32 + super(ResNet45, self).__init__() + self.conv1 = nn.Conv2D( + 3, + 32, + kernel_size=3, + stride=1, + padding=1, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + self.bn1 = nn.BatchNorm2D(32) + self.relu = nn.ReLU() + + self.layer1 = self._make_layer(block, 32, layers[0], stride=2) + self.layer2 = self._make_layer(block, 64, layers[1], stride=1) + self.layer3 = self._make_layer(block, 128, layers[2], stride=2) + self.layer4 = self._make_layer(block, 256, layers[3], stride=1) + self.layer5 = self._make_layer(block, 512, layers[4], stride=1) + self.out_channels = 512 + + # for m in self.modules(): + # if isinstance(m, nn.Conv2D): + # n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + # m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + # downsample = True + downsample = nn.Sequential( + nn.Conv2D( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False), + nn.BatchNorm2D(planes * block.expansion), ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + # print(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + # print(x) + x = self.layer4(x) + x = self.layer5(x) + return x diff --git a/ppocr/modeling/backbones/rec_resnet_fpn.py b/ppocr/modeling/backbones/rec_resnet_fpn.py index a7e876a2bd52a0ea70479c2009a291e4e2f8ce1f..79efd6e41e231ecad99aa4d01a8226a8550bd1ef 100644 --- a/ppocr/modeling/backbones/rec_resnet_fpn.py +++ b/ppocr/modeling/backbones/rec_resnet_fpn.py @@ -18,7 +18,6 @@ from __future__ import print_function from paddle import nn, ParamAttr from paddle.nn import functional as F -import paddle.fluid as fluid import paddle import numpy as np diff --git a/ppocr/modeling/backbones/rec_svtrnet.py b/ppocr/modeling/backbones/rec_svtrnet.py index c57bf46345d6e08f23b9258358f77f2285366314..c2c07f4476929d49237c8e9a10713f881f5f556b 100644 --- a/ppocr/modeling/backbones/rec_svtrnet.py +++ b/ppocr/modeling/backbones/rec_svtrnet.py @@ -147,7 +147,7 @@ class Attention(nn.Layer): dim, num_heads=8, mixer='Global', - HW=[8, 25], + HW=None, local_k=[7, 11], qkv_bias=False, qk_scale=None, @@ -210,7 +210,7 @@ class Block(nn.Layer): num_heads, mixer='Global', local_mixer=[7, 11], - HW=[8, 25], + HW=None, mlp_ratio=4., qkv_bias=False, qk_scale=None, @@ -274,7 +274,9 @@ class PatchEmbed(nn.Layer): img_size=[32, 100], in_channels=3, embed_dim=768, - sub_num=2): + sub_num=2, + patch_size=[4, 4], + mode='pope'): super().__init__() num_patches = (img_size[1] // (2 ** sub_num)) * \ (img_size[0] // (2 ** sub_num)) @@ -282,50 +284,56 @@ class PatchEmbed(nn.Layer): self.num_patches = num_patches self.embed_dim = embed_dim self.norm = None - if sub_num == 2: - self.proj = nn.Sequential( - ConvBNLayer( - in_channels=in_channels, - out_channels=embed_dim // 2, - kernel_size=3, - stride=2, - padding=1, - act=nn.GELU, - bias_attr=None), - ConvBNLayer( - in_channels=embed_dim // 2, - out_channels=embed_dim, - kernel_size=3, - stride=2, - padding=1, - act=nn.GELU, - bias_attr=None)) - if sub_num == 3: - self.proj = nn.Sequential( - ConvBNLayer( - in_channels=in_channels, - out_channels=embed_dim // 4, - kernel_size=3, - stride=2, - padding=1, - act=nn.GELU, - bias_attr=None), - ConvBNLayer( - in_channels=embed_dim // 4, - out_channels=embed_dim // 2, - kernel_size=3, - stride=2, - padding=1, - act=nn.GELU, - bias_attr=None), - ConvBNLayer( - in_channels=embed_dim // 2, - out_channels=embed_dim, - kernel_size=3, - stride=2, - padding=1, - act=nn.GELU, - bias_attr=None)) + if mode == 'pope': + if sub_num == 2: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None)) + if 
sub_num == 3: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 4, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None), + ConvBNLayer( + in_channels=embed_dim // 4, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None)) + elif mode == 'linear': + self.proj = nn.Conv2D( + 1, embed_dim, kernel_size=patch_size, stride=patch_size) + self.num_patches = img_size[0] // patch_size[0] * img_size[ + 1] // patch_size[1] def forward(self, x): B, C, H, W = x.shape diff --git a/ppocr/modeling/backbones/rec_vitstr.py b/ppocr/modeling/backbones/rec_vitstr.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d7d5148a1120e6f97a321b4135c6780c0c5db2 --- /dev/null +++ b/ppocr/modeling/backbones/rec_vitstr.py @@ -0,0 +1,120 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This code is refer from: +https://github.com/roatienza/deep-text-recognition-benchmark/blob/master/modules/vitstr.py +""" + +import numpy as np +import paddle +import paddle.nn as nn +from ppocr.modeling.backbones.rec_svtrnet import Block, PatchEmbed, zeros_, trunc_normal_, ones_ + +scale_dim_heads = {'tiny': [192, 3], 'small': [384, 6], 'base': [768, 12]} + + +class ViTSTR(nn.Layer): + def __init__(self, + img_size=[224, 224], + in_channels=1, + scale='tiny', + seqlen=27, + patch_size=[16, 16], + embed_dim=None, + depth=12, + num_heads=None, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_path_rate=0., + drop_rate=0., + attn_drop_rate=0., + norm_layer='nn.LayerNorm', + act_layer='nn.GELU', + epsilon=1e-6, + out_channels=None, + **kwargs): + super().__init__() + self.seqlen = seqlen + embed_dim = embed_dim if embed_dim is not None else scale_dim_heads[ + scale][0] + num_heads = num_heads if num_heads is not None else scale_dim_heads[ + scale][1] + out_channels = out_channels if out_channels is not None else embed_dim + self.patch_embed = PatchEmbed( + img_size=img_size, + in_channels=in_channels, + embed_dim=embed_dim, + patch_size=patch_size, + mode='linear') + num_patches = self.patch_embed.num_patches + + self.pos_embed = self.create_parameter( + shape=[1, num_patches + 1, embed_dim], default_initializer=zeros_) + self.add_parameter("pos_embed", self.pos_embed) + self.cls_token = self.create_parameter( + shape=[1, 1, embed_dim], default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = np.linspace(0, drop_path_rate, depth) + self.blocks = nn.LayerList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=eval(act_layer), + epsilon=epsilon, + 
prenorm=False) for i in range(depth) + ]) + self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon) + + self.out_channels = out_channels + + trunc_normal_(self.pos_embed) + trunc_normal_(self.cls_token) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + cls_tokens = paddle.tile(self.cls_token, repeat_times=[B, 1, 1]) + x = paddle.concat((cls_tokens, x), axis=1) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x[:, :self.seqlen] + return x.transpose([0, 2, 1]).unsqueeze(2) diff --git a/ppocr/modeling/heads/__init__.py b/ppocr/modeling/heads/__init__.py index 1670ea38e66baa683e6faab0ec4b12bc517f3c41..14e6aab854d8942083f1e3466f554c0876bcf403 100755 --- a/ppocr/modeling/heads/__init__.py +++ b/ppocr/modeling/heads/__init__.py @@ -33,6 +33,7 @@ def build_head(config): from .rec_aster_head import AsterHead from .rec_pren_head import PRENHead from .rec_multi_head import MultiHead + from .rec_abinet_head import ABINetHead # cls head from .cls_head import ClsHead @@ -46,7 +47,7 @@ def build_head(config): 'DBHead', 'PSEHead', 'FCEHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead', 'AttentionHead', 'SRNHead', 'PGHead', 'Transformer', 'TableAttentionHead', 'SARHead', 'AsterHead', 'SDMGRHead', 'PRENHead', - 'MultiHead' + 'MultiHead', 'ABINetHead' ] #table head diff --git a/ppocr/modeling/heads/multiheadAttention.py b/ppocr/modeling/heads/multiheadAttention.py deleted file mode 100755 index 900865ba1a8d80a108b3247ce1aff91c242860f2..0000000000000000000000000000000000000000 --- a/ppocr/modeling/heads/multiheadAttention.py +++ /dev/null @@ -1,163 +0,0 @@ -# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -from paddle import nn -import paddle.nn.functional as F -from paddle.nn import Linear -from paddle.nn.initializer import XavierUniform as xavier_uniform_ -from paddle.nn.initializer import Constant as constant_ -from paddle.nn.initializer import XavierNormal as xavier_normal_ - -zeros_ = constant_(value=0.) -ones_ = constant_(value=1.) - - -class MultiheadAttention(nn.Layer): - """Allows the model to jointly attend to information - from different representation subspaces. - See reference: Attention Is All You Need - - .. 
math:: - \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O - \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) - - Args: - embed_dim: total dimension of the model - num_heads: parallel attention layers, or heads - - """ - - def __init__(self, - embed_dim, - num_heads, - dropout=0., - bias=True, - add_bias_kv=False, - add_zero_attn=False): - super(MultiheadAttention, self).__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" - self.scaling = self.head_dim**-0.5 - self.out_proj = Linear(embed_dim, embed_dim, bias_attr=bias) - self._reset_parameters() - self.conv1 = paddle.nn.Conv2D( - in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1)) - self.conv2 = paddle.nn.Conv2D( - in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1)) - self.conv3 = paddle.nn.Conv2D( - in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1)) - - def _reset_parameters(self): - xavier_uniform_(self.out_proj.weight) - - def forward(self, - query, - key, - value, - key_padding_mask=None, - incremental_state=None, - attn_mask=None): - """ - Inputs of forward function - query: [target length, batch size, embed dim] - key: [sequence length, batch size, embed dim] - value: [sequence length, batch size, embed dim] - key_padding_mask: if True, mask padding based on batch size - incremental_state: if provided, previous time steps are cashed - need_weights: output attn_output_weights - static_kv: key and value are static - - Outputs of forward function - attn_output: [target length, batch size, embed dim] - attn_output_weights: [batch size, target length, sequence length] - """ - q_shape = paddle.shape(query) - src_shape = paddle.shape(key) - q = self._in_proj_q(query) - k = self._in_proj_k(key) - v = self._in_proj_v(value) - q *= self.scaling - q = paddle.transpose( - paddle.reshape( - q, [q_shape[0], q_shape[1], self.num_heads, self.head_dim]), - [1, 2, 0, 3]) - k = paddle.transpose( - paddle.reshape( - k, [src_shape[0], q_shape[1], self.num_heads, self.head_dim]), - [1, 2, 0, 3]) - v = paddle.transpose( - paddle.reshape( - v, [src_shape[0], q_shape[1], self.num_heads, self.head_dim]), - [1, 2, 0, 3]) - if key_padding_mask is not None: - assert key_padding_mask.shape[0] == q_shape[1] - assert key_padding_mask.shape[1] == src_shape[0] - attn_output_weights = paddle.matmul(q, - paddle.transpose(k, [0, 1, 3, 2])) - if attn_mask is not None: - attn_mask = paddle.unsqueeze(paddle.unsqueeze(attn_mask, 0), 0) - attn_output_weights += attn_mask - if key_padding_mask is not None: - attn_output_weights = paddle.reshape( - attn_output_weights, - [q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) - key = paddle.unsqueeze(paddle.unsqueeze(key_padding_mask, 1), 2) - key = paddle.cast(key, 'float32') - y = paddle.full( - shape=paddle.shape(key), dtype='float32', fill_value='-inf') - y = paddle.where(key == 0., key, y) - attn_output_weights += y - attn_output_weights = F.softmax( - attn_output_weights.astype('float32'), - axis=-1, - dtype=paddle.float32 if attn_output_weights.dtype == paddle.float16 - else attn_output_weights.dtype) - attn_output_weights = F.dropout( - attn_output_weights, p=self.dropout, training=self.training) - - attn_output = paddle.matmul(attn_output_weights, v) - attn_output = paddle.reshape( - paddle.transpose(attn_output, [2, 0, 1, 3]), - [q_shape[0], q_shape[1], 
self.embed_dim])
-        attn_output = self.out_proj(attn_output)
-
-        return attn_output
-
-    def _in_proj_q(self, query):
-        query = paddle.transpose(query, [1, 2, 0])
-        query = paddle.unsqueeze(query, axis=2)
-        res = self.conv1(query)
-        res = paddle.squeeze(res, axis=2)
-        res = paddle.transpose(res, [2, 0, 1])
-        return res
-
-    def _in_proj_k(self, key):
-        key = paddle.transpose(key, [1, 2, 0])
-        key = paddle.unsqueeze(key, axis=2)
-        res = self.conv2(key)
-        res = paddle.squeeze(res, axis=2)
-        res = paddle.transpose(res, [2, 0, 1])
-        return res
-
-    def _in_proj_v(self, value):
-        value = paddle.transpose(value, [1, 2, 0])  #(1, 2, 0)
-        value = paddle.unsqueeze(value, axis=2)
-        res = self.conv3(value)
-        res = paddle.squeeze(res, axis=2)
-        res = paddle.transpose(res, [2, 0, 1])
-        return res
diff --git a/ppocr/modeling/heads/rec_abinet_head.py b/ppocr/modeling/heads/rec_abinet_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0f60f1be1727e85380eedb7d311ce9445f88b8e
--- /dev/null
+++ b/ppocr/modeling/heads/rec_abinet_head.py
@@ -0,0 +1,296 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is refer from:
+https://github.com/FangShancheng/ABINet/tree/main/modules
+"""
+
+import math
+import paddle
+from paddle import nn
+import paddle.nn.functional as F
+from paddle.nn import LayerList
+from ppocr.modeling.heads.rec_nrtr_head import TransformerBlock, PositionalEncoding
+
+
+class BCNLanguage(nn.Layer):
+    def __init__(self,
+                 d_model=512,
+                 nhead=8,
+                 num_layers=4,
+                 dim_feedforward=2048,
+                 dropout=0.,
+                 max_length=25,
+                 detach=True,
+                 num_classes=37):
+        super().__init__()
+
+        self.d_model = d_model
+        self.detach = detach
+        self.max_length = max_length + 1  # additional stop token
+        self.proj = nn.Linear(num_classes, d_model, bias_attr=False)
+        self.token_encoder = PositionalEncoding(
+            dropout=0.1, dim=d_model, max_len=self.max_length)
+        self.pos_encoder = PositionalEncoding(
+            dropout=0, dim=d_model, max_len=self.max_length)
+
+        self.decoder = nn.LayerList([
+            TransformerBlock(
+                d_model=d_model,
+                nhead=nhead,
+                dim_feedforward=dim_feedforward,
+                attention_dropout_rate=dropout,
+                residual_dropout_rate=dropout,
+                with_self_attn=False,
+                with_cross_attn=True) for i in range(num_layers)
+        ])
+
+        self.cls = nn.Linear(d_model, num_classes)
+
+    def forward(self, tokens, lengths):
+        """
+        Args:
+            tokens: (B, N, C) where N is length, B is batch size and C is classes number
+            lengths: (B,)
+        """
+        if self.detach: tokens = tokens.detach()
+        embed = self.proj(tokens)  # (B, N, C)
+        embed = self.token_encoder(embed)  # (B, N, C)
+        padding_mask = _get_mask(lengths, self.max_length)
+        zeros = paddle.zeros_like(embed)  # (B, N, C)
+        query = self.pos_encoder(zeros)
+        for decoder_layer in self.decoder:
+            query = decoder_layer(query, embed, cross_mask=padding_mask)
+        output = query  # (B, N, C)
+
+        logits = self.cls(output)  # (B, N, C)
+
+        return output, logits
+
+
+def encoder_layer(in_c, out_c, k=3, s=2, p=1):
+    return
nn.Sequential( + nn.Conv2D(in_c, out_c, k, s, p), nn.BatchNorm2D(out_c), nn.ReLU()) + + +def decoder_layer(in_c, + out_c, + k=3, + s=1, + p=1, + mode='nearest', + scale_factor=None, + size=None): + align_corners = False if mode == 'nearest' else True + return nn.Sequential( + nn.Upsample( + size=size, + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners), + nn.Conv2D(in_c, out_c, k, s, p), + nn.BatchNorm2D(out_c), + nn.ReLU()) + + +class PositionAttention(nn.Layer): + def __init__(self, + max_length, + in_channels=512, + num_channels=64, + h=8, + w=32, + mode='nearest', + **kwargs): + super().__init__() + self.max_length = max_length + self.k_encoder = nn.Sequential( + encoder_layer( + in_channels, num_channels, s=(1, 2)), + encoder_layer( + num_channels, num_channels, s=(2, 2)), + encoder_layer( + num_channels, num_channels, s=(2, 2)), + encoder_layer( + num_channels, num_channels, s=(2, 2))) + self.k_decoder = nn.Sequential( + decoder_layer( + num_channels, num_channels, scale_factor=2, mode=mode), + decoder_layer( + num_channels, num_channels, scale_factor=2, mode=mode), + decoder_layer( + num_channels, num_channels, scale_factor=2, mode=mode), + decoder_layer( + num_channels, in_channels, size=(h, w), mode=mode)) + + self.pos_encoder = PositionalEncoding( + dropout=0, dim=in_channels, max_len=max_length) + self.project = nn.Linear(in_channels, in_channels) + + def forward(self, x): + B, C, H, W = x.shape + k, v = x, x + + # calculate key vector + features = [] + for i in range(0, len(self.k_encoder)): + k = self.k_encoder[i](k) + features.append(k) + for i in range(0, len(self.k_decoder) - 1): + k = self.k_decoder[i](k) + # print(k.shape, features[len(self.k_decoder) - 2 - i].shape) + k = k + features[len(self.k_decoder) - 2 - i] + k = self.k_decoder[-1](k) + + # calculate query vector + # TODO q=f(q,k) + zeros = paddle.zeros( + (B, self.max_length, C), dtype=x.dtype) # (T, N, C) + q = self.pos_encoder(zeros) # (B, N, C) + q = self.project(q) # (B, N, C) + + # calculate attention + attn_scores = q @k.flatten(2) # (B, N, (H*W)) + attn_scores = attn_scores / (C**0.5) + attn_scores = F.softmax(attn_scores, axis=-1) + + v = v.flatten(2).transpose([0, 2, 1]) # (B, (H*W), C) + attn_vecs = attn_scores @v # (B, N, C) + + return attn_vecs, attn_scores.reshape([0, self.max_length, H, W]) + + +class ABINetHead(nn.Layer): + def __init__(self, + in_channels, + out_channels, + d_model=512, + nhead=8, + num_layers=3, + dim_feedforward=2048, + dropout=0.1, + max_length=25, + use_lang=False, + iter_size=1): + super().__init__() + self.max_length = max_length + 1 + self.pos_encoder = PositionalEncoding( + dropout=0.1, dim=d_model, max_len=8 * 32) + self.encoder = nn.LayerList([ + TransformerBlock( + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + attention_dropout_rate=dropout, + residual_dropout_rate=dropout, + with_self_attn=True, + with_cross_attn=False) for i in range(num_layers) + ]) + self.decoder = PositionAttention( + max_length=max_length + 1, # additional stop token + mode='nearest', ) + self.out_channels = out_channels + self.cls = nn.Linear(d_model, self.out_channels) + self.use_lang = use_lang + if use_lang: + self.iter_size = iter_size + self.language = BCNLanguage( + d_model=d_model, + nhead=nhead, + num_layers=4, + dim_feedforward=dim_feedforward, + dropout=dropout, + max_length=max_length, + num_classes=self.out_channels) + # alignment + self.w_att_align = nn.Linear(2 * d_model, d_model) + self.cls_align = nn.Linear(d_model, self.out_channels) + + 
def forward(self, x, targets=None):
+        x = x.transpose([0, 2, 3, 1])
+        _, H, W, C = x.shape
+        feature = x.flatten(1, 2)
+        feature = self.pos_encoder(feature)
+        for encoder_layer in self.encoder:
+            feature = encoder_layer(feature)
+        feature = feature.reshape([0, H, W, C]).transpose([0, 3, 1, 2])
+        v_feature, attn_scores = self.decoder(
+            feature)  # (B, N, C), (B, C, H, W)
+        vis_logits = self.cls(v_feature)  # (B, N, C)
+        logits = vis_logits
+        vis_lengths = _get_length(vis_logits)
+        if self.use_lang:
+            align_logits = vis_logits
+            align_lengths = vis_lengths
+            all_l_res, all_a_res = [], []
+            for i in range(self.iter_size):
+                tokens = F.softmax(align_logits, axis=-1)
+                lengths = align_lengths
+                lengths = paddle.clip(
+                    lengths, 2, self.max_length)  # TODO: move to language model
+                l_feature, l_logits = self.language(tokens, lengths)
+
+                # alignment
+                all_l_res.append(l_logits)
+                fuse = paddle.concat((l_feature, v_feature), -1)
+                f_att = F.sigmoid(self.w_att_align(fuse))
+                output = f_att * v_feature + (1 - f_att) * l_feature
+                align_logits = self.cls_align(output)  # (B, N, C)
+
+                align_lengths = _get_length(align_logits)
+                all_a_res.append(align_logits)
+            if self.training:
+                return {
+                    'align': all_a_res,
+                    'lang': all_l_res,
+                    'vision': vis_logits
+                }
+            else:
+                logits = align_logits
+        if self.training:
+            return logits
+        else:
+            return F.softmax(logits, -1)
+
+
+def _get_length(logit):
+    """ Greedy decoder to obtain length from logit"""
+    out = (logit.argmax(-1) == 0)
+    abn = out.any(-1)
+    out_int = out.cast('int32')
+    out = (out_int.cumsum(-1) == 1) & out
+    out = out.cast('int32')
+    out = out.argmax(-1)
+    out = out + 1
+    out = paddle.where(abn, out, paddle.to_tensor(logit.shape[1]))
+    return out
+
+
+def _get_mask(length, max_length):
+    """Generate a square mask for the sequence. The masked positions are filled with float('-inf').
+        Unmasked positions are filled with float(0.0).
+    """
+    length = length.unsqueeze(-1)
+    B = paddle.shape(length)[0]
+    grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1])
+    zero_mask = paddle.zeros([B, max_length], dtype='float32')
+    inf_mask = paddle.full([B, max_length], '-inf', dtype='float32')
+    diag_mask = paddle.diag(
+        paddle.full(
+            [max_length], '-inf', dtype=paddle.float32),
+        offset=0,
+        name=None)
+    mask = paddle.where(grid >= length, inf_mask, zero_mask)
+    mask = mask.unsqueeze(1) + diag_mask
+    return mask.unsqueeze(1)
diff --git a/ppocr/modeling/heads/rec_nrtr_head.py b/ppocr/modeling/heads/rec_nrtr_head.py
index 38ba0c917840ea7d1e2a3c2bf0da32c2c35f2b40..bf9ef56145e6edfb15bd30235b4a62588396ba96 100644
--- a/ppocr/modeling/heads/rec_nrtr_head.py
+++ b/ppocr/modeling/heads/rec_nrtr_head.py
@@ -14,20 +14,15 @@
 
 import math
 import paddle
-import copy
 from paddle import nn
 import paddle.nn.functional as F
 from paddle.nn import LayerList
-from paddle.nn.initializer import XavierNormal as xavier_uniform_
-from paddle.nn import Dropout, Linear, LayerNorm, Conv2D
+# from paddle.nn.initializer import XavierNormal as xavier_uniform_
+from paddle.nn import Dropout, Linear, LayerNorm
 import numpy as np
-from ppocr.modeling.heads.multiheadAttention import MultiheadAttention
-from paddle.nn.initializer import Constant as constant_
+from ppocr.modeling.backbones.rec_svtrnet import Mlp, zeros_, ones_
 from paddle.nn.initializer import XavierNormal as xavier_normal_
 
-zeros_ = constant_(value=0.)
-ones_ = constant_(value=1.)
-
 
 class Transformer(nn.Layer):
     """A transformer model. User is able to modify the attributes as needed.
     The architechture
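> The `_get_length` helper above reads sequence length as "index of the first end token, plus one; otherwise the full sequence length". A numpy rendering of the cumsum trick, as a sketch assuming end-token id 0 as in the head (not part of the patch):

```python
import numpy as np

pred_ids = np.array([[5, 7, 0, 9, 0],    # first end token at t=2
                     [4, 4, 4, 4, 4]])   # no end token predicted
is_end = pred_ids == 0
first_end = (np.cumsum(is_end, axis=-1) == 1) & is_end  # keep only the first hit
length = first_end.argmax(-1) + 1                       # include the end token
length = np.where(is_end.any(-1), length, pred_ids.shape[1])
print(length)  # [3 5]
```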
The architechture @@ -45,7 +40,6 @@ class Transformer(nn.Layer): dropout: the dropout value (default=0.1). custom_encoder: custom encoder (default=None). custom_decoder: custom decoder (default=None). - """ def __init__(self, @@ -54,45 +48,49 @@ class Transformer(nn.Layer): num_encoder_layers=6, beam_size=0, num_decoder_layers=6, + max_len=25, dim_feedforward=1024, attention_dropout_rate=0.0, residual_dropout_rate=0.1, - custom_encoder=None, - custom_decoder=None, in_channels=0, out_channels=0, scale_embedding=True): super(Transformer, self).__init__() self.out_channels = out_channels + 1 + self.max_len = max_len self.embedding = Embeddings( d_model=d_model, vocab=self.out_channels, padding_idx=0, scale_embedding=scale_embedding) self.positional_encoding = PositionalEncoding( - dropout=residual_dropout_rate, - dim=d_model, ) - if custom_encoder is not None: - self.encoder = custom_encoder - else: - if num_encoder_layers > 0: - encoder_layer = TransformerEncoderLayer( - d_model, nhead, dim_feedforward, attention_dropout_rate, - residual_dropout_rate) - self.encoder = TransformerEncoder(encoder_layer, - num_encoder_layers) - else: - self.encoder = None - - if custom_decoder is not None: - self.decoder = custom_decoder + dropout=residual_dropout_rate, dim=d_model) + + if num_encoder_layers > 0: + self.encoder = nn.LayerList([ + TransformerBlock( + d_model, + nhead, + dim_feedforward, + attention_dropout_rate, + residual_dropout_rate, + with_self_attn=True, + with_cross_attn=False) for i in range(num_encoder_layers) + ]) else: - decoder_layer = TransformerDecoderLayer( - d_model, nhead, dim_feedforward, attention_dropout_rate, - residual_dropout_rate) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers) + self.encoder = None + + self.decoder = nn.LayerList([ + TransformerBlock( + d_model, + nhead, + dim_feedforward, + attention_dropout_rate, + residual_dropout_rate, + with_self_attn=True, + with_cross_attn=True) for i in range(num_decoder_layers) + ]) - self._reset_parameters() self.beam_size = beam_size self.d_model = d_model self.nhead = nhead @@ -105,7 +103,7 @@ class Transformer(nn.Layer): def _init_weights(self, m): - if isinstance(m, nn.Conv2D): + if isinstance(m, nn.Linear): xavier_normal_(m.weight) if m.bias is not None: zeros_(m.bias) @@ -113,24 +111,20 @@ class Transformer(nn.Layer): def forward_train(self, src, tgt): tgt = tgt[:, :-1] - tgt_key_padding_mask = self.generate_padding_mask(tgt) - tgt = self.embedding(tgt).transpose([1, 0, 2]) + tgt = self.embedding(tgt) tgt = self.positional_encoding(tgt) - tgt_mask = self.generate_square_subsequent_mask(tgt.shape[0]) + tgt_mask = self.generate_square_subsequent_mask(tgt.shape[1]) if self.encoder is not None: - src = self.positional_encoding(src.transpose([1, 0, 2])) - memory = self.encoder(src) + src = self.positional_encoding(src) + for encoder_layer in self.encoder: + src = encoder_layer(src) + memory = src # B N C else: - memory = src.squeeze(2).transpose([2, 0, 1]) - output = self.decoder( - tgt, - memory, - tgt_mask=tgt_mask, - memory_mask=None, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=None) - output = output.transpose([1, 0, 2]) + memory = src # B N C + for decoder_layer in self.decoder: + tgt = decoder_layer(tgt, memory, self_mask=tgt_mask) + output = tgt logit = self.tgt_word_prj(output) return logit @@ -140,8 +134,8 @@ class Transformer(nn.Layer): src: the sequence to the encoder (required). tgt: the sequence to the decoder (required). Shape: - - src: :math:`(S, N, E)`. 
- - tgt: :math:`(T, N, E)`. + - src: :math:`(B, sN, C)`. + - tgt: :math:`(B, tN, C)`. Examples: >>> output = transformer_model(src, tgt) """ @@ -157,36 +151,35 @@ class Transformer(nn.Layer): return self.forward_test(src) def forward_test(self, src): + bs = paddle.shape(src)[0] if self.encoder is not None: - src = self.positional_encoding(paddle.transpose(src, [1, 0, 2])) - memory = self.encoder(src) + src = self.positional_encoding(src) + for encoder_layer in self.encoder: + src = encoder_layer(src) + memory = src # B N C else: - memory = paddle.transpose(paddle.squeeze(src, 2), [2, 0, 1]) + memory = src dec_seq = paddle.full((bs, 1), 2, dtype=paddle.int64) dec_prob = paddle.full((bs, 1), 1., dtype=paddle.float32) - for len_dec_seq in range(1, 25): - dec_seq_embed = paddle.transpose(self.embedding(dec_seq), [1, 0, 2]) + for len_dec_seq in range(1, self.max_len): + dec_seq_embed = self.embedding(dec_seq) dec_seq_embed = self.positional_encoding(dec_seq_embed) tgt_mask = self.generate_square_subsequent_mask( - paddle.shape(dec_seq_embed)[0]) - output = self.decoder( - dec_seq_embed, - memory, - tgt_mask=tgt_mask, - memory_mask=None, - tgt_key_padding_mask=None, - memory_key_padding_mask=None) - dec_output = paddle.transpose(output, [1, 0, 2]) + paddle.shape(dec_seq_embed)[1]) + tgt = dec_seq_embed + for decoder_layer in self.decoder: + tgt = decoder_layer(tgt, memory, self_mask=tgt_mask) + dec_output = tgt dec_output = dec_output[:, -1, :] - word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=1) - preds_idx = paddle.argmax(word_prob, axis=1) + word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=-1) + preds_idx = paddle.argmax(word_prob, axis=-1) if paddle.equal_all( preds_idx, paddle.full( paddle.shape(preds_idx), 3, dtype='int64')): break - preds_prob = paddle.max(word_prob, axis=1) + preds_prob = paddle.max(word_prob, axis=-1) dec_seq = paddle.concat( [dec_seq, paddle.reshape(preds_idx, [-1, 1])], axis=1) dec_prob = paddle.concat( @@ -194,10 +187,10 @@ class Transformer(nn.Layer): return [dec_seq, dec_prob] def forward_beam(self, images): - ''' Translation work in one batch ''' + """ Translation work in one batch """ def get_inst_idx_to_tensor_position_map(inst_idx_list): - ''' Indicate the position of an instance in a tensor. ''' + """ Indicate the position of an instance in a tensor. """ return { inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list) @@ -205,7 +198,7 @@ class Transformer(nn.Layer): def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm): - ''' Collect tensor parts associated to active instances. ''' + """ Collect tensor parts associated to active instances. 
""" beamed_tensor_shape = paddle.shape(beamed_tensor) n_curr_active_inst = len(curr_active_inst_idx) @@ -237,9 +230,8 @@ class Transformer(nn.Layer): return active_src_enc, active_inst_idx_to_position_map def beam_decode_step(inst_dec_beams, len_dec_seq, enc_output, - inst_idx_to_position_map, n_bm, - memory_key_padding_mask): - ''' Decode and update beam status, and then return active beam idx ''' + inst_idx_to_position_map, n_bm): + """ Decode and update beam status, and then return active beam idx """ def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq): dec_partial_seq = [ @@ -249,19 +241,15 @@ class Transformer(nn.Layer): dec_partial_seq = dec_partial_seq.reshape([-1, len_dec_seq]) return dec_partial_seq - def predict_word(dec_seq, enc_output, n_active_inst, n_bm, - memory_key_padding_mask): - dec_seq = paddle.transpose(self.embedding(dec_seq), [1, 0, 2]) + def predict_word(dec_seq, enc_output, n_active_inst, n_bm): + dec_seq = self.embedding(dec_seq) dec_seq = self.positional_encoding(dec_seq) tgt_mask = self.generate_square_subsequent_mask( - paddle.shape(dec_seq)[0]) - dec_output = self.decoder( - dec_seq, - enc_output, - tgt_mask=tgt_mask, - tgt_key_padding_mask=None, - memory_key_padding_mask=memory_key_padding_mask, ) - dec_output = paddle.transpose(dec_output, [1, 0, 2]) + paddle.shape(dec_seq)[1]) + tgt = dec_seq + for decoder_layer in self.decoder: + tgt = decoder_layer(tgt, enc_output, self_mask=tgt_mask) + dec_output = tgt dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=1) @@ -281,8 +269,7 @@ class Transformer(nn.Layer): n_active_inst = len(inst_idx_to_position_map) dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq) - word_prob = predict_word(dec_seq, enc_output, n_active_inst, n_bm, - None) + word_prob = predict_word(dec_seq, enc_output, n_active_inst, n_bm) # Update the beam with predicted word prob information and collect incomplete instances active_inst_idx_list = collect_active_inst_idx_list( inst_dec_beams, word_prob, inst_idx_to_position_map) @@ -303,10 +290,10 @@ class Transformer(nn.Layer): with paddle.no_grad(): #-- Encode if self.encoder is not None: - src = self.positional_encoding(images.transpose([1, 0, 2])) + src = self.positional_encoding(images) src_enc = self.encoder(src) else: - src_enc = images.squeeze(2).transpose([0, 2, 1]) + src_enc = images n_bm = self.beam_size src_shape = paddle.shape(src_enc) @@ -317,11 +304,11 @@ class Transformer(nn.Layer): inst_idx_to_position_map = get_inst_idx_to_tensor_position_map( active_inst_idx_list) # Decode - for len_dec_seq in range(1, 25): + for len_dec_seq in range(1, self.max_len): src_enc_copy = src_enc.clone() active_inst_idx_list = beam_decode_step( inst_dec_beams, len_dec_seq, src_enc_copy, - inst_idx_to_position_map, n_bm, None) + inst_idx_to_position_map, n_bm) if not active_inst_idx_list: break # all instances have finished their path to src_enc, inst_idx_to_position_map = collate_active_info( @@ -354,261 +341,124 @@ class Transformer(nn.Layer): shape=[sz, sz], dtype='float32', fill_value='-inf'), diagonal=1) mask = mask + mask_inf - return mask - - def generate_padding_mask(self, x): - padding_mask = paddle.equal(x, paddle.to_tensor(0, dtype=x.dtype)) - return padding_mask + return mask.unsqueeze([0, 1]) - def _reset_parameters(self): - """Initiate parameters in the transformer model.""" - for p in self.parameters(): - if p.dim() > 1: - xavier_uniform_(p) +class MultiheadAttention(nn.Layer): + """Allows the model to 
jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) -class TransformerEncoder(nn.Layer): - """TransformerEncoder is a stack of N encoder layers Args: - encoder_layer: an instance of the TransformerEncoderLayer() class (required). - num_layers: the number of sub-encoder-layers in the encoder (required). - norm: the layer normalization component (optional). - """ + embed_dim: total dimension of the model + num_heads: parallel attention layers, or heads - def __init__(self, encoder_layer, num_layers): - super(TransformerEncoder, self).__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers + """ - def forward(self, src): - """Pass the input through the endocder layers in turn. - Args: - src: the sequnce to the encoder (required). - mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - """ - output = src + def __init__(self, embed_dim, num_heads, dropout=0., self_attn=False): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + # self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + self.scale = self.head_dim**-0.5 + self.self_attn = self_attn + if self_attn: + self.qkv = nn.Linear(embed_dim, embed_dim * 3) + else: + self.q = nn.Linear(embed_dim, embed_dim) + self.kv = nn.Linear(embed_dim, embed_dim * 2) + self.attn_drop = nn.Dropout(dropout) + self.out_proj = nn.Linear(embed_dim, embed_dim) - for i in range(self.num_layers): - output = self.layers[i](output, - src_mask=None, - src_key_padding_mask=None) + def forward(self, query, key=None, attn_mask=None): - return output + qN = query.shape[1] + if self.self_attn: + qkv = self.qkv(query).reshape( + (0, qN, 3, self.num_heads, self.head_dim)).transpose( + (2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + else: + kN = key.shape[1] + q = self.q(query).reshape( + [0, qN, self.num_heads, self.head_dim]).transpose([0, 2, 1, 3]) + kv = self.kv(key).reshape( + (0, kN, 2, self.num_heads, self.head_dim)).transpose( + (2, 0, 3, 1, 4)) + k, v = kv[0], kv[1] -class TransformerDecoder(nn.Layer): - """TransformerDecoder is a stack of N decoder layers + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale - Args: - decoder_layer: an instance of the TransformerDecoderLayer() class (required). - num_layers: the number of sub-decoder-layers in the decoder (required). - norm: the layer normalization component (optional). + if attn_mask is not None: + attn += attn_mask - """ + attn = F.softmax(attn, axis=-1) + attn = self.attn_drop(attn) - def __init__(self, decoder_layer, num_layers): - super(TransformerDecoder, self).__init__() - self.layers = _get_clones(decoder_layer, num_layers) - self.num_layers = num_layers + x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape( + (0, qN, self.embed_dim)) + x = self.out_proj(x) - def forward(self, - tgt, - memory, - tgt_mask=None, - memory_mask=None, - tgt_key_padding_mask=None, - memory_key_padding_mask=None): - """Pass the inputs (and mask) through the decoder layer in turn. + return x - Args: - tgt: the sequence to the decoder (required). - memory: the sequnce from the last layer of the encoder (required). 
- tgt_mask: the mask for the tgt sequence (optional). - memory_mask: the mask for the memory sequence (optional). - tgt_key_padding_mask: the mask for the tgt keys per batch (optional). - memory_key_padding_mask: the mask for the memory keys per batch (optional). - """ - output = tgt - for i in range(self.num_layers): - output = self.layers[i]( - output, - memory, - tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask) - - return output - - -class TransformerEncoderLayer(nn.Layer): - """TransformerEncoderLayer is made up of self-attn and feedforward network. - This standard encoder layer is based on the paper "Attention Is All You Need". - Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, - Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in - Neural Information Processing Systems, pages 6000-6010. Users may modify or implement - in a different way during application. - - Args: - d_model: the number of expected features in the input (required). - nhead: the number of heads in the multiheadattention models (required). - dim_feedforward: the dimension of the feedforward network model (default=2048). - dropout: the dropout value (default=0.1). - - """ +class TransformerBlock(nn.Layer): def __init__(self, d_model, nhead, dim_feedforward=2048, attention_dropout_rate=0.0, - residual_dropout_rate=0.1): - super(TransformerEncoderLayer, self).__init__() - self.self_attn = MultiheadAttention( - d_model, nhead, dropout=attention_dropout_rate) - - self.conv1 = Conv2D( - in_channels=d_model, - out_channels=dim_feedforward, - kernel_size=(1, 1)) - self.conv2 = Conv2D( - in_channels=dim_feedforward, - out_channels=d_model, - kernel_size=(1, 1)) - - self.norm1 = LayerNorm(d_model) - self.norm2 = LayerNorm(d_model) - self.dropout1 = Dropout(residual_dropout_rate) - self.dropout2 = Dropout(residual_dropout_rate) - - def forward(self, src, src_mask=None, src_key_padding_mask=None): - """Pass the input through the endocder layer. - Args: - src: the sequnce to the encoder layer (required). - src_mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - """ - src2 = self.self_attn( - src, - src, - src, - attn_mask=src_mask, - key_padding_mask=src_key_padding_mask) - src = src + self.dropout1(src2) - src = self.norm1(src) - - src = paddle.transpose(src, [1, 2, 0]) - src = paddle.unsqueeze(src, 2) - src2 = self.conv2(F.relu(self.conv1(src))) - src2 = paddle.squeeze(src2, 2) - src2 = paddle.transpose(src2, [2, 0, 1]) - src = paddle.squeeze(src, 2) - src = paddle.transpose(src, [2, 0, 1]) - - src = src + self.dropout2(src2) - src = self.norm2(src) - return src - - -class TransformerDecoderLayer(nn.Layer): - """TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. - This standard decoder layer is based on the paper "Attention Is All You Need". - Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, - Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in - Neural Information Processing Systems, pages 6000-6010. Users may modify or implement - in a different way during application. - - Args: - d_model: the number of expected features in the input (required). - nhead: the number of heads in the multiheadattention models (required). 
- dim_feedforward: the dimension of the feedforward network model (default=2048). - dropout: the dropout value (default=0.1). - - """ + residual_dropout_rate=0.1, + with_self_attn=True, + with_cross_attn=False, + epsilon=1e-5): + super(TransformerBlock, self).__init__() + self.with_self_attn = with_self_attn + if with_self_attn: + self.self_attn = MultiheadAttention( + d_model, + nhead, + dropout=attention_dropout_rate, + self_attn=with_self_attn) + self.norm1 = LayerNorm(d_model, epsilon=epsilon) + self.dropout1 = Dropout(residual_dropout_rate) + self.with_cross_attn = with_cross_attn + if with_cross_attn: + self.cross_attn = MultiheadAttention( #for self_attn of encoder or cross_attn of decoder + d_model, + nhead, + dropout=attention_dropout_rate) + self.norm2 = LayerNorm(d_model, epsilon=epsilon) + self.dropout2 = Dropout(residual_dropout_rate) + + self.mlp = Mlp(in_features=d_model, + hidden_features=dim_feedforward, + act_layer=nn.ReLU, + drop=residual_dropout_rate) + + self.norm3 = LayerNorm(d_model, epsilon=epsilon) - def __init__(self, - d_model, - nhead, - dim_feedforward=2048, - attention_dropout_rate=0.0, - residual_dropout_rate=0.1): - super(TransformerDecoderLayer, self).__init__() - self.self_attn = MultiheadAttention( - d_model, nhead, dropout=attention_dropout_rate) - self.multihead_attn = MultiheadAttention( - d_model, nhead, dropout=attention_dropout_rate) - - self.conv1 = Conv2D( - in_channels=d_model, - out_channels=dim_feedforward, - kernel_size=(1, 1)) - self.conv2 = Conv2D( - in_channels=dim_feedforward, - out_channels=d_model, - kernel_size=(1, 1)) - - self.norm1 = LayerNorm(d_model) - self.norm2 = LayerNorm(d_model) - self.norm3 = LayerNorm(d_model) - self.dropout1 = Dropout(residual_dropout_rate) - self.dropout2 = Dropout(residual_dropout_rate) self.dropout3 = Dropout(residual_dropout_rate) - def forward(self, - tgt, - memory, - tgt_mask=None, - memory_mask=None, - tgt_key_padding_mask=None, - memory_key_padding_mask=None): - """Pass the inputs (and mask) through the decoder layer. + def forward(self, tgt, memory=None, self_mask=None, cross_mask=None): + if self.with_self_attn: + tgt1 = self.self_attn(tgt, attn_mask=self_mask) + tgt = self.norm1(tgt + self.dropout1(tgt1)) - Args: - tgt: the sequence to the decoder layer (required). - memory: the sequnce from the last layer of the encoder (required). - tgt_mask: the mask for the tgt sequence (optional). - memory_mask: the mask for the memory sequence (optional). - tgt_key_padding_mask: the mask for the tgt keys per batch (optional). - memory_key_padding_mask: the mask for the memory keys per batch (optional). 
- - """ - tgt2 = self.self_attn( - tgt, - tgt, - tgt, - attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask) - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - tgt2 = self.multihead_attn( - tgt, - memory, - memory, - attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask) - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - - # default - tgt = paddle.transpose(tgt, [1, 2, 0]) - tgt = paddle.unsqueeze(tgt, 2) - tgt2 = self.conv2(F.relu(self.conv1(tgt))) - tgt2 = paddle.squeeze(tgt2, 2) - tgt2 = paddle.transpose(tgt2, [2, 0, 1]) - tgt = paddle.squeeze(tgt, 2) - tgt = paddle.transpose(tgt, [2, 0, 1]) - - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) + if self.with_cross_attn: + tgt2 = self.cross_attn(tgt, key=memory, attn_mask=cross_mask) + tgt = self.norm2(tgt + self.dropout2(tgt2)) + tgt = self.norm3(tgt + self.dropout3(self.mlp(tgt))) return tgt -def _get_clones(module, N): - return LayerList([copy.deepcopy(module) for i in range(N)]) - - class PositionalEncoding(nn.Layer): """Inject some information about the relative or absolute position of the tokens in the sequence. The positional encodings have the same dimension as @@ -651,8 +501,9 @@ class PositionalEncoding(nn.Layer): Examples: >>> output = pos_encoder(x) """ + x = x.transpose([1, 0, 2]) x = x + self.pe[:paddle.shape(x)[0], :] - return self.dropout(x) + return self.dropout(x).transpose([1, 0, 2]) class PositionalEncoding_2d(nn.Layer): @@ -725,7 +576,7 @@ class PositionalEncoding_2d(nn.Layer): class Embeddings(nn.Layer): - def __init__(self, d_model, vocab, padding_idx, scale_embedding): + def __init__(self, d_model, vocab, padding_idx=None, scale_embedding=True): super(Embeddings, self).__init__() self.embedding = nn.Embedding(vocab, d_model, padding_idx=padding_idx) w0 = np.random.normal(0.0, d_model**-0.5, @@ -742,7 +593,7 @@ class Embeddings(nn.Layer): class Beam(): - ''' Beam search ''' + """ Beam search """ def __init__(self, size, device=False): diff --git a/ppocr/modeling/heads/rec_sar_head.py b/ppocr/modeling/heads/rec_sar_head.py index 0e6b34404b61b44bebcbc7d67ddfd0a95382c39b..5e64cae85afafc555f2519ed6dd3f05eafff7ea2 100644 --- a/ppocr/modeling/heads/rec_sar_head.py +++ b/ppocr/modeling/heads/rec_sar_head.py @@ -83,7 +83,7 @@ class SAREncoder(nn.Layer): def forward(self, feat, img_metas=None): if img_metas is not None: - assert len(img_metas[0]) == feat.shape[0] + assert len(img_metas[0]) == paddle.shape(feat)[0] valid_ratios = None if img_metas is not None and self.mask: @@ -98,9 +98,10 @@ class SAREncoder(nn.Layer): if valid_ratios is not None: valid_hf = [] - T = holistic_feat.shape[1] - for i in range(len(valid_ratios)): - valid_step = min(T, math.ceil(T * valid_ratios[i])) - 1 + T = paddle.shape(holistic_feat)[1] + for i in range(paddle.shape(valid_ratios)[0]): + valid_step = paddle.minimum( + T, paddle.ceil(valid_ratios[i] * T).astype('int32')) - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = paddle.stack(valid_hf, axis=0) else: @@ -247,13 +248,14 @@ class ParallelSARDecoder(BaseDecoder): # bsz * (seq_len + 1) * h * w * attn_size attn_weight = self.conv1x1_2(attn_weight) # bsz * (seq_len + 1) * h * w * 1 - bsz, T, h, w, c = attn_weight.shape + bsz, T, h, w, c = paddle.shape(attn_weight) assert c == 1 if valid_ratios is not None: # cal mask of attention weight - for i in range(len(valid_ratios)): - valid_width = min(w, math.ceil(w * valid_ratios[i])) + for i in range(paddle.shape(valid_ratios)[0]): + valid_width = paddle.minimum( + w, 
paddle.ceil(valid_ratios[i] * w).astype("int32")) if valid_width < w: attn_weight[i, :, :, valid_width:, :] = float('-inf') @@ -288,7 +290,7 @@ class ParallelSARDecoder(BaseDecoder): img_metas: [label, valid_ratio] ''' if img_metas is not None: - assert len(img_metas[0]) == feat.shape[0] + assert paddle.shape(img_metas[0])[0] == paddle.shape(feat)[0] valid_ratios = None if img_metas is not None and self.mask: @@ -302,7 +304,6 @@ class ParallelSARDecoder(BaseDecoder): # bsz * (seq_len + 1) * C out_dec = self._2d_attention( in_dec, feat, out_enc, valid_ratios=valid_ratios) - # bsz * (seq_len + 1) * num_classes return out_dec[:, 1:, :] # bsz * seq_len * num_classes @@ -395,7 +396,6 @@ class SARHead(nn.Layer): if self.training: label = targets[0] # label - label = paddle.to_tensor(label, dtype='int64') final_out = self.decoder( feat, holistic_feat, label, img_metas=targets) else: diff --git a/ppocr/modeling/heads/rec_srn_head.py b/ppocr/modeling/heads/rec_srn_head.py index 8d59e4711a043afd9234f430a62c9876c0a8f6f4..1070d8cd648eb686c0a2e66df092b7dc6de29c42 100644 --- a/ppocr/modeling/heads/rec_srn_head.py +++ b/ppocr/modeling/heads/rec_srn_head.py @@ -20,13 +20,11 @@ import math import paddle from paddle import nn, ParamAttr from paddle.nn import functional as F -import paddle.fluid as fluid import numpy as np from .self_attention import WrapEncoderForFeature from .self_attention import WrapEncoder from paddle.static import Program from ppocr.modeling.backbones.rec_resnet_fpn import ResNetFPN -import paddle.fluid.framework as framework from collections import OrderedDict gradient_clip = 10 diff --git a/ppocr/modeling/heads/self_attention.py b/ppocr/modeling/heads/self_attention.py index 6c27fdbe434166e9277cc8d695bce2743cbd8ec6..6e4c65e3931ae74a0fde2a16694a69fdfa69b5ed 100644 --- a/ppocr/modeling/heads/self_attention.py +++ b/ppocr/modeling/heads/self_attention.py @@ -22,7 +22,6 @@ import paddle from paddle import ParamAttr, nn from paddle import nn, ParamAttr from paddle.nn import functional as F -import paddle.fluid as fluid import numpy as np gradient_clip = 10 @@ -288,10 +287,10 @@ class PrePostProcessLayer(nn.Layer): "layer_norm_%d" % len(self.sublayers()), paddle.nn.LayerNorm( normalized_shape=d_model, - weight_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.)), - bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.))))) + weight_attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.Constant(1.)), + bias_attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.Constant(0.))))) elif cmd == "d": # add dropout self.functors.append(lambda x: F.dropout( x, p=dropout_rate, mode="downscale_in_infer") @@ -324,7 +323,7 @@ class PrepareEncoder(nn.Layer): def forward(self, src_word, src_pos): src_word_emb = src_word - src_word_emb = fluid.layers.cast(src_word_emb, 'float32') + src_word_emb = paddle.cast(src_word_emb, 'float32') src_word_emb = paddle.scale(x=src_word_emb, scale=self.src_emb_dim**0.5) src_pos = paddle.squeeze(src_pos, axis=-1) src_pos_enc = self.emb(src_pos) @@ -367,7 +366,7 @@ class PrepareDecoder(nn.Layer): self.dropout_rate = dropout_rate def forward(self, src_word, src_pos): - src_word = fluid.layers.cast(src_word, 'int64') + src_word = paddle.cast(src_word, 'int64') src_word = paddle.squeeze(src_word, axis=-1) src_word_emb = self.emb0(src_word) src_word_emb = paddle.scale(x=src_word_emb, scale=self.src_emb_dim**0.5) diff --git a/ppocr/postprocess/__init__.py b/ppocr/postprocess/__init__.py index 
f50b5f1c5f8e617066bb47636c8f4d2b171b6ecb..2635117c84298bcf77a77cdcab553278f2df642b 100644
--- a/ppocr/postprocess/__init__.py
+++ b/ppocr/postprocess/__init__.py
@@ -27,7 +27,7 @@ from .sast_postprocess import SASTPostProcess
 from .fce_postprocess import FCEPostProcess
 from .rec_postprocess import CTCLabelDecode, AttnLabelDecode, SRNLabelDecode, \
     DistillationCTCLabelDecode, TableLabelDecode, NRTRLabelDecode, SARLabelDecode, \
-    SEEDLabelDecode, PRENLabelDecode
+    SEEDLabelDecode, PRENLabelDecode, ViTSTRLabelDecode, ABINetLabelDecode
 from .cls_postprocess import ClsPostProcess
 from .pg_postprocess import PGPostProcess
 from .vqa_token_ser_layoutlm_postprocess import VQASerTokenLayoutLMPostProcess
@@ -42,7 +42,7 @@ def build_post_process(config, global_config=None):
         'DistillationDBPostProcess', 'NRTRLabelDecode', 'SARLabelDecode',
         'SEEDLabelDecode', 'VQASerTokenLayoutLMPostProcess',
         'VQAReTokenLayoutLMPostProcess', 'PRENLabelDecode',
-        'DistillationSARLabelDecode'
+        'DistillationSARLabelDecode', 'ViTSTRLabelDecode', 'ABINetLabelDecode'
     ]
 
     if config['name'] == 'PSEPostProcess':
diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py
index bf0fd890bf25949361665d212bf8e1a657054e5b..c77420ad19b76e203262568ba77f06ea248a1655 100644
--- a/ppocr/postprocess/rec_postprocess.py
+++ b/ppocr/postprocess/rec_postprocess.py
@@ -140,70 +140,6 @@ class DistillationCTCLabelDecode(CTCLabelDecode):
         return output
 
 
-class NRTRLabelDecode(BaseRecLabelDecode):
-    """ Convert between text-label and text-index """
-
-    def __init__(self, character_dict_path=None, use_space_char=True, **kwargs):
-        super(NRTRLabelDecode, self).__init__(character_dict_path,
-                                              use_space_char)
-
-    def __call__(self, preds, label=None, *args, **kwargs):
-
-        if len(preds) == 2:
-            preds_id = preds[0]
-            preds_prob = preds[1]
-            if isinstance(preds_id, paddle.Tensor):
-                preds_id = preds_id.numpy()
-            if isinstance(preds_prob, paddle.Tensor):
-                preds_prob = preds_prob.numpy()
-            if preds_id[0][0] == 2:
-                preds_idx = preds_id[:, 1:]
-                preds_prob = preds_prob[:, 1:]
-            else:
-                preds_idx = preds_id
-            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
-            if label is None:
-                return text
-            label = self.decode(label[:, 1:])
-        else:
-            if isinstance(preds, paddle.Tensor):
-                preds = preds.numpy()
-            preds_idx = preds.argmax(axis=2)
-            preds_prob = preds.max(axis=2)
-            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
-            if label is None:
-                return text
-            label = self.decode(label[:, 1:])
-        return text, label
-
-    def add_special_char(self, dict_character):
-        dict_character = ['blank', '<unk>', '<s>', '</s>'] + dict_character
-        return dict_character
-
-    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
-        """ convert text-index into text-label. """
-        result_list = []
-        batch_size = len(text_index)
-        for batch_idx in range(batch_size):
-            char_list = []
-            conf_list = []
-            for idx in range(len(text_index[batch_idx])):
-                if text_index[batch_idx][idx] == 3:  # end
-                    break
-                try:
-                    char_list.append(self.character[int(text_index[batch_idx][
-                        idx])])
-                except:
-                    continue
-                if text_prob is not None:
-                    conf_list.append(text_prob[batch_idx][idx])
-                else:
-                    conf_list.append(1)
-            text = ''.join(char_list)
-            result_list.append((text.lower(), np.mean(conf_list).tolist()))
-        return result_list
-
-
 class AttnLabelDecode(BaseRecLabelDecode):
     """ Convert between text-label and text-index """
 
@@ -752,3 +688,122 @@ class PRENLabelDecode(BaseRecLabelDecode):
             return text
         label = self.decode(label)
         return text, label
+
+
+class NRTRLabelDecode(BaseRecLabelDecode):
+    """ Convert between text-label and text-index """
+
+    def __init__(self, character_dict_path=None, use_space_char=True, **kwargs):
+        super(NRTRLabelDecode, self).__init__(character_dict_path,
+                                              use_space_char)
+
+    def __call__(self, preds, label=None, *args, **kwargs):
+
+        if len(preds) == 2:
+            preds_id = preds[0]
+            preds_prob = preds[1]
+            if isinstance(preds_id, paddle.Tensor):
+                preds_id = preds_id.numpy()
+            if isinstance(preds_prob, paddle.Tensor):
+                preds_prob = preds_prob.numpy()
+            if preds_id[0][0] == 2:
+                preds_idx = preds_id[:, 1:]
+                preds_prob = preds_prob[:, 1:]
+            else:
+                preds_idx = preds_id
+            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+            if label is None:
+                return text
+            label = self.decode(label[:, 1:])
+        else:
+            if isinstance(preds, paddle.Tensor):
+                preds = preds.numpy()
+            preds_idx = preds.argmax(axis=2)
+            preds_prob = preds.max(axis=2)
+            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+            if label is None:
+                return text
+            label = self.decode(label[:, 1:])
+        return text, label
+
+    def add_special_char(self, dict_character):
+        dict_character = ['blank', '<unk>', '<s>', '</s>'] + dict_character
+        return dict_character
+
+    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
+        """ convert text-index into text-label. """
+        result_list = []
+        batch_size = len(text_index)
+        for batch_idx in range(batch_size):
+            char_list = []
+            conf_list = []
+            for idx in range(len(text_index[batch_idx])):
+                try:
+                    char_idx = self.character[int(text_index[batch_idx][idx])]
+                except:
+                    continue
+                if char_idx == '</s>':  # end
+                    break
+                char_list.append(char_idx)
+                if text_prob is not None:
+                    conf_list.append(text_prob[batch_idx][idx])
+                else:
+                    conf_list.append(1)
+            text = ''.join(char_list)
+            result_list.append((text.lower(), np.mean(conf_list).tolist()))
+        return result_list
+
+
+class ViTSTRLabelDecode(NRTRLabelDecode):
+    """ Convert between text-label and text-index """
+
+    def __init__(self, character_dict_path=None, use_space_char=False,
+                 **kwargs):
+        super(ViTSTRLabelDecode, self).__init__(character_dict_path,
+                                                use_space_char)
+
+    def __call__(self, preds, label=None, *args, **kwargs):
+        if isinstance(preds, paddle.Tensor):
+            preds = preds[:, 1:].numpy()
+        else:
+            preds = preds[:, 1:]
+        preds_idx = preds.argmax(axis=2)
+        preds_prob = preds.max(axis=2)
+        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+        if label is None:
+            return text
+        label = self.decode(label[:, 1:])
+        return text, label
+
+    def add_special_char(self, dict_character):
+        dict_character = ['<s>', '</s>'] + dict_character
+        return dict_character
+
+
+class ABINetLabelDecode(NRTRLabelDecode):
+    """ Convert between text-label and text-index """
+
+    def __init__(self, character_dict_path=None, use_space_char=False,
+                 **kwargs):
+        super(ABINetLabelDecode, self).__init__(character_dict_path,
+                                                use_space_char)
+
+    def __call__(self, preds, label=None, *args, **kwargs):
+        if isinstance(preds, dict):
+            preds = preds['align'][-1].numpy()
+        elif isinstance(preds, paddle.Tensor):
+            preds = preds.numpy()
+        else:
+            preds = preds
+
+        preds_idx = preds.argmax(axis=2)
+        preds_prob = preds.max(axis=2)
+        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+        if label is None:
+            return text
+        label = self.decode(label)
+        return text, label
+
+    def add_special_char(self, dict_character):
+        dict_character = ['</s>'] + dict_character
+        return dict_character
diff --git a/ppocr/utils/save_load.py b/ppocr/utils/save_load.py
index b09f1db6e938e8eb99148d69efce016f1cbe8628..3647111fddaa848a75873ab689559c63dd6d4814 100644
--- a/ppocr/utils/save_load.py
+++ b/ppocr/utils/save_load.py
@@ -177,9 +177,9 @@ def save_model(model,
             model.backbone.model.save_pretrained(model_prefix)
         metric_prefix = os.path.join(model_prefix, 'metric')
     # save metric and config
+    with open(metric_prefix + '.states', 'wb') as f:
+        pickle.dump(kwargs, f, protocol=2)
     if is_best:
-        with open(metric_prefix + '.states', 'wb') as f:
-            pickle.dump(kwargs, f, protocol=2)
         logger.info('save best model is to {}'.format(model_prefix))
     else:
         logger.info("save model in {}".format(model_prefix))
diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md
index d21ef4aa3813b4ff49dc0580be35c5e2e0483c8f..b6804c6f09b4ee3d17cd2b81e6cc6642c1c1be9a 100644
--- a/ppstructure/table/README.md
+++ b/ppstructure/table/README.md
@@ -18,7 +18,7 @@ The table recognition mainly contains three models
 
 The table recognition flow chart is as follows
 
-![tableocr_pipeline](../../doc/table/tableocr_pipeline_en.jpg)
+![tableocr_pipeline](../docs/table/tableocr_pipeline_en.jpg)
 
 1. The coordinates of single-line text is detected by DB model, and then sends it to the recognition model to get the recognition result.
 2. The table structure and cell coordinates is predicted by RARE model.
diff --git a/ppstructure/table/predict_table.py b/ppstructure/table/predict_table.py index 402d6c24189d044e2ee6d359edef8624d4aae145..aa05459589208dde66a6710322593d091af41325 100644 --- a/ppstructure/table/predict_table.py +++ b/ppstructure/table/predict_table.py @@ -28,6 +28,7 @@ import numpy as np import time import tools.infer.predict_rec as predict_rec import tools.infer.predict_det as predict_det +import tools.infer.utility as utility from ppocr.utils.utility import get_image_file_list, check_and_read_gif from ppocr.utils.logging import get_logger from ppstructure.table.matcher import distance, compute_iou @@ -59,11 +60,37 @@ class TableSystem(object): self.text_recognizer = predict_rec.TextRecognizer( args) if text_recognizer is None else text_recognizer self.table_structurer = predict_strture.TableStructurer(args) + self.benchmark = args.benchmark + self.predictor, self.input_tensor, self.output_tensors, self.config = utility.create_predictor( + args, 'table', logger) + if args.benchmark: + import auto_log + pid = os.getpid() + gpu_id = utility.get_infer_gpuid() + self.autolog = auto_log.AutoLogger( + model_name="table", + model_precision=args.precision, + batch_size=1, + data_shape="dynamic", + save_path=None, #args.save_log_path, + inference_config=self.config, + pids=pid, + process_name=None, + gpu_ids=gpu_id if args.use_gpu else None, + time_keys=[ + 'preprocess_time', 'inference_time', 'postprocess_time' + ], + warmup=0, + logger=logger) def __call__(self, img, return_ocr_result_in_table=False): result = dict() ori_im = img.copy() + if self.benchmark: + self.autolog.times.start() structure_res, elapse = self.table_structurer(copy.deepcopy(img)) + if self.benchmark: + self.autolog.times.stamp() dt_boxes, elapse = self.text_detector(copy.deepcopy(img)) dt_boxes = sorted_boxes(dt_boxes) if return_ocr_result_in_table: @@ -77,13 +104,11 @@ class TableSystem(object): box = [x_min, y_min, x_max, y_max] r_boxes.append(box) dt_boxes = np.array(r_boxes) - logger.debug("dt_boxes num : {}, elapse : {}".format( len(dt_boxes), elapse)) if dt_boxes is None: return None, None img_crop_list = [] - for i in range(len(dt_boxes)): det_box = dt_boxes[i] x0, y0, x1, y1 = expand(2, det_box, ori_im.shape) @@ -92,10 +117,14 @@ class TableSystem(object): rec_res, elapse = self.text_recognizer(img_crop_list) logger.debug("rec_res num : {}, elapse : {}".format( len(rec_res), elapse)) + if self.benchmark: + self.autolog.times.stamp() if return_ocr_result_in_table: result['rec_res'] = rec_res pred_html, pred = self.rebuild_table(structure_res, dt_boxes, rec_res) result['html'] = pred_html + if self.benchmark: + self.autolog.times.end(stamp=True) return result def rebuild_table(self, structure_res, dt_boxes, rec_res): @@ -213,6 +242,8 @@ def main(args): logger.info('excel saved to {}'.format(excel_path)) elapse = time.time() - starttime logger.info("Predict time : {:.3f}s".format(elapse)) + if args.benchmark: + text_sys.autolog.report() if __name__ == "__main__": diff --git a/ppstructure/vqa/README_ch.md b/ppstructure/vqa/README_ch.md index ff513f8f7d603d66a372ce383883f3bcf97a7880..b677dc07bce6c1a752d753b6a1c538b4d3f99271 100644 --- a/ppstructure/vqa/README_ch.md +++ b/ppstructure/vqa/README_ch.md @@ -52,7 +52,7 @@ PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进 ### 3.1 SER -![](../../doc/vqa/result_ser/zh_val_0_ser.jpg) | ![](../../doc/vqa/result_ser/zh_val_42_ser.jpg) +![](../docs/vqa/result_ser/zh_val_0_ser.jpg) | ![](../docs/vqa/result_ser/zh_val_42_ser.jpg) ---|--- 图中不同颜色的框表示不同的类别,对于XFUND数据集,有`QUESTION`, 
`ANSWER`, `HEADER` 3种类别 @@ -65,7 +65,7 @@ PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进 ### 3.2 RE -![](../../doc/vqa/result_re/zh_val_21_re.jpg) | ![](../../doc/vqa/result_re/zh_val_40_re.jpg) +![](../docs/vqa/result_re/zh_val_21_re.jpg) | ![](../docs/vqa/result_re/zh_val_40_re.jpg) ---|--- diff --git a/test_tipc/build_server.sh b/test_tipc/build_server.sh new file mode 100644 index 0000000000000000000000000000000000000000..3173359785290ffa5c6f865efe96705e2b09fae1 --- /dev/null +++ b/test_tipc/build_server.sh @@ -0,0 +1,69 @@ +# Docker image to use: +#registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82 + +# Building the Serving server: + +# The client and app packages can use the release builds directly + +# The server must be rebuilt because custom OPs are added + +apt-get update +apt install -y libcurl4-openssl-dev libbz2-dev +wget https://paddle-serving.bj.bcebos.com/others/centos_ssl.tar && tar xf centos_ssl.tar && rm -rf centos_ssl.tar && mv libcrypto.so.1.0.2k /usr/lib/libcrypto.so.1.0.2k && mv libssl.so.1.0.2k /usr/lib/libssl.so.1.0.2k && ln -sf /usr/lib/libcrypto.so.1.0.2k /usr/lib/libcrypto.so.10 && ln -sf /usr/lib/libssl.so.1.0.2k /usr/lib/libssl.so.10 && ln -sf /usr/lib/libcrypto.so.10 /usr/lib/libcrypto.so && ln -sf /usr/lib/libssl.so.10 /usr/lib/libssl.so + +# Install Go dependencies +rm -rf /usr/local/go +wget -qO- https://paddle-ci.cdn.bcebos.com/go1.17.2.linux-amd64.tar.gz | tar -xz -C /usr/local +export GOROOT=/usr/local/go +export GOPATH=/root/gopath +export PATH=$PATH:$GOPATH/bin:$GOROOT/bin +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.cn,direct +go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.15.2 +go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.15.2 +go install github.com/golang/protobuf/protoc-gen-go@v1.4.3 +go install google.golang.org/grpc@v1.33.0 +go env -w GO111MODULE=auto + +# Download the OpenCV library +wget https://paddle-qa.bj.bcebos.com/PaddleServing/opencv3.tar.gz && tar -xvf opencv3.tar.gz && rm -rf opencv3.tar.gz +export OPENCV_DIR=$PWD/opencv3 + +# clone Serving +git clone https://github.com/PaddlePaddle/Serving.git -b develop --depth=1 +cd Serving +export Serving_repo_path=$PWD +git submodule update --init --recursive +python -m pip install -r python/requirements.txt + + +export PYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") +export PYTHON_LIBRARIES=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR'))") +export PYTHON_EXECUTABLE=`which python` + +export CUDA_PATH='/usr/local/cuda' +export CUDNN_LIBRARY='/usr/local/cuda/lib64/' +export CUDA_CUDART_LIBRARY='/usr/local/cuda/lib64/' +export TENSORRT_LIBRARY_PATH='/usr/local/TensorRT6-cuda10.1-cudnn7/targets/x86_64-linux-gnu/' + +# Copy the custom OP source into Serving +cp -rf ../deploy/pdserving/general_detection_op.cpp ${Serving_repo_path}/core/general-server/op + +# Build the server and export SERVING_BIN +mkdir server-build-gpu-opencv && cd server-build-gpu-opencv +cmake -DPYTHON_INCLUDE_DIR=$PYTHON_INCLUDE_DIR \ -DPYTHON_LIBRARIES=$PYTHON_LIBRARIES \ -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \ -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \ -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \ -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \ -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \ -DOPENCV_DIR=${OPENCV_DIR} \ -DWITH_OPENCV=ON \ -DSERVER=ON \ -DWITH_GPU=ON .. 
+make -j32 + +python -m pip install python/dist/paddle* +export SERVING_BIN=$PWD/core/general-server/serving +cd ../../ diff --git a/test_tipc/common_func.sh b/test_tipc/common_func.sh index 85dfe217253bb1d4c8b92f17d26f138121a2a198..f7d8a1e04adee9d32332eda8cb5913bbaf168481 100644 --- a/test_tipc/common_func.sh +++ b/test_tipc/common_func.sh @@ -57,10 +57,11 @@ function status_check(){ last_status=$1 # the exit code run_command=$2 run_log=$3 + model_name=$4 if [ $last_status -eq 0 ]; then - echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} + echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log} else - echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log} + echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log} fi } diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a0c49a0812c5cb47c848d4b6e68e0b10f835c760 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_PP-OCRv2 +use_opencv:True +infer_model:./inference/ch_PP-OCRv2_det_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--rec_model_dir:./inference/ch_PP-OCRv2_rec_infer/ +--benchmark:True +--det:True +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index fcac6e3984cf3fd45fec9f7b736f794289278b25..32b290a9ed0ca04032e7854d9739e1612a6a095f 100644 --- a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -6,10 +6,10 @@ infer_export:null infer_quant:False inference:tools/infer/predict_system.py --use_gpu:False|True ---enable_mkldnn:False|True ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..24eb620eeb6fc0cf880bb88560805491d6e8e2d5 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,17 @@ +===========================paddle2onnx_params=========================== +model_name:ch_PP-OCRv2 +python:python3.7 +2onnx: paddle2onnx +--det_model_dir:./inference/ch_PP-OCRv2_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams 
+--det_save_file:./inference/det_v2_onnx/model.onnx +--rec_model_dir:./inference/ch_PP-OCRv2_rec_infer/ +--rec_save_file:./inference/rec_v2_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_system.py --rec_image_shape="3,32,320" +--use_gpu:True|False +--det_model_dir: +--rec_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/00008790.jpg \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f0456b5c351d20222e331df6a5019a51b79b6d28 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2 +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_client/ +--rec_dirname:./inference/ch_PP-OCRv2_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..4ad64db03c6bbd017644a947ccdc168fd721ad9f --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2 +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_client/ +--rec_dirname:./inference/ch_PP-OCRv2_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_client/ +serving_dir:./deploy/pdserving +web_service:web_service.py --config=config.yml --opt op.det.concurrency="1" op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..7eccbd725c25938aeabb32499e67536f45eed184 --- /dev/null +++ 
b/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_PP-OCRv2_det +use_opencv:True +infer_model:./inference/ch_PP-OCRv2_det_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +--det:True +--rec:False +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..2e7906076faf4d8002f7746afd6f0cbb4adb8253 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,17 @@ +===========================paddle2onnx_params=========================== +model_name:ch_PP-OCRv2_det +python:python3.7 +2onnx: paddle2onnx +--det_model_dir:./inference/ch_PP-OCRv2_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_save_file:./inference/det_v2_onnx/model.onnx +--rec_model_dir: +--rec_save_file: +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_det.py +--use_gpu:True|False +--det_model_dir: +--rec_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..587a7d7ea6644f4eda200e7ed8f095d9428a1310 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_det +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_client/ +--rec_dirname:null +--rec_serving_server:null +--rec_serving_client:null +serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt index 797cf53a1ded756670709dd1a30c3ef25a9c0906..cab0cb0aa390c7cb1efa6e5d3bc636e9c974acba 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:ch_PPOCRv2_det 
+model_name:ch_PP-OCRv2_det python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:fp32 -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..91a6288eb0d4f3d2a8c968a65916295d25024c32 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:ch_PP-OCRv2_det +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train +norm_train:tools/train.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o +quant_export:null +fpgm_export: +distill_export:null +export1:null +export2:null +inference_dir:Student +infer_model:./inference/ch_PP-OCRv2_det_infer/ +infer_export:null +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,640,640]}];[{float32,[3,960,960]}] diff --git a/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 033d40a80a3569f8bfd408cdb6df37e7ba5ecd0c..85b0ebcb9d747acdf520d4516722526d791b1151 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:ch_PPOCRv2_det +model_name:ch_PP-OCRv2_det python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True 
Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_det/train_pact_infer_python.txt similarity index 91% rename from test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt rename to test_tipc/configs/ch_PP-OCRv2_det/train_pact_infer_python.txt index 038fa850614d45dbefe076b866571cead57b8450..1a20f97fdbcd10881de6e94d0724740ba44edc5c 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det/train_pact_infer_python.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:ch_PPOCRv2_det_PACT +model_name:ch_PP-OCRv2_det_PACT python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:fp32 -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=1|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det/train_ptq_infer_python.txt similarity index 84% rename from test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt rename to test_tipc/configs/ch_PP-OCRv2_det/train_ptq_infer_python.txt index 1aad65b687992155133ed11533a14f642510361d..ccc9e5ced086c2c617359bafdc8772fe92eab8fa 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det/train_ptq_infer_python.txt @@ -1,5 +1,5 @@ ===========================kl_quant_params=========================== -model_name:PPOCRv2_ocr_det_kl +model_name:ch_PP-OCRv2_det_KL python:python3.7 Global.pretrained_model:null Global.save_inference_dir:null @@ -8,10 +8,10 @@ infer_export:deploy/slim/quantization/quant_kl.py -c configs/det/ch_PP-OCRv2/ch_ infer_quant:True inference:tools/infer/predict_det.py --use_gpu:False|True ---enable_mkldnn:True ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 
b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..1975e099d7f8328f521d976d76b1070b365164fe --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_PP-OCRv2_det_KL +use_opencv:True +infer_model:./inference/ch_PP-OCRv2_det_klquant_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +--det:True +--rec:False +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e306b0a92afdc720227704c2e526e7c3cfe98ae6 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_det_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_klquant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_kl_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_kl_client/ +--rec_dirname:./inference/ch_PP-OCRv2_rec_klquant_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_kl_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_kl_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c96d2bfd88ce43f7701ee92a2c2bb8c909feed8 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_det_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_klquant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_kl_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_kl_client/ +--rec_dirname:null +--rec_serving_server:null +--rec_serving_client:null +serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: 
+pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..43ef97d5063d347071501ab4d8874ee3a42e50d7 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_PP-OCRv2_det_PACT +use_opencv:True +infer_model:./inference/ch_PP-OCRv2_det_pact_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +--det:True +--rec:False +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..b2d929b99c80d02c48795dab820da1836f7a2ebe --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_det_PACT +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_pact_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_pact_client/ +--rec_dirname:./inference/ch_PP-OCRv2_rec_pact_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_pact_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_pact_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d5d99ab56d0e17789d1b6b0c593ed42e5ffad320 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_det_PACT +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_pact_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_pact_client/ +--rec_dirname:null +--rec_serving_server:null +--rec_serving_client:null +serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False 
+op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..b1bff00b09cedbe5abac50a471091fb2daedf8f3 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_PP-OCRv2_rec +use_opencv:True +infer_model:./inference/ch_PP-OCRv2_rec_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference/ +null:null +--benchmark:True +--det:False +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e374a5d8216f54eef52e0765be30a1dcc391dc7b --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,17 @@ +===========================paddle2onnx_params=========================== +model_name:ch_PP-OCRv2_rec +python:python3.7 +2onnx: paddle2onnx +--det_model_dir: +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_save_file: +--rec_model_dir:./inference/ch_PP-OCRv2_rec_infer/ +--rec_save_file:./inference/rec_v2_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_rec.py --rec_image_shape="3,32,320" +--use_gpu:True|False +--det_model_dir: +--rec_model_dir: +--image_dir:./inference/rec_inference/ \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e9e90d372e4a2fed645962ce6ad3b8112588f24a --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_rec +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:null +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:null +--det_serving_client:null +--rec_dirname:./inference/ch_PP-OCRv2_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_client/ +serving_dir:./deploy/pdserving +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 
+op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py --det=False +--image_dir:../../inference/rec_inference diff --git a/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt index 188eb3ccc5f7aa2b3724dc1fb7132af090c22ffa..df42b342ba5fa3947a69c2bde5548975ca92d857 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:PPOCRv2_ocr_rec +model_name:ch_PP-OCRv2_rec python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:fp32 -Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:False|True ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..5795bc27e686164578fc246e1fa467efdc52f71f --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:ch_PP-OCRv2_rec +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o +quant_export: +fpgm_export: +distill_export:null +export1:null +export2:null +inference_dir:Student +infer_model:./inference/ch_PP-OCRv2_rec_infer +infer_export:null +infer_quant:False +inference:tools/infer/predict_rec.py +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1|6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference +null:null +--benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,32,320]}] diff --git 
a/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 7c438cb8a3b6907c9ca352e90605d8b4f6fb17fd..1b8800f5cf8f1f864a03dd6a0408aafdd2cec3af 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:PPOCRv2_ocr_rec +model_name:ch_PP-OCRv2_rec python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:False|True ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_rec/train_pact_infer_python.txt similarity index 88% rename from test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt rename to test_tipc/configs/ch_PP-OCRv2_rec/train_pact_infer_python.txt index 98c125229d7f968cd3f650c3885ba4edb0de754c..0ac75eff07a5ed4c17d7fdbe554fd4b5c0f11aed 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec/train_pact_infer_python.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:ch_PPOCRv2_rec_PACT +model_name:ch_PP-OCRv2_rec_PACT python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:fp32 -Global.epoch_num:lite_train_lite_infer=6|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 Global.pretrained_model:pretrain_models/ch_PP-OCRv2_rec_train/best_accuracy @@ -39,11 +39,11 @@ infer_export:null infer_quant:True inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:False|True ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec/train_ptq_infer_python.txt similarity index 76% rename from test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt rename to test_tipc/configs/ch_PP-OCRv2_rec/train_ptq_infer_python.txt index 083a3ae26e726e290ffde4095821cbf3c40f7178..c30e0858efda4df8f9912183c5c31b56413a5252 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec/train_ptq_infer_python.txt @@ -1,17 +1,17 @@ 
===========================kl_quant_params=========================== -model_name:PPOCRv2_ocr_rec_kl +model_name:ch_PP-OCRv2_rec_KL python:python3.7 Global.pretrained_model:null Global.save_inference_dir:null infer_model:./inference/ch_PP-OCRv2_rec_infer/ infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o infer_quant:True -inference:tools/infer/predict_rec.py +inference:tools/infer/predict_rec.py --rec_image_shape="3,32,320" --use_gpu:False|True ---enable_mkldnn:False|True ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True +--use_tensorrt:False --precision:int8 --rec_model_dir: --image_dir:./inference/rec_inference diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..95e4062d145b5c0fc390049dee322b0e85ecee98 --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_PP-OCRv2_rec_KL +use_opencv:True +infer_model:./inference/ch_PP-OCRv2_rec_klquant_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference/ +null:null +--benchmark:True +--det:False +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..34d4007b150385657a69ae93ef4e04bdf1ce149d --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_PP-OCRv2_rec_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_PP-OCRv2_det_klquant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_v2_kl_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_v2_kl_client/ +--rec_dirname:./inference/ch_PP-OCRv2_rec_klquant_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_kl_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_kl_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..3405f2b5876609bf6d97cf3d379613bf38c16cda --- /dev/null +++ b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ 
+===========================serving_params===========================
+model_name:ch_PP-OCRv2_rec_KL
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:null
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:null
+--det_serving_client:null
+--rec_dirname:./inference/ch_PP-OCRv2_rec_klquant_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_kl_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_kl_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py --det=False
+--image_dir:../../inference/rec_inference
diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b807eadd3ff7f38ce670f0db9d28aaacd62e207a
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv2_rec_PACT
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv2_rec_pact_infer
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference/
+null:null
+--benchmark:True
+--det:False
+--rec:True
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2a174b9e31b24ca002a639b1e8675cc6d5592bcc
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv2_rec_PACT
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv2_det_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v2_pact_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v2_pact_client/
+--rec_dirname:./inference/ch_PP-OCRv2_rec_pact_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_pact_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_pact_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2b7ed81172473a7924d1af48937048bbee9d1953
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv2_rec_PACT
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:null
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:null
+--det_serving_client:null
+--rec_dirname:./inference/ch_PP-OCRv2_rec_pact_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_pact_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_pact_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py --det=False
+--image_dir:../../inference/rec_inference
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..794af27d90edd54ff888ab167698d57a92eab7b6
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_det_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr --rec_img_h=48 --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+--rec_model_dir:./inference/ch_PP-OCRv3_rec_infer/
+--benchmark:True
+--det:True
+--rec:True
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..afacdc140515297c75f8674b245b34fce4c9fad9
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================ch_PP-OCRv2===========================
+model_name:ch_PP-OCRv3
+python:python3.7
+infer_model:./inference/ch_PP-OCRv3_det_infer/
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_system.py --rec_image_shape="3,48,320"
+--use_gpu:False|True
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+--rec_model_dir:./inference/ch_PP-OCRv3_rec_infer/
+--benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..133a78c84ff1f3d11765b52b45489f5ba43d63f2
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
@@ -0,0 +1,13 @@
+===========================lite_params===========================
+inference:./ocr_db_crnn system
+runtime_device:ARM_CPU
+det_infer_model:ch_PP-OCRv3_det_infer|ch_PP-OCRv3_det_slim_quant_infer
+rec_infer_model:ch_PP-OCRv3_rec_infer|ch_PP-OCRv3_rec_slim_quant_infer
+cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
+--cpu_threads:1|4
+--det_batch_size:1
+--rec_batch_size:1
+--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
+--config_dir:./config.txt
+--rec_dict_dir:./ppocr_keys_v1.txt
+--benchmark:True
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
new file mode 100644
index 0000000000000000000000000000000000000000..86e49fc100522ec78d38f41ab121d936526b9b4f
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
@@ -0,0 +1,13 @@
+===========================lite_params===========================
+inference:./ocr_db_crnn system
+runtime_device:ARM_GPU_OPENCL
+det_infer_model:ch_PP-OCRv3_det_infer|ch_PP-OCRv3_det_slim_quant_infer
+rec_infer_model:ch_PP-OCRv3_rec_infer|ch_PP-OCRv3_rec_slim_quant_infer
+cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
+--cpu_threads:1|4
+--det_batch_size:1
+--rec_batch_size:1
+--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
+--config_dir:./config.txt
+--rec_dict_dir:./ppocr_keys_v1.txt
+--benchmark:True
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bf2556ef17dcdbd66ff81abe6dbb10639511cde4
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,17 @@
+===========================paddle2onnx_params===========================
+model_name:ch_PP-OCRv3
+python:python3.7
+2onnx: paddle2onnx
+--det_model_dir:./inference/ch_PP-OCRv3_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_save_file:./inference/det_v3_onnx/model.onnx
+--rec_model_dir:./inference/ch_PP-OCRv3_rec_infer/
+--rec_save_file:./inference/rec_v3_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_system.py --rec_image_shape="3,48,320"
+--use_gpu:True|False
+--det_model_dir:
+--rec_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/00008790.jpg
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91c57bed1b9e9bbafc6438766b81781433a06aa2
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6f699ef5c02015d214ef08fb7047a6e9be84e24d
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_client/
+serving_dir:./deploy/pdserving
+web_service:web_service.py --config=config.yml --opt op.det.concurrency="1" op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aecd0dd437ea028c555ffedf4227b55843dd7f2e
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3_det
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_det_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+--det:True
+--rec:False
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cbc101f93b5ba3822caf10ed6ecd55d32f827fce
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
@@ -0,0 +1,13 @@
+===========================lite_params===========================
+inference:./ocr_db_crnn det
+runtime_device:ARM_CPU
+det_infer_model:ch_PP-OCRv3_det_infer|ch_PP-OCRv3_det_slim_quant_infer
+null:null
+null:null
+--cpu_threads:1|4
+--det_batch_size:1
+null:null
+--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
+--config_dir:./config.txt
+null:null
+--benchmark:True
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ba3f5b71e50c7a52ccd61450cec5e5b28935b98b
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
@@ -0,0 +1,13 @@
+===========================lite_params===========================
+inference:./ocr_db_crnn det
+runtime_device:ARM_GPU_OPENCL
+det_infer_model:ch_PP-OCRv3_det_infer|ch_PP-OCRv3_det_slim_quant_infer
+null:null
+null:null
+--cpu_threads:1|4
+--det_batch_size:1
+null:null
+--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
+--config_dir:./config.txt
+null:null
+--benchmark:True
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a448713b1c91dbd190770056b2ee403f2ac56cc6
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,17 @@
+===========================paddle2onnx_params===========================
+model_name:ch_PP-OCRv3_det
+python:python3.7
+2onnx: paddle2onnx
+--det_model_dir:./inference/ch_PP-OCRv3_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_save_file:./inference/det_v3_onnx/model.onnx
+--rec_model_dir:
+--rec_save_file:
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--det_model_dir:
+--rec_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6e2ec22cbd51e686ab64ad832559de1e2442fc98
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_det
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_client/
+--rec_dirname:null
+--rec_serving_server:null
+--rec_serving_client:null
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv3_det/train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a69e0ab81ec6963228e9ab2e39c5bb1d730b6323
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:ch_PP-OCRv3_det
+python:python3.7
+gpu_list:0|0,1
+Global.use_gpu:True|True
+Global.auto_cast:fp32
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+quant_export:null
+fpgm_export:
+distill_export:null
+export1:null
+export2:null
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_det_infer/
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,640,640]}];[{float32,[3,960,960]}]
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7e987125a6681629a592d43f05c2ecfe51dac3f1
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:ch_PP-OCRv3_det
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+Global.use_gpu:True
+Global.auto_cast:fp32
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+quant_export:null
+fpgm_export:
+distill_export:null
+export1:null
+export2:null
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_det_infer/
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_det.py
+--use_gpu:False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,640,640]}];[{float32,[3,960,960]}]
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
similarity index 65%
rename from test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
rename to test_tipc/configs/ch_PP-OCRv3_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
index 1f9bec12ada6894fcffbe697ae4da2f0df95cc62..fe72cfb4e9ec50d24b1b115eca129a0bc9534b9c 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv3_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -1,10 +1,10 @@
 ===========================train_params===========================
-model_name:ch_ppocr_mobile_v2.0_det_PACT
+model_name:ch_PP-OCRv3_det
 python:python3.7
 gpu_list:0|0,1
 Global.use_gpu:True|True
 Global.auto_cast:amp
-Global.epoch_num:lite_train_lite_infer=20|whole_train_whole_infer=300
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
 Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
 Global.pretrained_model:null
@@ -12,9 +12,9 @@ train_model_name:latest
 train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
 null:null
 ##
-trainer:pact_train
-norm_train:null
-pact_train:deploy/slim/quantization/quant.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
+trainer:norm_train
+norm_train:tools/train.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+pact_train:null
 fpgm_train:null
 distill_train:null
 null:null
@@ -27,23 +27,23 @@ null:null
 ===========================infer_params===========================
 Global.save_inference_dir:./output/
 Global.checkpoints:
-norm_export:null
-quant_export:deploy/slim/quantization/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
-fpgm_export:null
+norm_export:tools/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+quant_export:null
+fpgm_export:
 distill_export:null
 export1:null
 export2:null
-inference_dir:null
-train_model:./inference/ch_ppocr_mobile_v2.0_det_prune_infer/
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_det_infer/
 infer_export:null
 infer_quant:False
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
---enable_mkldnn:True|False
---cpu_threads:1|6
+--enable_mkldnn:False
+--cpu_threads:6
 --rec_batch_num:1
---use_tensorrt:False|True
---precision:fp32|fp16|int8
+--use_tensorrt:False
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
diff --git a/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det/train_pact_infer_python.txt
similarity index 78%
rename from test_tipc/configs/ch_PP-OCRv2_det_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
rename to test_tipc/configs/ch_PP-OCRv3_det/train_pact_infer_python.txt
index d922a4a5dad67da81e3c9cf7bed48a0431a88b84..b536e69b05878fac3c2672a063ed7869d7a784fe 100644
--- a/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv3_det/train_pact_infer_python.txt
@@ -1,12 +1,12 @@
 ===========================train_params===========================
-model_name:ch_PPOCRv2_det_PACT
+model_name:ch_PP-OCRv3_det_PACT
 python:python3.7
 gpu_list:0|0,1
 Global.use_gpu:True|True
-Global.auto_cast:amp
-Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500
+Global.auto_cast:fp32
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
 Global.save_model_dir:./output/
-Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
+Train.loader.batch_size_per_card:lite_train_lite_infer=1|whole_train_whole_infer=4
 Global.pretrained_model:null
 train_model_name:latest
 train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
@@ -14,7 +14,7 @@ null:null
 ##
 trainer:pact_train
 norm_train:null
-pact_train:deploy/slim/quantization/quant.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o
+pact_train:deploy/slim/quantization/quant.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
 fpgm_train:null
 distill_train:null
 null:null
@@ -28,22 +28,22 @@ null:null
 Global.save_inference_dir:./output/
 Global.checkpoints:
 norm_export:null
-quant_export:deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o
+quant_export:deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
 fpgm_export:
 distill_export:null
 export1:null
 export2:null
 inference_dir:Student
-infer_model:./inference/ch_PP-OCRv2_det_infer/
+infer_model:./inference/ch_PP-OCRv3_det_infer/
 infer_export:null
 infer_quant:False
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
---enable_mkldnn:True|False
---cpu_threads:1|6
+--enable_mkldnn:False
+--cpu_threads:6
 --rec_batch_num:1
---use_tensorrt:False|True
---precision:fp32|fp16|int8
+--use_tensorrt:False
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
diff --git a/test_tipc/configs/ch_PP-OCRv3_det/train_ptq_infer_python.txt b/test_tipc/configs/ch_PP-OCRv3_det/train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c27e08a64075e15e4fbd8d4ffab7001752365417
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det/train_ptq_infer_python.txt
@@ -0,0 +1,21 @@
+===========================kl_quant_params===========================
+model_name:ch_PP-OCRv3_det_KL
+python:python3.7
+Global.pretrained_model:null
+Global.save_inference_dir:null
+infer_model:./inference/ch_PP-OCRv3_det_infer/
+infer_export:deploy/slim/quantization/quant_kl.py -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml -o
+infer_quant:True
+inference:tools/infer/predict_det.py
+--use_gpu:False|True
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:int8
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a34ffe22ae02c698fcc757c913d3fcadd572df2f
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3_det_KL
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_det_klquant_infer
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+--det:True
+--rec:False
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..22b429760ce6e83293ddaf074898002a3b0c8995
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_det_KL
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_klquant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_kl_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_kl_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_klquant_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_kl_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_kl_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..23dbc49a388b3ff9bfe65a775bf28cf70e0d9a0b
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_det_KL
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_klquant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_kl_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_kl_client/
+--rec_dirname:null
+--rec_serving_server:null
+--rec_serving_client:null
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3198b87552bc8c5e29ad7dd6e158c5b592aa82d5
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3_det_PACT
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_det_pact_infer
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+--det:True
+--rec:False
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d300f4456b4761e885d46c1a4f8ece916d0c2ea
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_det_PACT
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_pact_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_pact_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_pact_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_pact_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_pact_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4546644cbca46a8d11069f4149c3bae8683630c2
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_det_PACT
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_pact_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_pact_client/
+--rec_dirname:null
+--rec_serving_server:null
+--rec_serving_client:null
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml b/test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f704a1dfb5dc2335c353a495dfbc0ce42cf35bf4
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml
@@ -0,0 +1,205 @@
+Global:
+  debug: false
+  use_gpu: true
+  epoch_num: 800
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec_ppocr_v3_distillation
+  save_epoch_step: 3
+  eval_batch_step: [0, 2000]
+  cal_metric_during_train: true
+  pretrained_model:
+  checkpoints:
+  save_inference_dir:
+  use_visualdl: false
+  infer_img: doc/imgs_words/ch/word_1.jpg
+  character_dict_path: ppocr/utils/ppocr_keys_v1.txt
+  max_text_length: &max_text_length 25
+  infer_mode: false
+  use_space_char: true
+  distributed: true
+  save_res_path: ./output/rec/predicts_ppocrv3_distillation.txt
+
+
+Optimizer:
+  name: Adam
+  beta1: 0.9
+  beta2: 0.999
+  lr:
+    name: Piecewise
+    decay_epochs : [700, 800]
+    values : [0.0005, 0.00005]
+    warmup_epoch: 5
+  regularizer:
+    name: L2
+    factor: 3.0e-05
+
+
+Architecture:
+  model_type: &model_type "rec"
+  name: DistillationModel
+  algorithm: Distillation
+  Models:
+    Teacher:
+      pretrained:
+      freeze_params: false
+      return_all_feats: true
+      model_type: *model_type
+      algorithm: SVTR
+      Transform:
+      Backbone:
+        name: MobileNetV1Enhance
+        scale: 0.5
+        last_conv_stride: [1, 2]
+        last_pool_type: avg
+      Head:
+        name: MultiHead
+        head_list:
+          - CTCHead:
+              Neck:
+                name: svtr
+                dims: 64
+                depth: 2
+                hidden_dims: 120
+                use_guide: True
+              Head:
+                fc_decay: 0.00001
+          - SARHead:
+              enc_dim: 512
+              max_text_length: *max_text_length
+    Student:
+      pretrained:
+      freeze_params: false
+      return_all_feats: true
+      model_type: *model_type
+      algorithm: SVTR
+      Transform:
+      Backbone:
+        name: MobileNetV1Enhance
+        scale: 0.5
+        last_conv_stride: [1, 2]
+        last_pool_type: avg
+      Head:
+        name: MultiHead
+        head_list:
+          - CTCHead:
+              Neck:
+                name: svtr
+                dims: 64
+                depth: 2
+                hidden_dims: 120
+                use_guide: True
+              Head:
+                fc_decay: 0.00001
+          - SARHead:
+              enc_dim: 512
+              max_text_length: *max_text_length
+Loss:
+  name: CombinedLoss
+  loss_config_list:
+  - DistillationDMLLoss:
+      weight: 1.0
+      act: "softmax"
+      use_log: true
+      model_name_pairs:
+      - ["Student", "Teacher"]
+      key: head_out
+      multi_head: True
+      dis_head: ctc
+      name: dml_ctc
+  - DistillationDMLLoss:
+      weight: 0.5
+      act: "softmax"
+      use_log: true
+      model_name_pairs:
+      - ["Student", "Teacher"]
+      key: head_out
+      multi_head: True
+      dis_head: sar
+      name: dml_sar
+  - DistillationDistanceLoss:
+      weight: 1.0
+      mode: "l2"
+      model_name_pairs:
+      - ["Student", "Teacher"]
+      key: backbone_out
+  - DistillationCTCLoss:
+      weight: 1.0
+      model_name_list: ["Student", "Teacher"]
+      key: head_out
+      multi_head: True
+  - DistillationSARLoss:
+      weight: 1.0
+      model_name_list: ["Student", "Teacher"]
+      key: head_out
+      multi_head: True
+
+PostProcess:
+  name: DistillationCTCLabelDecode
+  model_name: ["Student", "Teacher"]
+  key: head_out
+  multi_head: True
+
+Metric:
+  name: DistillationMetric
+  base_metric_name: RecMetric
+  main_indicator: acc
+  key: "Student"
+  ignore_space: True
+
+Train:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data/ic15_data/
+    ext_op_transform_idx: 1
+    label_file_list:
+    - ./train_data/ic15_data/rec_gt_train_lite.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - RecConAug:
+        prob: 0.5
+        ext_data_num: 2
+        image_shape: [48, 320, 3]
+    - RecAug:
+    - MultiLabelEncode:
+    - RecResizeImg:
+        image_shape: [3, 48, 320]
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_sar
+        - length
+        - valid_ratio
+  loader:
+    shuffle: true
+    batch_size_per_card: 128
+    drop_last: true
+    num_workers: 4
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data/ic15_data
+    label_file_list:
+    - ./train_data/ic15_data/rec_gt_test_lite.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - MultiLabelEncode:
+    - RecResizeImg:
+        image_shape: [3, 48, 320]
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_sar
+        - length
+        - valid_ratio
+  loader:
+    shuffle: false
+    drop_last: false
+    batch_size_per_card: 128
+    num_workers: 4
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9d6ca2cf5ec6413cb675389218cbb3b82770ba73
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3_rec
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_rec_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr --rec_img_h=48 --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference/
+null:null
+--benchmark:True
+--det:False
+--rec:True
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9114c0acfd8ea41952cbc301fcd53d639de050ef
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,17 @@
+===========================paddle2onnx_params===========================
+model_name:ch_PP-OCRv3_rec
+python:python3.7
+2onnx: paddle2onnx
+--det_model_dir:
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_save_file:
+--rec_model_dir:./inference/ch_PP-OCRv3_rec_infer/
+--rec_save_file:./inference/rec_v3_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
+--use_gpu:True|False
+--det_model_dir:
+--rec_model_dir:
+--image_dir:./inference/rec_inference/
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f01db2e9504ffd400609f0f6556d92ea33ee49ad
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_rec
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:null
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:null
+--det_serving_client:null
+--rec_dirname:./inference/ch_PP-OCRv3_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py --det=False
+--image_dir:../../inference/rec_inference
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv3_rec/train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1feb9d49fce69d92ef141c3a942f858fc68cfaab
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:ch_PP-OCRv3_rec
+python:python3.7
+gpu_list:0|0,1
+Global.use_gpu:True|True
+Global.auto_cast:fp32
+Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./inference/rec_inference
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+quant_export:
+fpgm_export:
+distill_export:null
+export1:null
+export2:null
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_rec_infer
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1|6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference
+null:null
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,48,320]}]
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7fcc8b4418c65b0f98624d92bd3896518f2ed465
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:ch_PP-OCRv3_rec
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+Global.use_gpu:True
+Global.auto_cast:fp32
+Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./inference/rec_inference
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+quant_export:
+fpgm_export:
+distill_export:null
+export1:null
+export2:null
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_rec_infer
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
+--use_gpu:False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1|6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference
+null:null
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,48,320]}]
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..99e3c4247716bdf139440e22ea80c8542b2d9830
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:ch_PP-OCRv3_rec
+python:python3.7
+gpu_list:0|0,1
+Global.use_gpu:True|True
+Global.auto_cast:amp
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./inference/rec_inference
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+quant_export:
+fpgm_export:
+distill_export:null
+export1:null
+export2:null
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_rec_infer
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference
+null:null
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,48,320]}]
diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec/train_pact_infer_python.txt
similarity index 66%
rename from test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
rename to test_tipc/configs/ch_PP-OCRv3_rec/train_pact_infer_python.txt
index e22d8a564b008206611469048b424b528dd379bd..24469a91cfa0fa7d26ff24dba13c9c7e78a5ca10 100644
--- a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/train_pact_infer_python.txt
@@ -1,20 +1,20 @@
 ===========================train_params===========================
-model_name:ch_PPOCRv2_rec_PACT
+model_name:ch_PP-OCRv3_rec_PACT
 python:python3.7
 gpu_list:0|0,1
 Global.use_gpu:True|True
-Global.auto_cast:amp
-Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=300
+Global.auto_cast:fp32
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
 Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128
-Global.pretrained_model:null
+Global.pretrained_model:pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy
 train_model_name:latest
 train_infer_img_dir:./inference/rec_inference
 null:null
 ##
 trainer:pact_train
 norm_train:null
-pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o
+pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
 fpgm_train:null
 distill_train:null
 null:null
@@ -28,26 +28,26 @@ null:null
 Global.save_inference_dir:./output/
 Global.checkpoints:
 norm_export:null
-quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o
+quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
 fpgm_export: null
 distill_export:null
 export1:null
 export2:null
 inference_dir:Student
-infer_model:./inference/ch_PP-OCRv2_rec_slim_quant_infer
+infer_model:./inference/ch_PP-OCRv3_rec_slim_infer
 infer_export:null
 infer_quant:True
-inference:tools/infer/predict_rec.py
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
 --use_gpu:True|False
---enable_mkldnn:True|False
---cpu_threads:1|6
+--enable_mkldnn:False
+--cpu_threads:6
 --rec_batch_num:1|6
---use_tensorrt:False|True
---precision:fp32|int8
+--use_tensorrt:False
+--precision:fp32
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 null:null
 --benchmark:True
 null:null
 ===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,32,320]}]
+random_infer_input:[{float32,[3,48,320]}]
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec/train_ptq_infer_python.txt b/test_tipc/configs/ch_PP-OCRv3_rec/train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d1a8c7c00137661bb1b5cced46f7616877f0b0a2
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec/train_ptq_infer_python.txt
@@ -0,0 +1,21 @@
+===========================kl_quant_params===========================
+model_name:ch_PP-OCRv3_rec_KL
+python:python3.7
+Global.pretrained_model:
+Global.save_inference_dir:null
+infer_model:./inference/ch_PP-OCRv3_rec_infer/
+infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+infer_quant:True
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
+--use_gpu:False|True
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1|6
+--use_tensorrt:False
+--precision:int8
+--rec_model_dir:
+--image_dir:./inference/rec_inference
+null:null
+--benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f1a308fcc8831f4d1ab66dffc69ee3deb9ac0bc3
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3_rec_KL
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_rec_klquant_infer
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr --rec_img_h=48 --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference/
+null:null
+--benchmark:True
+--det:False
+--rec:True
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fa6f04e48c99f732624f60cba8df67783969ced5
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_rec_KL
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_klquant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_kl_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_kl_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_klquant_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_kl_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_kl_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..68586af0d97c658c07f0ce65b8ac5af44e909592
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_rec_KL
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:null
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:null
+--det_serving_client:null
+--rec_dirname:./inference/ch_PP-OCRv3_rec_klquant_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_kl_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_kl_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py --det=False
+--image_dir:../../inference/rec_inference
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8fc1132ca6016d78846527345528db2e003d5ad1
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_PP-OCRv3_rec_PACT
+use_opencv:True
+infer_model:./inference/ch_PP-OCRv3_rec_pact_infer
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr --rec_img_h=48 --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference/
+null:null
+--benchmark:True
+--det:False
+--rec:True
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..008df50d6d32c701ec7ce3814b2f0490916f2d7a
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_rec_PACT
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_pact_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_pact_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_pact_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_pact_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_pact_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..826f586f0c6759f1c13a45891e9ba9c0688554e8
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3_rec_PACT
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:null
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:null
+--det_serving_client:null
+--rec_dirname:./inference/ch_PP-OCRv3_rec_pact_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_pact_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_pact_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py --det=False
+--image_dir:../../inference/rec_inference
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
similarity index 54%
rename from test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
rename to test_tipc/configs/ch_PP-OCRv3_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
index abed3cfba9b3f8c0ed626dbfcbda8621d8787001..c93b307debae630fccf41f29348c0b34761c1bf6 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv3_rec_PACT/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -1,20 +1,20 @@
 ===========================train_params===========================
-model_name:ch_ppocr_mobile_v2.0_rec_PACT
+model_name:ch_PP-OCRv3_rec_PACT
 python:python3.7
-gpu_list:0
+gpu_list:0|0,1
 Global.use_gpu:True|True
 Global.auto_cast:amp
-Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50
 Global.save_model_dir:./output/
-Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128
-Global.checkpoints:null
+Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128
+Global.pretrained_model:pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy
 train_model_name:latest
-train_infer_img_dir:./train_data/ic15_data/test/word_1.png
+train_infer_img_dir:./inference/rec_inference
 null:null
 ##
 trainer:pact_train
 norm_train:null
-pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o
+pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
 fpgm_train:null
 distill_train:null
 null:null
@@ -28,26 +28,26 @@ null:null
 Global.save_inference_dir:./output/
 Global.checkpoints:
 norm_export:null
-quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o
-fpgm_export:null
+quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o
+fpgm_export: null
 distill_export:null
 export1:null
 export2:null
-inference_dir:null
-infer_model:./inference/ch_ppocr_mobile_v2.0_rec_slim_infer/
+inference_dir:Student
+infer_model:./inference/ch_PP-OCRv3_rec_slim_quant_infer
 infer_export:null
-infer_quant:False
-inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_image_shape="3,32,100"
+infer_quant:True
+inference:tools/infer/predict_rec.py --rec_image_shape="3,48,320"
 --use_gpu:True|False
---enable_mkldnn:True|False
---cpu_threads:1|6
---rec_batch_num:1|6
---use_tensorrt:False|True
---precision:fp32|int8
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:6
+--use_tensorrt:False
+--precision:fp32
 --rec_model_dir:
 --image_dir:./inference/rec_inference
---save_log_path:./test/output/
+null:null
 --benchmark:True
 null:null
 ===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,32,320]}]
+random_infer_input:[{float32,[3,48,320]}]
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b42ab9db362b0ba56d795096fdc58a645b425480
--- /dev/null
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,20 @@
+===========================cpp_infer_params===========================
+model_name:ch_ppocr_mobile_v2.0
+use_opencv:True
+infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1
+--use_tensorrt:False
+--precision:fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+--rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--benchmark:True
+--det:True
+--rec:True
+--cls:False
+--use_angle_cls:False
\ No newline at end of file
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index 4a46f0cf09dcf2bb812910f0cf322dda0749b87c..becad991eab2535b2df7862d0d25707ef37f08f8 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -6,10 +6,10 @@ infer_export:null
 infer_quant:False
 inference:tools/infer/predict_system.py
 --use_gpu:False|True
---enable_mkldnn:False|True
---cpu_threads:1|6
+--enable_mkldnn:False
+--cpu_threads:6
 --rec_batch_num:1
---use_tensorrt:False|True
+--use_tensorrt:False
 --precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17c2fbbae2e182c4a7631cb18908180d8c019b4f
--- /dev/null
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,17 @@
+===========================paddle2onnx_params===========================
+model_name:ch_ppocr_mobile_v2.0
+python:python3.7
+2onnx: paddle2onnx
+--det_model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_save_file:./inference/det_mobile_onnx/model.onnx
+--rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--rec_save_file:./inference/rec_mobile_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_system.py --rec_image_shape="3,32,320"
+--use_gpu:True|False
+--det_model_dir:
+--rec_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
\ No newline at end of file
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d18e9f11fdd2ff605cdd8f6c1bcf51ca780eb766
--- /dev/null
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_ppocr_mobile_v2.0
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_mobile_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_mobile_client/
+--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..842c9340176d696c1e43e59491bdcab817f9256e
--- /dev/null
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,23 @@
+===========================serving_params===========================
+model_name:ch_ppocr_mobile_v2.0
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_mobile_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_mobile_client/
+--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_client/
+serving_dir:./deploy/pdserving
+web_service:web_service.py --config=config.yml --opt op.det.concurrency="1" op.rec.concurrency="1"
+op.det.local_service_conf.devices:gpu|null
+op.det.local_service_conf.use_mkldnn:False
+op.det.local_service_conf.thread_num:6
+op.det.local_service_conf.use_trt:False
+op.det.local_service_conf.precision:fp32
+op.det.local_service_conf.model_config:
+op.rec.local_service_conf.model_config:
+pipline:pipeline_http_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
index d0ae17ccb55f40ddf65de936ca3cfc06bdd19475..1d1c2ae283b2103c2e7282186ab1f53bec05cda3 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -1,15 +1,15 @@
 ===========================cpp_infer_params===========================
-model_name:ocr_det
+model_name:ch_ppocr_mobile_v2.0_det
 use_opencv:True
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_quant:False
 inference:./deploy/cpp_infer/build/ppocr
 --use_gpu:True|False
---enable_mkldnn:True|False
---cpu_threads:1|6
+--enable_mkldnn:False
+--cpu_threads:6
 --rec_batch_num:1
---use_tensorrt:False|True
---precision:fp32|fp16
+--use_tensorrt:False
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt
index 7d3f60bd42aad18c045aeee70fc60d2c17a2af13..24bb8746ab7793dbcb4af99102a007aca8b8e16b 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt
@@ -1,5 +1,5 @@
 ===========================infer_params===========================
-model_name:ocr_det
+model_name:ch_ppocr_mobile_v2.0_det
 python:python
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer
 infer_export:null
@@ -7,10 +7,10 @@ infer_quant:False
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
 --enable_mkldnn:False
---cpu_threads:1|6
+--cpu_threads:6
 --rec_batch_num:1
---use_tensorrt:False|True
---precision:fp16|fp32
+--use_tensorrt:False
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
index 160bcdbd88661c3d795eb2faf6b93965598c3e22..00473d1062615834a42e350a727f50233efd831f 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -1,14 +1,17 @@
 ===========================paddle2onnx_params===========================
-model_name:ocr_det_mobile
+model_name:ch_ppocr_mobile_v2.0_det
 python:python3.7
 2onnx: paddle2onnx
---model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--det_model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
---save_file:./inference/det_mobile_onnx/model.onnx
+--det_save_file:./inference/det_mobile_onnx/model.onnx
+--rec_model_dir:
+--rec_save_file:
 --opset_version:10
 --enable_onnx_checker:True
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
 --det_model_dir:
+--rec_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
\ No newline at end of file
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
index 2326c9d2a7a785bf5f94124476fb3c21f91ceed2..c9dd5ad920d58f60ce36a7b489073279f23ba1b7 100644
---
a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -1,18 +1,23 @@ ===========================serving_params=========================== -model_name:ocr_det_mobile -python:python3.7|cpp +model_name:ch_ppocr_mobile_v2.0_det +python:python3.7 trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_client/ +--rec_dirname:null +--rec_serving_server:null +--rec_serving_client:null serving_dir:./deploy/pdserving web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" -op.det.local_service_conf.devices:"0"|null -op.det.local_service_conf.use_mkldnn:True|False -op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_rpc_client.py|pipeline_http_client.py ---image_dir:../../doc/imgs +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt index 269693a86e9e371d52865f48d7fbaccce5d72393..789ed4d23d9c1fa3997daceee0627218aecd4c73 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=100|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=100|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2 infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt deleted file mode 100644 index bfb71b781039493b12875ed4e99c8cd004a2e295..0000000000000000000000000000000000000000 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:ocr_det 
-python:python3.7 -gpu_list:xx.xx.xx.xx,yy.yy.yy.yy;0,1 -Global.use_gpu:True -Global.auto_cast:fp32|amp -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o -distill_export:null -export1:null -export2:null -inference_dir:null -train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..5271f78bb778f9e419da7f9bbbb6b4a6fafb305b --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:ch_ppocr_mobile_v2.0_det +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=100|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train +norm_train:tools/train.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: 
+norm_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,640,640]}];[{float32,[3,960,960]}] \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 593e7ec7ed42af9b65c520852ff6372f89890170..6b3352f741a56124eead2f71c03c783e5c81a70d 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=100|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=100|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2 infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt index 014dad5fc9d87c08a0725f57127f8bf2cb248be3..3f321a1903cdd1076e181c4bb901f1c9dc6d7f58 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt @@ -4,7 +4,7 @@ python:python gpu_list:-1 Global.use_gpu:False Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_pact_infer_python.txt similarity index 89% rename from test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt rename to test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_pact_infer_python.txt index 9d2855d8240a7c42295e6e2439d121504d307b09..04c8d0e194b687f58da1c449a6a0d8d9c1acd25e 100644 --- 
a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_pact_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=20|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_ptq_infer_python.txt similarity index 89% rename from test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt rename to test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_ptq_infer_python.txt index 1039dcad06d63bb1fc1a47b7cc4760cd8d75ed63..2bdec848833b6cf3799370b0337fa00f185a94d5 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_ptq_infer_python.txt @@ -8,10 +8,10 @@ infer_export:deploy/slim/quantization/quant_kl.py -c configs/det/ch_ppocr_v2.0/c infer_quant:True inference:tools/infer/predict_det.py --use_gpu:False|True ---enable_mkldnn:True ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt index 6a63b39d976c0e9693deec097c37eb0ff212d8af..a3f6933a64bd64c8f39a90f72d139ad9fada55bb 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt @@ -4,7 +4,7 @@ python:python gpu_list:0 Global.use_gpu:True Global.auto_cast:fp32|amp -Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,10 +39,10 @@ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2 infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False +--enable_mkldnn:False --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32|fp16|int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_infer_python.txt index 
47ccf2e69e75bc8c215be8d1837e5248d1b4b513..dae3f8053a0264611b5baca0f45839f3550fe6a4 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 5a95f026850b750bfadb85e0955f7426e5e73cb6..150a8a0315b83e8c62765a4aa66429cfd0590928 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..eb2fd0a001ab506f241bed7eac75d96cf4b5d5cb --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_mobile_v2.0_det_KL +use_opencv:True +infer_model:./inference/ch_ppocr_mobile_v2.0_det_klquant_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +--det:True +--rec:False +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt 
b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d9de1cc19a729485845601fe929bf57d74002641 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_det_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_klquant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_kl_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_kl_client/ +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_klquant_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_kl_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_kl_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..049ec784581bddcce066bc049b66f6f0ceff9eed --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_det_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_klquant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_kl_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_kl_client/ +--rec_dirname:null +--rec_serving_server:null +--rec_serving_client:null +serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..17723f41ab762f5316cba59c08ec719aa54f03b1 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_mobile_v2.0_det_PACT +use_opencv:True +infer_model:./inference/ch_ppocr_mobile_v2.0_det_pact_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 
+--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +--det:True +--rec:False +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a49a10f9b9d4e32916dd35bae3380e2ca5bebb9 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_det_PACT +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_pact_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_pact_client/ +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_pact_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_pact_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_pact_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..909d738919bed78d6db04e238818cd4fbbb75e5f --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_det_PACT +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_pact_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_pact_client/ +--rec_dirname:null +--rec_serving_server:null +--rec_serving_client:null +serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..480fb16cddfc4c2f4784cc8fa88512f063f7b2ae --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 
@@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec +use_opencv:True +infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference/ +null:null +--benchmark:True +--det:False +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt index f29b303879f555eaa9a392633aed6e0095f05cfb..5bab0c9e4c77edba302f6b536306816b09df9224 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -1,14 +1,17 @@ ===========================paddle2onnx_params=========================== -model_name:ocr_rec_mobile +model_name:ch_ppocr_mobile_v2.0_rec python:python3.7 2onnx: paddle2onnx ---model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--det_model_dir: --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---save_file:./inference/rec_mobile_onnx/model.onnx +--det_save_file: +--rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--rec_save_file:./inference/rec_mobile_onnx/model.onnx --opset_version:10 --enable_onnx_checker:True -inference:tools/infer/predict_rec.py +inference:tools/infer/predict_rec.py --rec_image_shape="3,32,320" --use_gpu:True|False +--det_model_dir: --rec_model_dir: ---image_dir:./inference/rec_inference \ No newline at end of file +--image_dir:./inference/rec_inference/ \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt index f890eff469ba82b87d2d83000add24cc9d380c49..c0c5291cc480f9f34aa5dcded3eafce7feac89e3 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -1,18 +1,23 @@ ===========================serving_params=========================== -model_name:ocr_rec_mobile -python:python3.7|cpp +model_name:ch_ppocr_mobile_v2.0_rec +python:python3.7 trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--det_dirname:null --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/ +--det_serving_server:null +--det_serving_client:null +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_client/ serving_dir:./deploy/pdserving -web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 -op.rec.local_service_conf.devices:"0"|null -op.rec.local_service_conf.use_mkldnn:True|False 
-op.rec.local_service_conf.thread_num:1|6 -op.rec.local_service_conf.use_trt:False|True -op.rec.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_rpc_client.py|pipeline_http_client.py ---image_dir:../../doc/imgs_words_en \ No newline at end of file +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py --det=False +--image_dir:../../inference/rec_inference diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt index 5086f80d7bad4fb359f152cc1dc7195017aa31c3..f02b93926cac8116844142fe3ecb03959abb0530 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..631118c0a9ab98c10129f12ec1c1cf2bbac46115 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c configs/rec/rec_icdar15_train.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c configs/rec/rec_icdar15_train.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## 
+train_model:./inference/ch_ppocr_mobile_v2.0_rec_train/best_accuracy +infer_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1|6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,32,100]}] diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 30fb939bff646adf301191f88a9a499acf9c61de..bd9c4a8df2565af73b6db24636b6dd132dac0cc2 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_pact_infer_python.txt similarity index 94% rename from test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_infer_python.txt rename to test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_pact_infer_python.txt index 94909ec340c1bbc582dd60aa947f1905580b8966..77472fbdfb21c81bb713df175a135ccf9e652f25 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_pact_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 Global.checkpoints:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_image_shape="3,32,100" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:False|True ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt 
b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_ptq_infer_python.txt similarity index 81% rename from test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt rename to test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_ptq_infer_python.txt index 4b77994f3f68c196b4d6e7a16eb44ec5fdef0d9e..f63fe4c2bb6a17353ecb008d83e2bee9d38aec23 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_ptq_infer_python.txt @@ -6,12 +6,12 @@ Global.save_inference_dir:null infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/ infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/rec_chinese_lite_train_v2.0.yml -o infer_quant:True -inference:tools/infer/predict_rec.py +inference:tools/infer/predict_rec.py --rec_image_shape="3,32,320" --use_gpu:False|True ---enable_mkldnn:True ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:int8 --rec_model_dir: --image_dir:./inference/rec_inference diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt index 77494ac347a73f61d18c070075db476a093c3f62..89daceeb5f4a991699a490b51358d33240e74913 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index fda9cf4ddec6d3ab64045a4a7fdbb62183212021..7abc3e9340fe49a2b0bf0efd5e3c370817cd4e9d 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0 Global.use_gpu:True|True Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|int8 +--use_tensorrt:False 
+--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..adf06257a772cfd16d4109497c6e6ef7c3f8af8b --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_KL +use_opencv:True +infer_model:./inference/ch_ppocr_mobile_v2.0_rec_klquant_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference/ +null:null +--benchmark:True +--det:False +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab518de55ae6b157b26bf332ec3b0afcab71f97a --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_klquant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_kl_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_kl_client/ +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_klquant_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_kl_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_kl_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..948e3dceb3ef7e3f2199be5e417cfc5fc763d975 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_KL +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:null +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:null +--det_serving_client:null +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_klquant_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_kl_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_kl_client/ 
+serving_dir:./deploy/pdserving +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py --det=False +--image_dir:../../inference/rec_inference diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba2df90f75d2c70e043c45ea19d681aabc2b6fb2 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_PACT +use_opencv:True +infer_model:./inference/ch_ppocr_mobile_v2.0_rec_pact_infer +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference/ +null:null +--benchmark:True +--det:False +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..229f70cf353318bf9ccc81f4e5be79dbc096de25 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_PACT +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_mobile_pact_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_mobile_pact_client/ +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_pact_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_pact_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_pact_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f123f365432ab68f2484cc11dd9ef94c8a60ea8e --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== 
+model_name:ch_ppocr_mobile_v2.0_rec_PACT +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:null +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:null +--det_serving_client:null +--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_pact_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_pact_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_pact_client/ +serving_dir:./deploy/pdserving +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py --det=False +--image_dir:../../inference/rec_inference diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c980b2baeef7161a93dea360089b333f2003a31 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_server_v2.0 +use_opencv:True +infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ +--benchmark:True +--det:True +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index 92d7031e884d10df3a5c98bf675d64d63b3cb335..b20596f7a1db6da04307a7e527ef596477d237d3 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -6,8 +6,8 @@ infer_export:null infer_quant:True inference:tools/infer/predict_system.py --use_gpu:False|True ---enable_mkldnn:False|True ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False --precision:fp32 diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e478896a54957481a3ce4c485ac02cd7979233dc --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,17 @@ +===========================paddle2onnx_params=========================== +model_name:ch_ppocr_server_v2.0 +python:python3.7 +2onnx: paddle2onnx +--det_model_dir:./inference/ch_ppocr_server_v2.0_det_infer/ 
+--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_save_file:./inference/det_server_onnx/model.onnx +--rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ +--rec_save_file:./inference/rec_server_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_system.py --rec_image_shape="3,32,320" +--use_gpu:True|False +--det_model_dir: +--rec_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/00008790.jpg \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..bbfec44dbab08dcfb932a922797448e541ea385b --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================serving_params=========================== +model_name:ch_ppocr_server_v2.0 +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_server_v2.0_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_server_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_server_client/ +--rec_dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_server_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_server_client/ +serving_dir:./deploy/pdserving +web_service:-m paddle_serving_server.serve +--op:GeneralDetectionOp GeneralInferOp +--port:8181 +--gpu_id:"0"|null +cpp_client:ocr_cpp_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..8853e709d40a0fba6bedd7ce582425e39b9076ed --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,23 @@ +===========================serving_params=========================== +model_name:ch_ppocr_server_v2.0 +python:python3.7 +trans_model:-m paddle_serving_client.convert +--det_dirname:./inference/ch_ppocr_server_v2.0_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--det_serving_server:./deploy/pdserving/ppocr_det_server_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_server_client/ +--rec_dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_server_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_server_client/ +serving_dir:./deploy/pdserving +web_service:web_service.py --config=config.yml --opt op.det.concurrency="1" op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 
b/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..69ae939e2b6cab5e07bc4e401a83c66324754223 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_server_v2.0_det +use_opencv:True +infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +--det:True +--rec:False +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt index 40fdc11241f3ac966ff01d4c51173f990cc594c5..c8bebf54f2ed2627cce9a22013d1566eb7a7b6ef 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -1,14 +1,17 @@ ===========================paddle2onnx_params=========================== -model_name:ocr_det_server +model_name:ch_ppocr_server_v2.0_det python:python3.7 2onnx: paddle2onnx ---model_dir:./inference/ch_ppocr_server_v2.0_det_infer/ +--det_model_dir:./inference/ch_ppocr_server_v2.0_det_infer/ --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---save_file:./inference/det_server_onnx/model.onnx +--det_save_file:./inference/det_server_onnx/model.onnx +--rec_model_dir: +--rec_save_file: --opset_version:10 --enable_onnx_checker:True inference:tools/infer/predict_det.py --use_gpu:True|False --det_model_dir: ---image_dir:./inference/det_inference \ No newline at end of file +--rec_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/00008790.jpg \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt index ec5464604697e15bdd4e0f7282d23a8e09f4a0b5..018dd1a227064479ebd60570113b122b035e7704 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -1,18 +1,23 @@ ===========================serving_params=========================== -model_name:ocr_det_server -python:python3.7|cpp +model_name:ch_ppocr_server_v2.0_det +python:python3.7 trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_server_v2.0_det_infer/ +--det_dirname:./inference/ch_ppocr_server_v2.0_det_infer/ --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_server_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/ +--det_serving_server:./deploy/pdserving/ppocr_det_server_serving/ +--det_serving_client:./deploy/pdserving/ppocr_det_server_client/ +--rec_dirname:null 
+--rec_serving_server:null +--rec_serving_client:null serving_dir:./deploy/pdserving -web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 -op.det.local_service_conf.devices:"0"|null -op.det.local_service_conf.use_mkldnn:True|False -op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_rpc_client.py|pipeline_http_client.py ---image_dir:../../doc/imgs \ No newline at end of file +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py +--image_dir:../../doc/imgs/1.jpg diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt index 52489fe5298dbeba31ff0ff5abe03c0c49b46e0a..c16ca150029d03052396ca28a6396520e63b3f84 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_lite_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_ infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..12388d967755c54a46efdb915ef047896dddaef7 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:ch_ppocr_server_v2.0_det +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_lite_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/ch_ppocr_server_v2.0_det/det_r50_vd_db.yml -o +quant_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/ch_ppocr_server_v2.0_det/det_r50_vd_db.yml -o +null:null +## 
+===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_det/det_r50_vd_db.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/ch_ppocr_server_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--save_log_path:null +--benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,640,640]}];[{float32,[3,960,960]}] \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 3e3764e8c6f62c72ffb8ceb268c8ceee660d02de..93ed14cb600229e744167f26573cba406880db8e 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=50 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_lite_infer=4 Global.pretrained_model:null @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_ infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbec272cce544e332fd908d4946321a15543fcae --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,20 @@ +===========================cpp_infer_params=========================== +model_name:ch_ppocr_server_v2.0_rec +use_opencv:True +infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_img_h=32 +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference/ +null:null +--benchmark:True +--det:False +--rec:True +--cls:False +--use_angle_cls:False \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt 
b/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt index 05542332e94eab38d8a433a727e04bf0be15f423..462f6090d987ac2c58656136e896e71bcdc3bee1 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -1,14 +1,17 @@ ===========================paddle2onnx_params=========================== -model_name:ocr_rec_server +model_name:ch_ppocr_server_v2.0_rec python:python3.7 2onnx: paddle2onnx ---model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ +--det_model_dir: --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---save_file:./inference/rec_server_onnx/model.onnx +--det_save_file: +--rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ +--rec_save_file:./inference/rec_server_onnx/model.onnx --opset_version:10 --enable_onnx_checker:True -inference:tools/infer/predict_rec.py +inference:tools/infer/predict_rec.py --rec_image_shape="3,32,320" --use_gpu:True|False +--det_model_dir: --rec_model_dir: ---image_dir:./inference/rec_inference \ No newline at end of file +--image_dir:./inference/rec_inference/ \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt index d72abc6054d5f2eccf35f305076b7062fdf49848..7f456320b687549fbcd6d4f0be7a1b4a2969684a 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -1,18 +1,23 @@ ===========================serving_params=========================== -model_name:ocr_rec_server -python:python3.7|cpp +model_name:ch_ppocr_server_v2.0_rec +python:python3.7 trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ +--det_dirname:null --model_filename:inference.pdmodel --params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/ +--det_serving_server:null +--det_serving_client:null +--rec_dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ +--rec_serving_server:./deploy/pdserving/ppocr_rec_server_serving/ +--rec_serving_client:./deploy/pdserving/ppocr_rec_server_client/ serving_dir:./deploy/pdserving -web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 -op.rec.local_service_conf.devices:"0"|null -op.rec.local_service_conf.use_mkldnn:True|False -op.rec.local_service_conf.thread_num:1|6 -op.rec.local_service_conf.use_trt:False|True -op.rec.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_rpc_client.py|pipeline_http_client.py ---image_dir:../../doc/imgs_words_en \ No newline at end of file +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency="1" +op.det.local_service_conf.devices:gpu|null +op.det.local_service_conf.use_mkldnn:False +op.det.local_service_conf.thread_num:6 +op.det.local_service_conf.use_trt:False +op.det.local_service_conf.precision:fp32 +op.det.local_service_conf.model_config: +op.rec.local_service_conf.model_config: +pipline:pipeline_http_client.py --det=False +--image_dir:../../inference/rec_inference diff --git 
a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt index 78a046c503686762688ce08097d68479f1023879..64c0cf455cdd058d1840a9ad1f86954293d2e219 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..9884ab247b80de4ca700bf084cea4faa89c86396 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:ch_ppocr_server_v2.0_rec +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=100 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec/rec_icdar15_train.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec/rec_icdar15_train.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec/rec_icdar15_train.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/ch_ppocr_server_v2.0_rec_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec/rec_icdar15_train.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1|6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,32,100]}] diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt index 78c15047fb522127075591cc9687392af77a300a..63ddaa4a8b2dcb19823034ee85af14b248b109b2 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ 
b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec infer_quant:False inference:tools/infer/predict_rec.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/det_mv3_db_v2_0/train_infer_python.txt b/test_tipc/configs/det_mv3_db_v2_0/train_infer_python.txt index fab8f50d5451f90183d02e30c6529d63af42fe7f..ab3aa59b601db58b48cf18de79f77710611e2596 100644 --- a/test_tipc/configs/det_mv3_db_v2_0/train_infer_python.txt +++ b/test_tipc/configs/det_mv3_db_v2_0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt b/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt index 5634297973bafbdad6c168e369d15520db09aba3..1ec1597a4d50ba1c41cfb076fa7431f170e183bf 100644 --- a/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/det_mv3_east_v2.0/det_mv infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|fp16|int8 +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt b/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt index 661adc4a324e0d51846d05d52a4cf1862661c095..daeec69f84a766e1d6cd2f8906772c27f5f8d048 100644 --- a/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/det_mv3_pse_v2.0/det_mv3 infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|fp16 +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/det_r18_vd_db_v2_0/train_infer_python.txt b/test_tipc/configs/det_r18_vd_db_v2_0/train_infer_python.txt index 77023ef2c18ba1a189b8b066773370c8bb060d87..33e4dbf2337f3799328516119a213bc0f14af9fe 100644 --- a/test_tipc/configs/det_r18_vd_db_v2_0/train_infer_python.txt +++ b/test_tipc/configs/det_r18_vd_db_v2_0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:null infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True 
---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/det_r50_db_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_db_v2.0/train_infer_python.txt index 3fd875711e03f1c31db0948e68f573ba7e113b51..151f2769cc2d97d6a3546f338383dd811aa06ace 100644 --- a/test_tipc/configs/det_r50_db_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_db_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/det/det_r50_vd_db.yml -o infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 +--use_tensorrt:False +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/det_r50_vd_east_v2_0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_east_v2_0/train_infer_python.txt index c1748c5d2fca9690926f6645205084fb9a858185..8477a4fa74f7a0617104aa83617fc6f61b8234b3 100644 --- a/test_tipc/configs/det_r50_vd_east_v2_0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_east_v2_0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_east_v2_0/det infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|fp16|int8 +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/det_r50_vd_pse_v2_0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_pse_v2_0/train_infer_python.txt index 55ebcd3547b2e92c86e1c0007e0a1bcb9758cced..62da89fe1c8e3a7c2b7586eae6b2589f94237a2e 100644 --- a/test_tipc/configs/det_r50_vd_pse_v2_0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_pse_v2_0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_pse_v2_0/det_ infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|fp16|int8 +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --save_log_path:null diff --git a/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt index 16f37ace6fbc469cdd6fd2928e26978508a0841f..b70ef46b4afb3a39f3bbd3d6274f0135a0646a37 100644 --- a/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt @@ -4,16 +4,16 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=5000 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 -Global.pretrained_model:null +Global.pretrained_model:./pretrain_models/det_r50_vd_sast_icdar15_v2.0_train/best_accuracy train_model_name:latest 
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ null:null ## trainer:norm_train -norm_train:tools/train.py -c test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/det_r50_vd_sast_icdar2015.yml -o Global.pretrained_model=./pretrain_models/ResNet50_vd_ssld_pretrained +norm_train:tools/train.py -c test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/det_r50_vd_sast_icdar2015.yml -o pact_train:null fpgm_train:null distill_train:null @@ -39,13 +39,13 @@ infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_sast_icdar15_ infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|int8 +--precision:fp32 --det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ +--image_dir:./inference/ch_det_data_50/all-sum-510/00008790.jpg null:null --benchmark:True --det_algorithm:SAST diff --git a/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt index 5e4c5666b7b90b754c77153612661aa5e01f4cb2..7be5af7ddee0ed0f688980f5d5dca5a99c9705a0 100644 --- a/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_sast_totaltex infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|int8 +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/en_server_pgnetA/train_infer_python.txt b/test_tipc/configs/en_server_pgnetA/train_infer_python.txt index 8a1509baab46d4c52c56b3afbaf23350adc86584..a9dd4e676bf607d654b40f1f5ff21ec735bc9d40 100644 --- a/test_tipc/configs/en_server_pgnetA/train_infer_python.txt +++ b/test_tipc/configs/en_server_pgnetA/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c configs/e2e/e2e_r50_vd_pg.yml -o infer_quant:False inference:tools/infer/predict_e2e.py --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1 --use_tensorrt:False ---precision:fp32|fp16|int8 +--precision:fp32 --e2e_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null diff --git a/test_tipc/configs/en_table_structure/table_mv3.yml b/test_tipc/configs/en_table_structure/table_mv3.yml new file mode 100755 index 0000000000000000000000000000000000000000..adf326bd02aeff4683c8f37a704125b4e426efa9 --- /dev/null +++ b/test_tipc/configs/en_table_structure/table_mv3.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: true + epoch_num: 10 + log_smooth_window: 20 + print_batch_step: 5 + save_model_dir: ./output/table_mv3/ + save_epoch_step: 3 + # evaluation is run every 400 iterations after the 0th iteration + eval_batch_step: [0, 400] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/table/table.jpg + # for data or label process + character_dict_path: ppocr/utils/dict/table_structure_dict.txt + character_type: en + max_text_length: 100 + max_elem_length: 800 + max_cell_num: 500 + infer_mode: False + process_total_num: 0 + process_cut_num: 0 + +Optimizer: + name: 
Adam + beta1: 0.9 + beta2: 0.999 + clip_norm: 5.0 + lr: + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00000 + +Architecture: + model_type: table + algorithm: TableAttn + Backbone: + name: MobileNetV3 + scale: 1.0 + model_name: large + Head: + name: TableAttentionHead + hidden_size: 256 + l2_decay: 0.00001 + loc_type: 2 + max_text_length: 100 + max_elem_length: 800 + max_cell_num: 500 + +Loss: + name: TableAttentionLoss + structure_weight: 100.0 + loc_weight: 10000.0 + +PostProcess: + name: TableLabelDecode + +Metric: + name: TableMetric + main_indicator: acc + +Train: + dataset: + name: PubTabDataSet + data_dir: ./train_data/pubtabnet/train + label_file_path: ./train_data/pubtabnet/train.jsonl + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ResizeTableImage: + max_len: 488 + - TableLabelEncode: + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - PaddingTableImage: + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'structure', 'bbox_list', 'sp_tokens', 'bbox_list_mask'] + loader: + shuffle: True + batch_size_per_card: 32 + drop_last: True + num_workers: 1 + +Eval: + dataset: + name: PubTabDataSet + data_dir: ./train_data/pubtabnet/test/ + label_file_path: ./train_data/pubtabnet/test.jsonl + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ResizeTableImage: + max_len: 488 + - TableLabelEncode: + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - PaddingTableImage: + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'structure', 'bbox_list', 'sp_tokens', 'bbox_list_mask'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 16 + num_workers: 1 diff --git a/test_tipc/configs/en_table_structure/train_infer_python.txt b/test_tipc/configs/en_table_structure/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d9f3b30e16c75281a929130d877b947a23c16190 --- /dev/null +++ b/test_tipc/configs/en_table_structure/train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:en_table_structure +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 +Global.pretrained_model:./pretrain_models/en_ppocr_mobile_v2.0_table_structure_train/best_accuracy +train_model_name:latest +train_infer_img_dir:./ppstructure/docs/table/table.jpg +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +quant_export: +fpgm_export: +distill_export:null +export1:null +export2:null +## +infer_model:./inference/en_ppocr_mobile_v2.0_table_structure_infer +infer_export:null +infer_quant:False +inference:ppstructure/table/predict_table.py --det_model_dir=./inference/en_ppocr_mobile_v2.0_table_det_infer 
--rec_model_dir=./inference/en_ppocr_mobile_v2.0_table_rec_infer --rec_char_dict_path=./ppocr/utils/dict/table_dict.txt --table_char_dict_path=./ppocr/utils/dict/table_structure_dict.txt --image_dir=./ppstructure/docs/table/table.jpg --det_limit_side_len=736 --det_limit_type=min --output ./output/table +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--table_model_dir: +--image_dir:./ppstructure/docs/table/table.jpg +null:null +--benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,488,488]}] diff --git a/test_tipc/configs/en_table_structure/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/en_table_structure/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..41d236c3765fbf6a711c6739d8dee4f41a147039 --- /dev/null +++ b/test_tipc/configs/en_table_structure/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:en_table_structure +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +Global.use_gpu:True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 +Global.pretrained_model:./pretrain_models/en_ppocr_mobile_v2.0_table_structure_train/best_accuracy +train_model_name:latest +train_infer_img_dir:./ppstructure/docs/table/table.jpg +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +quant_export: +fpgm_export: +distill_export:null +export1:null +export2:null +## +infer_model:./inference/en_ppocr_mobile_v2.0_table_structure_infer +infer_export:null +infer_quant:False +inference:ppstructure/table/predict_table.py --det_model_dir=./inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=./inference/en_ppocr_mobile_v2.0_table_rec_infer --rec_char_dict_path=./ppocr/utils/dict/table_dict.txt --table_char_dict_path=./ppocr/utils/dict/table_structure_dict.txt --image_dir=./ppstructure/docs/table/table.jpg --det_limit_side_len=736 --det_limit_type=min --output ./output/table +--use_gpu:False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--table_model_dir: +--image_dir:./ppstructure/docs/table/table.jpg +null:null +--benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,488,488]}] diff --git a/test_tipc/configs/en_table_structure/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/en_table_structure/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..31ac1ed53f2adc9810bc4fd2cf4f874d89d49606 --- /dev/null +++ 
b/test_tipc/configs/en_table_structure/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:en_table_structure +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:amp +Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 +Global.pretrained_model:./pretrain_models/en_ppocr_mobile_v2.0_table_structure_train/best_accuracy +train_model_name:latest +train_infer_img_dir:./ppstructure/docs/table/table.jpg +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +quant_export: +fpgm_export: +distill_export:null +export1:null +export2:null +## +infer_model:./inference/en_ppocr_mobile_v2.0_table_structure_infer +infer_export:null +infer_quant:False +inference:ppstructure/table/predict_table.py --det_model_dir=./inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=./inference/en_ppocr_mobile_v2.0_table_rec_infer --rec_char_dict_path=./ppocr/utils/dict/table_dict.txt --table_char_dict_path=./ppocr/utils/dict/table_structure_dict.txt --image_dir=./ppstructure/docs/table/table.jpg --det_limit_side_len=736 --det_limit_type=min --output ./output/table +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--table_model_dir: +--image_dir:./ppstructure/docs/table/table.jpg +null:null +--benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,488,488]}] diff --git a/test_tipc/configs/en_table_structure/train_pact_infer_python.txt b/test_tipc/configs/en_table_structure/train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f62e8b68bc6c1af06a65a8dfb438d5d63576e123 --- /dev/null +++ b/test_tipc/configs/en_table_structure/train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:en_table_structure_PACT +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=50 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 +Global.pretrained_model:./pretrain_models/en_ppocr_mobile_v2.0_table_structure_train/best_accuracy +train_model_name:latest +train_infer_img_dir:./ppstructure/docs/table/table.jpg +null:null +## +trainer:pact_train +norm_train:null +pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:null +quant_export:deploy/slim/quantization/export_model.py 
-c test_tipc/configs/en_table_structure/table_mv3.yml -o +fpgm_export: +distill_export:null +export1:null +export2:null +## +infer_model:./inference/en_ppocr_mobile_v2.0_table_structure_infer +infer_export:null +infer_quant:True +inference:ppstructure/table/predict_table.py --det_model_dir=./inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=./inference/en_ppocr_mobile_v2.0_table_rec_infer --rec_char_dict_path=./ppocr/utils/dict/table_dict.txt --table_char_dict_path=./ppocr/utils/dict/table_structure_dict.txt --image_dir=./ppstructure/docs/table/table.jpg --det_limit_side_len=736 --det_limit_type=min --output ./output/table +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32 +--table_model_dir: +--image_dir:./ppstructure/docs/table/table.jpg +null:null +--benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,488,488]}] diff --git a/test_tipc/configs/en_table_structure/train_ptq_infer_python.txt b/test_tipc/configs/en_table_structure/train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8f7bbaa50417b97f79596634677fff0a95cb47f --- /dev/null +++ b/test_tipc/configs/en_table_structure/train_ptq_infer_python.txt @@ -0,0 +1,21 @@ +===========================train_params=========================== +model_name:en_table_structure_KL +python:python3.7 +Global.pretrained_model: +Global.save_inference_dir:null +infer_model:./inference/en_ppocr_mobile_v2.0_table_structure_infer/ +infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/en_table_structure/table_mv3.yml -o +infer_quant:True +inference:ppstructure/table/predict_table.py --det_model_dir=./inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=./inference/en_ppocr_mobile_v2.0_table_rec_infer --rec_char_dict_path=./ppocr/utils/dict/table_dict.txt --table_char_dict_path=./ppocr/utils/dict/table_structure_dict.txt --image_dir=./ppstructure/docs/table/table.jpg --det_limit_side_len=736 --det_limit_type=min --output ./output/table +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:int8 +--table_model_dir: +--image_dir:./ppstructure/docs/table/table.jpg +null:null +--benchmark:False +null:null +null:null diff --git a/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml b/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml index 15119bb2a9de02c19684d21ad5a1859db94895ce..8118d587248b7e4797e3a75c897e7b0a3d71b364 100644 --- a/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml +++ b/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml @@ -49,7 +49,7 @@ Architecture: Loss: - name: NRTRLoss + name: CELoss smoothing: True PostProcess: @@ -69,7 +69,7 @@ Train: img_mode: BGR channel_first: False - NRTRLabelEncode: # Class handling label - - NRTRRecResizeImg: + - GrayRecResizeImg: image_shape: [100, 32] resize_type: PIL # PIL or OpenCV - KeepKeys: @@ -90,7 +90,7 @@ Eval: img_mode: BGR channel_first: False - NRTRLabelEncode: # Class handling label - - NRTRRecResizeImg: + - GrayRecResizeImg: image_shape: [100, 32] resize_type: PIL # PIL or OpenCV - KeepKeys: @@ -99,5 +99,5 @@ Eval: shuffle: False drop_last: False batch_size_per_card: 256 - num_workers: 1 + num_workers: 4 use_shared_memory: False diff --git a/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt b/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt index 
de6de5a0caa36fb3ff89d8dbf5c7ff8b7965ca7f..fed8ba26753bb770e062f751a9ba1e8e35fc6843 100644 --- a/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt +++ b/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrt infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/EN_symbol_dict.txt --rec_image_shape="1,32,100" --rec_algorithm="NRTR" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt index e67dd1509054b34bfac6a36eaaca16fa31c0f1a0..39bf9227902480ffe4ed37d454c21d6a163c41bd 100644 --- a/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_none_bilstm_ctc_ infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt index aa3e88d284fe557c109cb8d794e2caecbec7a7ee..593de3ff20aa9890e7d9a02a9e5ca5b130e5a266 100644 --- a/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_none_none_ctc_v2 infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt index c22767c60fa8294aa244536b4c04135f7f7ade02..1b2d9abb0f00467ce92c4f51f97c283bc3e85c5e 100644 --- a/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE" --min_subgraph_size=5 --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 
--rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt index 7a3096eb1e3a94bf3967a80d49b622603ae06ff8..1367c7abd4c9ca5b0c6f1eb291dd2af8d9fa4de4 100644 --- a/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_ctc_v infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="StarNet" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r31_sar/train_infer_python.txt b/test_tipc/configs/rec_r31_sar/train_infer_python.txt index 1a32a3d507d8923a8b51be726c7624ea2049ae14..03ec54abb65ac41d3b5ad4f6e2fdcf7abb34c344 100644 --- a/test_tipc/configs/rec_r31_sar/train_infer_python.txt +++ b/test_tipc/configs/rec_r31_sar/train_infer_python.txt @@ -38,12 +38,12 @@ train_model:./inference/rec_r31_sar_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/dict90.txt --rec_image_shape="3,48,48,160" --rec_algorithm="SAR" ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--use_gpu:True +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt index 02cea56fbe922bb94cceb77c079371f180cac618..46aa3d719051a4f124583f88709026569d95c1c7 100644 --- a/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_none_bilstm_c infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt index 5e7c1d34314cfc8aab1c97d5f6e74b0dd75f496a..3e066d7b72a6a707322b3aabe41ca6d698496433 100644 --- a/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_none_none_ctc infer_quant:False inference:tools/infer/predict_rec.py 
--rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt index 55e937881bec1852fade4f99d81a319b8b2c5b67..1e4f46633efbf36fc78ed2beb7ed883d1483b3b0 100644 --- a/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_at infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE" --min_subgraph_size=5 --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt index 5b5ba0fd01c02b3b16d147edaf93495aeeaab7bf..9e795b66453039696ed5eedb92fba5e25150413c 100644 --- a/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_ct infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="StarNet" --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml b/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml new file mode 100644 index 0000000000000000000000000000000000000000..5b5890e7728b9a1cb629744bd5d56488657c73f3 --- /dev/null +++ b/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml @@ -0,0 +1,106 @@ +Global: + use_gpu: True + epoch_num: 10 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/r45_abinet/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_abinet.txt + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.99 + clip_norm: 20.0 + lr: + name: Piecewise + decay_epochs: [6] + values: [0.0001, 0.00001] + regularizer: + name: 'L2' + factor: 0. 
+ +Architecture: + model_type: rec + algorithm: ABINet + in_channels: 3 + Transform: + Backbone: + name: ResNet45 + + Head: + name: ABINetHead + use_lang: True + iter_size: 3 + + +Loss: + name: CELoss + ignore_index: &ignore_index 100 # Must be greater than the number of character classes + +PostProcess: + name: ABINetLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - ABINetRecAug: + - ABINetLabelEncode: # Class handling label + ignore_index: *ignore_index + - ABINetRecResizeImg: + image_shape: [3, 32, 128] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 96 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - ABINetLabelEncode: # Class handling label + ignore_index: *ignore_index + - ABINetRecResizeImg: + image_shape: [3, 32, 128] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 + use_shared_memory: False diff --git a/test_tipc/configs/rec_r45_abinet/train_infer_python.txt b/test_tipc/configs/rec_r45_abinet/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ecab1bcbbde11fc6d14357b6715033704c2c3316 --- /dev/null +++ b/test_tipc/configs/rec_r45_abinet/train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:rec_abinet +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/rec_r45_abinet_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,128" --rec_algorithm="ABINet" +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:6 +--rec_batch_num:1|6 +--use_tensorrt:False +--precision:fp32 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True 
+null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,32,128]}] diff --git a/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt b/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt index 4877512b689ec87b7b2cd0258a2fac706968322b..b5a5286010a5830dc23031b3e0885247fb6ae53f 100644 --- a/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt +++ b/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt @@ -39,11 +39,11 @@ infer_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/ infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="1,64,256" --rec_algorithm="SRN" --use_space_char=False --min_subgraph_size=3 --use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 +--enable_mkldnn:False +--cpu_threads:6 --rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|int8 +--use_tensorrt:False +--precision:fp32 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_svtrnet/rec_svtrnet.yml b/test_tipc/configs/rec_svtrnet/rec_svtrnet.yml new file mode 100644 index 0000000000000000000000000000000000000000..140b17e0e79f9895167e9c51d86ced173e44a541 --- /dev/null +++ b/test_tipc/configs/rec_svtrnet/rec_svtrnet.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/svtr/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_svtr_tiny.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 8.e-8 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 100] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [32, 100] + out_char_num: 25 + out_channels: 192 + patch_merging: 'Conv' + embed_dim: [64, 128, 256] + depth: [3, 6, 3] + num_heads: [2, 4, 8] + mixer: ['Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: false + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - SVTRRecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 4 + 
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data/ic15_data
+    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
+    transforms:
+      - DecodeImage: # load image
+          img_mode: BGR
+          channel_first: False
+      - CTCLabelEncode: # Class handling label
+      - SVTRRecResizeImg:
+          image_shape: [3, 64, 256]
+          padding: False
+      - KeepKeys:
+          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
+  loader:
+    shuffle: False
+    drop_last: False
+    batch_size_per_card: 256
+    num_workers: 2
diff --git a/test_tipc/configs/rec_svtrnet/train_infer_python.txt b/test_tipc/configs/rec_svtrnet/train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a7e4a24063b2e248f2ab92d5efd257a2837c0a34
--- /dev/null
+++ b/test_tipc/configs/rec_svtrnet/train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:rec_svtrnet
+python:python3.7
+gpu_list:0|0,1
+Global.use_gpu:True|True
+Global.auto_cast:null
+Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./inference/rec_inference
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c test_tipc/configs/rec_svtrnet/rec_svtrnet.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c test_tipc/configs/rec_svtrnet/rec_svtrnet.yml -o
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c test_tipc/configs/rec_svtrnet/rec_svtrnet.yml -o
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+##
+train_model:./inference/rec_svtrnet_train/best_accuracy
+infer_export:tools/export_model.py -c test_tipc/configs/rec_svtrnet/rec_svtrnet.yml -o
+infer_quant:False
+inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,64,256" --rec_algorithm="SVTR"
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1|6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference
+--save_log_path:./test/output/
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,64,256]}]
diff --git a/test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml b/test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a0aed488755f7cb6fed18a5747e9b7f62f57da86
--- /dev/null
+++ b/test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml
@@ -0,0 +1,104 @@
+Global:
+  use_gpu: True
+  epoch_num: 20
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec/vitstr_none_ce/
+  save_epoch_step: 1
+  # evaluation is run every 2000 iterations after the 0th iteration
+  eval_batch_step: [0, 2000]
+  cal_metric_during_train: True
+  pretrained_model:
+  checkpoints:
+  save_inference_dir:
+  use_visualdl: False
+  infer_img: doc/imgs_words_en/word_10.png
+  # for data or label process
+  character_dict_path: ppocr/utils/EN_symbol_dict.txt
+  max_text_length: 25
+  infer_mode: False
+  use_space_char: False
+  save_res_path: ./output/rec/predicts_vitstr.txt
+
+
+Optimizer:
+  name: Adadelta
+  epsilon: 1.e-8
+  rho: 0.95
+  clip_norm: 5.0
+  lr:
+    learning_rate: 1.0
+
+Architecture:
+  model_type: rec
+  algorithm: ViTSTR
+  in_channels: 1
+  Transform:
+  Backbone:
+    name: ViTSTR
+  Neck:
+    name: SequenceEncoder
+    encoder_type: reshape
+  Head:
+    name: CTCHead
+
+Loss:
+  name: CELoss
+  smoothing: False
+  with_all: True
+  ignore_index: &ignore_index 0 # Must be zero or greater than the number of character classes
+
+PostProcess:
+  name: ViTSTRLabelDecode
+
+Metric:
+  name: RecMetric
+  main_indicator: acc
+
+Train:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data/ic15_data/
+    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
+    transforms:
+      - DecodeImage: # load image
+          img_mode: BGR
+          channel_first: False
+      - ViTSTRLabelEncode: # Class handling label
+          ignore_index: *ignore_index
+      - GrayRecResizeImg:
+          image_shape: [224, 224] # W H
+          resize_type: PIL # PIL or OpenCV
+          inter_type: 'Image.BICUBIC'
+          scale: false
+      - KeepKeys:
+          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
+  loader:
+    shuffle: True
+    batch_size_per_card: 48
+    drop_last: True
+    num_workers: 8
+
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data/ic15_data
+    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
+    transforms:
+      - DecodeImage: # load image
+          img_mode: BGR
+          channel_first: False
+      - ViTSTRLabelEncode: # Class handling label
+          ignore_index: *ignore_index
+      - GrayRecResizeImg:
+          image_shape: [224, 224] # W H
+          resize_type: PIL # PIL or OpenCV
+          inter_type: 'Image.BICUBIC'
+          scale: false
+      - KeepKeys:
+          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
+  loader:
+    shuffle: False
+    drop_last: False
+    batch_size_per_card: 256
+    num_workers: 2
diff --git a/test_tipc/configs/rec_vitstr_none_ce/train_infer_python.txt b/test_tipc/configs/rec_vitstr_none_ce/train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..04c5742ea2ddaf01e782d8b39c21bcbcfa0a7ce7
--- /dev/null
+++ b/test_tipc/configs/rec_vitstr_none_ce/train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:rec_vitstr
+python:python3.7
+gpu_list:0|0,1
+Global.use_gpu:True|True
+Global.auto_cast:null
+Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./inference/rec_inference
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml -o
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml -o
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.checkpoints:
+norm_export:tools/export_model.py -c test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml -o
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+##
+train_model:./inference/rec_vitstr_none_ce_train/best_accuracy
+infer_export:tools/export_model.py -c test_tipc/configs/rec_vitstr_none_ce/rec_vitstr_none_ce.yml -o
+infer_quant:False
+inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/EN_symbol_dict.txt --rec_image_shape="1,224,224" --rec_algorithm="ViTSTR"
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:6
+--rec_batch_num:1|6
+--use_tensorrt:False
+--precision:fp32
+--rec_model_dir:
+--image_dir:./inference/rec_inference
+--save_log_path:./test/output/
+--benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[1,224,224]}]
diff --git a/test_tipc/docs/jeston_test_train_inference_python.md b/test_tipc/docs/jeston_test_train_inference_python.md
index 9e9d15fb674ca04558b1f8cb616dc4e44934dbb9..b25175ed0071dd3728ae22c7588ca20535af0505 100644
--- a/test_tipc/docs/jeston_test_train_inference_python.md
+++ b/test_tipc/docs/jeston_test_train_inference_python.md
@@ -115,4 +115,4 @@ ValueError: The results of python_infer_gpu_usetrt_True_precision_fp32_batchsize
 ## 3. More tutorials
 This document is for functional testing only; for richer training and prediction tutorials, see:
 [Model training](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)
diff --git a/test_tipc/docs/mac_test_train_inference_python.md b/test_tipc/docs/mac_test_train_inference_python.md
index ea6e0218b12b342347677ea512d3afd89053261a..c37291a8fc9b239564adce8f556565f51f2a9475 100644
--- a/test_tipc/docs/mac_test_train_inference_python.md
+++ b/test_tipc/docs/mac_test_train_inference_python.md
@@ -152,4 +152,4 @@ ValueError: The results of python_infer_cpu_usemkldnn_False_threads_1_batchsize_
 ## 3. More tutorials
 This document is for functional testing only; for richer training and prediction tutorials, see:
 [Model training](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)
diff --git a/test_tipc/docs/test_serving.md b/test_tipc/docs/test_serving.md
index 8600eff3b95b1fec7d0519c1d26cf2a3232b59e5..71f01c0d5ff47004d70baa17b404c10714a6fb64 100644
--- a/test_tipc/docs/test_serving.md
+++ b/test_tipc/docs/test_serving.md
@@ -1,6 +1,6 @@
 # PaddleServing prediction functional test
 
-The main program of the PaddleServing prediction functional test is `test_serving.sh`, which can test deployment based on PaddleServing.
+The main programs of the PaddleServing prediction functional test are `test_serving_infer_python.sh` and `test_serving_infer_cpp.sh`, which can test deployment based on PaddleServing.
 
 ## 1. Summary of test results
@@ -17,13 +17,23 @@ The main program of the PaddleServing prediction functional test is `test_serving.sh`, which can test
 For the runtime environment, follow the [documentation](./install.md) to configure the TIPC environment.
 
 ### 2.1 Functional test
-First run `prepare.sh` to prepare the data and models, then run `test_serving.sh` for the test; log files with the `serving_infer_*.log` suffix are finally generated under the ```test_tipc/output``` directory.
+**python serving**
+First run `prepare.sh` to prepare the data and models, then run `test_serving_infer_python.sh` for the test; log files with the `serving_infer_python*.log` suffix are finally generated under the ```test_tipc/output``` directory.
 
 ```shell
 bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
 
 # usage:
-bash test_tipc/test_serving.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
+```
+**cpp serving**
+First run `prepare.sh` to prepare the data and models, then run `test_serving_infer_cpp.sh` for the test; log files with the `serving_infer_cpp*.log` suffix are finally generated under the ```test_tipc/output``` directory.
+
+```shell
+bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt "serving_infer"
+
+# usage:
+bash test_tipc/test_serving_infer_cpp.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt "serving_infer"
 ```
 
 #### Run results
diff --git a/test_tipc/docs/test_train_fleet_inference_python.md b/test_tipc/docs/test_train_fleet_inference_python.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fddb5d1634b452f1906a83bca4157dbaec47c81
--- /dev/null
+++ b/test_tipc/docs/test_train_fleet_inference_python.md
@@ -0,0 +1,107 @@
+# Linux GPU/CPU multi-node multi-GPU training and inference test
+
+The main program of the Linux GPU/CPU multi-node multi-GPU training and inference test is `test_train_inference_python.sh`, which can test basic functions such as Python-based model training, evaluation, and inference.
+
+## 1. Summary of test results
+
+- Training:
+
+| Algorithm | Model | Multi-node multi-GPU |
+| :----: | :----: | :----: |
+| PP-OCRv3 | ch_PP-OCRv3_rec | distributed training |
+
+
+- Inference:
+
+| Algorithm | Model | device_CPU | device_GPU | batchsize |
+| :----: | :----: | :----: | :----: | :----: |
+| PP-OCRv3 | ch_PP-OCRv3_rec | supported | - | 1/6 |
+
+
+## 2. Test procedure
+
+For the runtime environment, follow the [documentation](./install.md) to configure the TIPC environment.
+
+### 2.1 Functional test
+
+#### 2.1.1 Modify the configuration file
+
+First, modify the `ip` settings in the configuration file: assuming the two machines have the `ip` addresses `192.168.0.1` and `192.168.0.2`, the `gpu_list` field of the corresponding configuration file must be changed to `gpu_list:192.168.0.1,192.168.0.2;0,1`; the `ip` address can be checked with the `ifconfig` command.
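+
+For reference, a minimal sketch of the edit (the single-machine value here is illustrative, taken from the style of the other TIPC configs in this repo; your file may differ):
+
+```
+# before: single-machine variants (card 0, then cards 0,1)
+gpu_list:0|0,1
+# after: two machines, cards 0,1 on each
+gpu_list:192.168.0.1,192.168.0.2;0,1
+```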
+
+
+#### 2.1.2 Prepare the data
+
+Run `prepare.sh` to prepare the data and models. Taking the configuration file `test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt` as an example, the data preparation command is as follows.
+
+```shell
+bash test_tipc/prepare.sh test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt lite_train_lite_infer
+```
+
+**Note:** since this is multi-node training, the above command must be run on every node to prepare the data.
+
+#### 2.1.3 Set the starting port and start the test
+
+On the multi-machine nodes, use the command below to set the starting port for distributed training (otherwise the later run hangs because no usable port can be found); a value between `10000~20000` is generally recommended.
+
+```shell
+export FLAGS_START_PORT=17000
+```
+
+Taking the configuration file `test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt` as an example, the test is run as follows.
+
+```shell
+bash test_tipc/test_train_inference_python.sh test_tipc/configs/ch_PP-OCRv3_rec/train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt lite_train_lite_infer
+```
+
+**Note:** since this is multi-node training, the above command must be run on every node to start the test.
+
+
+#### 2.1.4 Output
+
+Output like the following indicates that the commands ran successfully.
+
+```bash
+ Run successfully with command - ch_PP-OCRv3_rec - python3.7 -m paddle.distributed.launch --ips=192.168.0.1,192.168.0.2 --gpus=0,1 tools/train.py -c test_tipc/configs/ch_PP-OCRv3_rec/ch_PP-OCRv3_rec_distillation.yml -o Global.use_gpu=True Global.save_model_dir=./test_tipc/output/ch_PP-OCRv3_rec/lite_train_lite_infer/norm_train_gpus_0,1_autocast_fp32_nodes_2 Global.epoch_num=3 Global.auto_cast=fp32 Train.loader.batch_size_per_card=16 !
+ ......
+ Run successfully with command - ch_PP-OCRv3_rec - python3.7 tools/infer/predict_rec.py --rec_image_shape="3,48,320" --use_gpu=False --enable_mkldnn=False --cpu_threads=6 --rec_model_dir=./test_tipc/output/ch_PP-OCRv3_rec/lite_train_lite_infer/norm_train_gpus_0,1_autocast_fp32_nodes_2/Student --rec_batch_num=1 --image_dir=./inference/rec_inference --benchmark=True --precision=fp32 > ./test_tipc/output/ch_PP-OCRv3_rec/lite_train_lite_infer/python_infer_cpu_usemkldnn_False_threads_6_precision_fp32_batchsize_1.log 2>&1 !
+```
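+
+To scan a finished run for failures instead of reading the terminal scroll-back, you can grep the aggregated results log (a sketch; the log location pattern is described below):
+
+```shell
+grep "Run successfully" ./test_tipc/output/ch_PP-OCRv3_rec/lite_train_lite_infer/results_python.log
+```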
+
+When the benchmark flag is enabled, detailed test data can be obtained, including runtime environment info (OS version, CUDA version, CUDNN version, driver version), Paddle version info, parameter settings (device, thread count, whether memory optimization is enabled, etc.), model info (model name, precision), data info (batchsize, whether the input shape is dynamic, etc.), and performance info (CPU and GPU usage, total runtime, preprocessing time, inference time, postprocessing time), as shown below:
+
+```
+[2022/06/02 22:53:35] ppocr INFO: 
+
+[2022/06/02 22:53:35] ppocr INFO: ---------------------- Env info ----------------------
+[2022/06/02 22:53:35] ppocr INFO: OS_version: Ubuntu 16.04
+[2022/06/02 22:53:35] ppocr INFO: CUDA_version: 10.1.243
+[2022/06/02 22:53:35] ppocr INFO: CUDNN_version: 7.6.5
+[2022/06/02 22:53:35] ppocr INFO: drivier_version: 460.32.03
+[2022/06/02 22:53:35] ppocr INFO: ---------------------- Paddle info ----------------------
+[2022/06/02 22:53:35] ppocr INFO: paddle_version: 2.3.0-rc0
+[2022/06/02 22:53:35] ppocr INFO: paddle_commit: 5d4980c052583fec022812d9c29460aff7cdc18b
+[2022/06/02 22:53:35] ppocr INFO: log_api_version: 1.0
+[2022/06/02 22:53:35] ppocr INFO: ----------------------- Conf info -----------------------
+[2022/06/02 22:53:35] ppocr INFO: runtime_device: cpu
+[2022/06/02 22:53:35] ppocr INFO: ir_optim: True
+[2022/06/02 22:53:35] ppocr INFO: enable_memory_optim: True
+[2022/06/02 22:53:35] ppocr INFO: enable_tensorrt: False
+[2022/06/02 22:53:35] ppocr INFO: enable_mkldnn: False
+[2022/06/02 22:53:35] ppocr INFO: cpu_math_library_num_threads: 6
+[2022/06/02 22:53:35] ppocr INFO: ----------------------- Model info ----------------------
+[2022/06/02 22:53:35] ppocr INFO: model_name: rec
+[2022/06/02 22:53:35] ppocr INFO: precision: fp32
+[2022/06/02 22:53:35] ppocr INFO: ----------------------- Data info -----------------------
+[2022/06/02 22:53:35] ppocr INFO: batch_size: 1
+[2022/06/02 22:53:35] ppocr INFO: input_shape: dynamic
+[2022/06/02 22:53:35] ppocr INFO: data_num: 6
+[2022/06/02 22:53:35] ppocr INFO: ----------------------- Perf info -----------------------
+[2022/06/02 22:53:35] ppocr INFO: cpu_rss(MB): 288.957, gpu_rss(MB): None, gpu_util: None%
+[2022/06/02 22:53:35] ppocr INFO: total time spent(s): 0.4824
+[2022/06/02 22:53:35] ppocr INFO: preprocess_time(ms): 0.1136, inference_time(ms): 79.5877, postprocess_time(ms): 0.6945
+```
+
+This information can be inspected in the run log; for the `ch_PP-OCRv3_rec` example above, the log is located at `./test_tipc/output/ch_PP-OCRv3_rec/lite_train_lite_infer/results_python.log`.
+
+If a run fails, the failure log and the corresponding command are also printed to the terminal, and the failure can be diagnosed from that command.
+
+**Note:** during distributed training the model is saved only on the node where `trainer_id=0` runs, so running model export and inference on the other nodes will report errors; this is expected behavior.
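+
+As a quick sanity check (illustrative; the directory name follows the pattern from the log above), the saved `Student` sub-model should be present only on the `trainer_id=0` node:
+
+```shell
+ls ./test_tipc/output/ch_PP-OCRv3_rec/lite_train_lite_infer/norm_train_gpus_0,1_autocast_fp32_nodes_2/Student
+```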
diff --git a/test_tipc/docs/test_train_inference_python.md b/test_tipc/docs/test_train_inference_python.md
index fa969cbe1b9b6fd524efdaad5002afcfcf40e119..99de9400797493f429f8176a9b6b374a76df4872 100644
--- a/test_tipc/docs/test_train_inference_python.md
+++ b/test_tipc/docs/test_train_inference_python.md
@@ -153,4 +153,4 @@ python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/python_*.tx
 ## 3. More tutorials
 This document is for functional testing only; for richer training and prediction tutorials, see:
 [Model training](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)
diff --git a/test_tipc/docs/win_test_train_inference_python.md b/test_tipc/docs/win_test_train_inference_python.md
index 95585af0380be410c230a799b7d61e607de5f654..6e3ce93bb3123133075b9d65c64850a87de5f828 100644
--- a/test_tipc/docs/win_test_train_inference_python.md
+++ b/test_tipc/docs/win_test_train_inference_python.md
@@ -156,4 +156,4 @@ ValueError: The results of python_infer_cpu_usemkldnn_False_threads_1_batchsize_
 ## 3. More tutorials
 This document is for functional testing only; for richer training and prediction tutorials, see:
 [Model training](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[Python prediction engine inference](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 6a8983009e527b8a59b41c1d9b950e8e3f349ef2..2c9bd2901b52ff7d4b6af483a9aa201aef339099 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -44,30 +44,49 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
     # pretrain lite train data
     wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
     wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate
-    if [[ ${model_name} =~ "PPOCRv2_det" ]];then
+    if [[ ${model_name} =~ "ch_PP-OCRv2_det" ]];then
         wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar --no-check-certificate
         cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../
     fi
+    if [[ ${model_name} =~ "ch_PP-OCRv3_det" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar --no-check-certificate
+        cd ./pretrain_models/ && tar xf ch_PP-OCRv3_det_distill_train.tar && cd ../
+    fi
+    if [ ${model_name} == "en_table_structure" ];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar --no-check-certificate
+        cd ./pretrain_models/ && tar xf en_ppocr_mobile_v2.0_table_structure_train.tar && cd ../
+        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar --no-check-certificate
+        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar --no-check-certificate
+        cd ./inference/ && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar && cd ../
+    fi
     cd ./pretrain_models/ && tar xf det_mv3_db_v2.0_train.tar && cd ../
     rm -rf ./train_data/icdar2015
     rm -rf ./train_data/ic15_data
+    rm -rf ./train_data/pubtabnet
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar --no-check-certificate
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate
+    wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/pubtabnet.tar --no-check-certificate
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate
     wget -nc -P ./deploy/slim/prune https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/sen.pickle --no-check-certificate
-    cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar
+    cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar && tar xf pubtabnet.tar
     ln -s ./icdar2015_lite ./icdar2015
+    wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_train_lite.txt --no-check-certificate
+    wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_test_lite.txt --no-check-certificate
     cd ../
     cd ./inference && tar xf rec_inference.tar && cd ../
-    if [ ${model_name} == "ch_PPOCRv2_det" ] || [ ${model_name} == "ch_PPOCRv2_det_PACT" ]; then
+    if [ ${model_name} == 
"ch_PP-OCRv2_det" ] || [ ${model_name} == "ch_PP-OCRv2_det_PACT" ]; then wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate cd ./pretrain_models/ && tar xf ch_ppocr_server_v2.0_det_train.tar && cd ../ fi - if [ ${model_name} == "ch_PPOCRv2_rec" ] || [ ${model_name} == "ch_PPOCRv2_rec_PACT" ]; then + if [ ${model_name} == "ch_PP-OCRv2_rec" ] || [ ${model_name} == "ch_PP-OCRv2_rec_PACT" ]; then wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar --no-check-certificate cd ./pretrain_models/ && tar xf ch_PP-OCRv2_rec_train.tar && cd ../ fi + if [ ${model_name} == "ch_PP-OCRv3_rec" ] || [ ${model_name} == "ch_PP-OCRv3_rec_PACT" ]; then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar --no-check-certificate + cd ./pretrain_models/ && tar xf ch_PP-OCRv3_rec_train.tar && cd ../ + fi if [ ${model_name} == "det_r18_db_v2_0" ]; then wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet18_vd_pretrained.pdparams --no-check-certificate fi @@ -79,8 +98,10 @@ if [ ${MODE} = "lite_train_lite_infer" ];then fi if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ] || [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../ + cd ./pretrain_models && tar xf det_r50_vd_sast_icdar15_v2.0_train.tar && cd ../ fi if [ ${model_name} == "det_mv3_db_v2_0" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate @@ -104,13 +125,22 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate rm -rf ./train_data/icdar2015 rm -rf ./train_data/ic15_data + rm -rf ./train_data/pubtabnet wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate - cd ./train_data/ && tar xf icdar2015.tar && tar xf ic15_data.tar && cd ../ - if [ ${model_name} == "ch_PPOCRv2_det" ]; then + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/pubtabnet.tar --no-check-certificate + cd ./train_data/ && tar xf icdar2015.tar && tar xf ic15_data.tar && tar xf pubtabnet.tar + wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_train_lite.txt --no-check-certificate + wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_test_lite.txt --no-check-certificate + cd ../ + if [ ${model_name} == "ch_PP-OCRv2_det" ]; then wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar --no-check-certificate cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../ fi + if [ ${model_name} == "ch_PP-OCRv3_det" ]; then + wget -nc -P 
./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar --no-check-certificate + cd ./pretrain_models/ && tar xf ch_PP-OCRv3_det_distill_train.tar && cd ../ + fi if [ ${model_name} == "en_server_pgnetA" ]; then wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate @@ -122,19 +152,41 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate cd ./train_data && tar xf total_text.tar && ln -s total_text_lite total_text && cd ../ fi + if [[ ${model_name} =~ "en_table_structure" ]];then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar --no-check-certificate + cd ./pretrain_models/ && tar xf en_ppocr_mobile_v2.0_table_structure_train.tar && cd ../ + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar --no-check-certificate + cd ./inference/ && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar && cd ../ + fi elif [ ${MODE} = "lite_train_whole_infer" ];then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate rm -rf ./train_data/icdar2015 rm -rf ./train_data/ic15_data + rm -rf ./train_data/pubtabnet wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate - cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/pubtabnet.tar --no-check-certificate + cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar && tar xf pubtabnet.tar ln -s ./icdar2015_infer ./icdar2015 + wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_train_lite.txt --no-check-certificate + wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_test_lite.txt --no-check-certificate cd ../ - if [ ${model_name} == "ch_PPOCRv2_det" ]; then + if [ ${model_name} == "ch_PP-OCRv2_det" ]; then wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar --no-check-certificate cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../ fi + if [ ${model_name} == "ch_PP-OCRv3_det" ]; then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_distill_train.tar --no-check-certificate + cd ./pretrain_models/ && tar xf ch_PP-OCRv3_det_distill_train.tar && cd ../ + fi + if [[ ${model_name} =~ "en_table_structure" ]];then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar --no-check-certificate + cd ./pretrain_models/ && tar xf en_ppocr_mobile_v2.0_table_structure_train.tar && cd ../ + wget -nc -P ./inference/ 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar --no-check-certificate + cd ./inference/ && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar && cd ../ + fi elif [ ${MODE} = "whole_infer" ];then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate @@ -169,17 +221,42 @@ elif [ ${MODE} = "whole_infer" ];then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi - if [[ ${model_name} =~ "ch_PPOCRv2_det" ]]; then + if [[ ${model_name} =~ "ch_PP-OCRv2" ]]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_infer.tar && tar xf ch_PP-OCRv2_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ + fi + if [[ ${model_name} =~ "ch_PP-OCRv3" ]]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_infer.tar && tar xf ch_PP-OCRv3_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ + fi + if [[ ${model_name} =~ "ch_PP-OCRv2_det" ]]; then eval_model_name="ch_PP-OCRv2_det_infer" wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ fi - if [[ ${model_name} =~ "PPOCRv2_ocr_rec" ]]; then + if [[ ${model_name} =~ "ch_PP-OCRv3_det" ]]; then + eval_model_name="ch_PP-OCRv3_det_infer" + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ + fi + if [[ ${model_name} =~ "ch_PP-OCRv2_rec" ]]; then eval_model_name="ch_PP-OCRv2_rec_infer" wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_PP-OCRv2_rec_slim_quant_infer.tar && cd ../ fi + if [[ ${model_name} =~ "ch_PP-OCRv3_rec" ]]; then + eval_model_name="ch_PP-OCRv3_rec_infer" + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_PP-OCRv3_rec_slim_infer.tar && cd ../ + fi + if [[ ${model_name} == "ch_PP-OCRv3_rec_PACT" ]]; then + wget -nc -P ./inference/ 
https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_slim_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_rec_slim_infer.tar && cd ../ + fi if [ ${model_name} == "en_server_pgnetA" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate cd ./inference && tar xf en_server_pgnetA.tar && tar xf ch_det_data_50.tar && cd ../ @@ -269,9 +346,15 @@ elif [ ${MODE} = "whole_infer" ];then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar --no-check-certificate cd ./inference/ && tar xf det_r50_vd_east_v2.0_train.tar & cd ../ fi + if [[ ${model_name} =~ "en_table_structure" ]];then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar --no-check-certificate + cd ./inference/ && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar && cd ../ + fi fi -if [ ${MODE} = "klquant_whole_infer" ]; then +if [[ ${model_name} =~ "KL" ]]; then wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar --no-check-certificate cd ./train_data/ && tar xf icdar2015_lite.tar && rm -rf ./icdar2015 && ln -s ./icdar2015_lite ./icdar2015 && cd ../ if [ ${model_name} = "ch_ppocr_mobile_v2.0_det_KL" ]; then @@ -279,41 +362,151 @@ if [ ${MODE} = "klquant_whole_infer" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ fi - if [ ${model_name} = "PPOCRv2_ocr_rec_kl" ]; then + if [ ${model_name} = "ch_PP-OCRv2_rec_KL" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate cd ./train_data/ && tar xf ic15_data.tar && cd ../ cd ./inference && tar xf rec_inference.tar && tar xf ch_PP-OCRv2_rec_infer.tar && cd ../ fi - if [ ${model_name} = "PPOCRv2_ocr_det_kl" ]; then + if [ ${model_name} = "ch_PP-OCRv3_rec_KL" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate + cd ./train_data/ && tar xf ic15_data.tar + wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_train_lite.txt --no-check-certificate + wget -nc -P ./ic15_data/ https://paddleocr.bj.bcebos.com/dataset/rec_gt_test_lite.txt --no-check-certificate + cd ../ + cd ./inference && tar xf rec_inference.tar && tar xf ch_PP-OCRv3_rec_infer.tar && cd ../ + fi + if [ ${model_name} = "ch_PP-OCRv2_det_KL" ]; then wget -nc -P ./inference 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate cd ./inference && tar xf ch_PP-OCRv2_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ fi + if [ ${model_name} = "ch_PP-OCRv3_det_KL" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + fi if [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_KL" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate cd ./train_data/ && tar xf ic15_data.tar && cd ../ cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../ - fi + fi + if [ ${model_name} = "en_table_structure_KL" ];then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar --no-check-certificate + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/pubtabnet.tar --no-check-certificate + cd ./inference/ && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar && cd ../ + cd ./train_data/ && tar xf pubtabnet.tar && cd ../ + fi fi if [ ${MODE} = "cpp_infer" ];then - if [ ${model_name} = "ocr_det" ]; then + if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_det_KL" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_det_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_klquant_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_det_PACT" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_det_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_pact_infer.tar && tar xf ch_det_data_50.tar && cd ../ elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ]; then wget -nc -P ./inference/ 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../ - elif [ ${model_name} = "ocr_system" ]; then + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_KL" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_rec_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_klquant_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_PACT" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_rec_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_pact_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2_det" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2_det_KL" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_det_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_klquant_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2_det_PACT" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_det_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_pact_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2_rec" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + cd ./inference && tar xf 
ch_PP-OCRv2_rec_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2_rec_KL" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_rec_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_rec_klquant_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2_rec_PACT" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_rec_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_rec_pact_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3_det" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3_det_KL" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_det_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_klquant_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3_det_PACT" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_det_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_pact_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3_rec" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_rec_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3_rec_KL" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_rec_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_rec_klquant_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3_rec_PACT" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_rec_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_rec_pact_infer.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P 
./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_server_v2.0" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv2" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_infer.tar && tar xf ch_PP-OCRv2_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ + elif [ ${model_name} = "ch_PP-OCRv3" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_infer.tar && tar xf ch_PP-OCRv3_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ fi fi @@ -323,29 +516,84 @@ if [ ${MODE} = "serving_infer" ];then IFS='|' array=(${python_name_list}) python_name=${array[0]} - ${python_name} -m pip install paddle-serving-server-gpu==0.8.3.post101 - ${python_name} -m pip install paddle_serving_client==0.8.3 - ${python_name} -m pip install paddle-serving-app==0.8.3 - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar - cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../ + ${python_name} -m pip install paddle-serving-server-gpu + ${python_name} -m pip install paddle_serving_client + ${python_name} -m pip install paddle-serving-app + # wget model + if [ ${model_name} == "ch_ppocr_mobile_v2.0_det_KL" ] || [ ${model_name} == "ch_ppocr_mobile_v2.0_rec_KL" ] ; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_det_klquant_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_rec_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_klquant_infer.tar && tar xf 
ch_ppocr_mobile_v2.0_rec_klquant_infer.tar && cd ../ + elif [ ${model_name} == "ch_PP-OCRv2_det_KL" ] || [ ${model_name} == "ch_PP-OCRv2_rec_KL" ] ; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_det_klquant_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_rec_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_klquant_infer.tar && tar xf ch_PP-OCRv2_rec_klquant_infer.tar && cd ../ + elif [ ${model_name} == "ch_PP-OCRv3_det_KL" ] || [ ${model_name} == "ch_PP-OCRv3_rec_KL" ] ; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_det_klquant_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_rec_klquant_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_klquant_infer.tar && tar xf ch_PP-OCRv3_rec_klquant_infer.tar && cd ../ + elif [ ${model_name} == "ch_ppocr_mobile_v2.0_det_PACT" ] || [ ${model_name} == "ch_ppocr_mobile_v2.0_rec_PACT" ] ; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_det_pact_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_ppocr_mobile_v2.0_rec_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_pact_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_pact_infer.tar && cd ../ + elif [ ${model_name} == "ch_PP-OCRv2_det_PACT" ] || [ ${model_name} == "ch_PP-OCRv2_rec_PACT" ] ; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_det_pact_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv2_rec_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_pact_infer.tar && tar xf ch_PP-OCRv2_rec_pact_infer.tar && cd ../ + elif [ ${model_name} == "ch_PP-OCRv3_det_PACT" ] || [ ${model_name} == "ch_PP-OCRv3_rec_PACT" ] ; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_det_pact_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/tipc_fake_model/ch_PP-OCRv3_rec_pact_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_pact_infer.tar && tar xf ch_PP-OCRv3_rec_pact_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_ppocr_mobile_v2.0" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_ppocr_server_v2.0" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_PP-OCRv2" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate + wget -nc -P ./inference 
https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_infer.tar && tar xf ch_PP-OCRv2_rec_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_PP-OCRv3" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_infer.tar && tar xf ch_PP-OCRv3_rec_infer.tar && cd ../ + fi + # wget data + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + cd ./inference && tar xf ch_det_data_50.tar && tar xf rec_inference.tar && cd ../ fi if [ ${MODE} = "paddle2onnx_infer" ];then # prepare serving env python_name=$(func_parser_value "${lines[2]}") - ${python_name} -m pip install install paddle2onnx - ${python_name} -m pip install onnxruntime==1.4.0 + ${python_name} -m pip install paddle2onnx + ${python_name} -m pip install onnxruntime # wget model - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar + if [[ ${model_name} =~ "ch_ppocr_mobile_v2.0" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_ppocr_server_v2.0" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_PP-OCRv2" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv2_det_infer.tar && tar xf ch_PP-OCRv2_rec_infer.tar && cd ../ + elif [[ ${model_name} =~ "ch_PP-OCRv3" ]]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar --no-check-certificate + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ch_PP-OCRv3_det_infer.tar && tar xf ch_PP-OCRv3_rec_infer.tar && cd ../ + fi + # wget data wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar wget -nc -P ./inference/ 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar
-    cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && tar xf rec_inference.tar && cd ../
+    cd ./inference && tar xf ch_det_data_50.tar && tar xf rec_inference.tar && cd ../
 fi
diff --git a/test_tipc/prepare_lite_cpp.sh b/test_tipc/prepare_lite_cpp.sh
index 94af43c88d60aad706009c8bdd1e519a7b7e9acc..9148cb5dd72e16790e10db1cb266e4169cd4fdab 100644
--- a/test_tipc/prepare_lite_cpp.sh
+++ b/test_tipc/prepare_lite_cpp.sh
@@ -51,6 +51,8 @@ for model in ${lite_model_list[*]}; do
         inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
     elif [[ $model =~ "v2.0" ]]; then
         inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
+    elif [[ $model =~ "PP-OCRv3" ]]; then
+        inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/${model}.tar
     else
         echo "Model is wrong, please check."
         exit 3
diff --git a/test_tipc/readme.md b/test_tipc/readme.md
index 8110f0073be248259c7cdd002d209c150a52fb71..effb2f168b6cc91012bef3de120de9e98a21dbda 100644
--- a/test_tipc/readme.md
+++ b/test_tipc/readme.md
@@ -138,6 +138,7 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ch_ppocr_mobil
 ## 4. Start the tests
 The functional tests cover training options such as mixed precision, pruning, and quantization, as well as prediction options such as mkldnn and TensorRT; see the links below for details and tutorials:
 - [Using test_train_inference_python](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation, and inference, including pruning, quantization, and distillation.
+- [Using test_train_fleet_inference_python](./docs/test_train_fleet_inference_python.md): tests basic Python-based multi-node multi-GPU training and inference.
 - [Using test_inference_cpp](docs/test_inference_cpp.md): tests C++-based model inference.
 - [Using test_serving](docs/test_serving.md): tests service deployment based on Paddle Serving.
 - [Using test_lite_arm_cpp](docs/test_lite_arm_cpp.md): tests C++ prediction deployment on ARM CPU based on Paddle-Lite.
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 9885e3937255658d4aacc5835eba634b74ea12a0..c0c7c18a38a46b00c839757e303049135a508691 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -43,7 +43,7 @@ cpp_cls_value=$(func_parser_value "${lines[18]}")
 cpp_use_angle_cls_key=$(func_parser_key "${lines[19]}")
 cpp_use_angle_cls_value=$(func_parser_value "${lines[19]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}/cpp_infer"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_cpp.log"
 
@@ -84,7 +84,7 @@ function func_cpp_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -117,7 +117,7 @@ function func_cpp_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
@@ -178,7 +178,23 @@ if [ ${use_opencv} = "True" ]; then
 else
     OPENCV_DIR=''
 fi
-LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
+if [ -d "paddle_inference/" ] ;then
+    echo "################### download paddle inference skipped ###################"
+else
+    echo "################### download paddle inference ###################"
+    PADDLEInfer=$3
+    if [ "" = "$PADDLEInfer" ];then
+        wget -nc https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate
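+        # $PADDLEInfer (the script's third argument) can point at a custom
+        # paddle_inference.tgz build; the default fetched above is the
+        # CUDA 10.1 / cuDNN 7.6.5 / TensorRT 6.0.1.5 package.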
https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate + else + wget -nc $PADDLEInfer --no-check-certificate + fi + tar zxf paddle_inference.tgz + if [ ! -d "paddle_inference" ]; then + ln -s paddle_inference_install_dir paddle_inference + fi + echo "################### download paddle inference finished ###################" +fi +LIB_DIR=$(pwd)/paddle_inference/ CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`) CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`) @@ -205,11 +221,10 @@ echo "################### build PaddleOCR demo finished ###################" # set cuda device GPUID=$2 if [ ${#GPUID} -le 0 ];then - env=" " + env="export CUDA_VISIBLE_DEVICES=0" else env="export CUDA_VISIBLE_DEVICES=${GPUID}" fi -set CUDA_VISIBLE_DEVICES eval $env diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh index 27276d55b95051e167432600308f42127d784ee6..2a31a468f0d54d1979e82c8f0da98cac6f4edcec 100644 --- a/test_tipc/test_inference_python.sh +++ b/test_tipc/test_inference_python.sh @@ -44,7 +44,7 @@ infer_value1=$(func_parser_value "${lines[17]}") -LOG_PATH="./test_tipc/output" +LOG_PATH="./test_tipc/output/${model_name}/${MODE}" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results_python.log" @@ -88,7 +88,7 @@ function func_inference(){ eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" - status_check $last_status "${command}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done done @@ -113,13 +113,13 @@ function func_inference(){ set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") set_precision=$(func_set_params "${precision_key}" "${precision}") set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") - set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") + set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}") set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" - status_check $last_status "${command}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done @@ -153,7 +153,7 @@ if [ ${MODE} = "whole_infer" ]; then echo ${infer_run_exports[Count]} eval $export_cmd status_export=$? 
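Every call site above now passes a fourth argument to `status_check`. For orientation, a minimal sketch of a compatible helper is shown below; the real implementation lives in `test_tipc/common_func.sh` and may differ in details, so treat this as an assumption rather than the actual code:

```
# Sketch only: a status_check that accepts the new fourth argument and
# tags each line in the results log with the model under test.
function status_check(){
    last_status=$1   # exit code of the command that just ran
    run_command=$2   # the command string, echoed into the log
    run_log=$3       # results_*.log that collects all outcomes
    model_name=$4    # new fourth argument added throughout this patch
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
}
```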
diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh
index 27276d55b95051e167432600308f42127d784ee6..2a31a468f0d54d1979e82c8f0da98cac6f4edcec 100644
--- a/test_tipc/test_inference_python.sh
+++ b/test_tipc/test_inference_python.sh
@@ -44,7 +44,7 @@ infer_value1=$(func_parser_value "${lines[17]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
 
@@ -88,7 +88,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -113,13 +113,13 @@ function func_inference(){
                 set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
                 set_precision=$(func_set_params "${precision_key}" "${precision}")
                 set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
-                set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
+                set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
                 set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
                 command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
@@ -153,7 +153,7 @@ if [ ${MODE} = "whole_infer" ]; then
             echo ${infer_run_exports[Count]}
             eval $export_cmd
             status_export=$?
-            status_check $status_export "${export_cmd}" "${status_log}"
+            status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
         else
             save_infer_dir=${infer_model}
         fi
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 300c61770d2519fad0502147e2cee4a3e4f50ac9..356bc98041fffa8f0437c6419fc72c06d5e719f7 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
 
 # parser params
-dataline=$(awk 'NR==1, NR==12{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
 IFS=$'\n'
 lines=(${dataline})
 
@@ -19,29 +19,33 @@ lines=(${dataline})
 model_name=$(func_parser_value "${lines[1]}")
 python=$(func_parser_value "${lines[2]}")
 padlle2onnx_cmd=$(func_parser_value "${lines[3]}")
-infer_model_dir_key=$(func_parser_key "${lines[4]}")
-infer_model_dir_value=$(func_parser_value "${lines[4]}")
+det_infer_model_dir_key=$(func_parser_key "${lines[4]}")
+det_infer_model_dir_value=$(func_parser_value "${lines[4]}")
 model_filename_key=$(func_parser_key "${lines[5]}")
 model_filename_value=$(func_parser_value "${lines[5]}")
 params_filename_key=$(func_parser_key "${lines[6]}")
 params_filename_value=$(func_parser_value "${lines[6]}")
-save_file_key=$(func_parser_key "${lines[7]}")
-save_file_value=$(func_parser_value "${lines[7]}")
-opset_version_key=$(func_parser_key "${lines[8]}")
-opset_version_value=$(func_parser_value "${lines[8]}")
-enable_onnx_checker_key=$(func_parser_key "${lines[9]}")
-enable_onnx_checker_value=$(func_parser_value "${lines[9]}")
+det_save_file_key=$(func_parser_key "${lines[7]}")
+det_save_file_value=$(func_parser_value "${lines[7]}")
+rec_infer_model_dir_key=$(func_parser_key "${lines[8]}")
+rec_infer_model_dir_value=$(func_parser_value "${lines[8]}")
+rec_save_file_key=$(func_parser_key "${lines[9]}")
+rec_save_file_value=$(func_parser_value "${lines[9]}")
+opset_version_key=$(func_parser_key "${lines[10]}")
+opset_version_value=$(func_parser_value "${lines[10]}")
+enable_onnx_checker_key=$(func_parser_key "${lines[11]}")
+enable_onnx_checker_value=$(func_parser_value "${lines[11]}")
 # parser onnx inference
-inference_py=$(func_parser_value "${lines[10]}")
-use_gpu_key=$(func_parser_key "${lines[11]}")
-use_gpu_value=$(func_parser_value "${lines[11]}")
-det_model_key=$(func_parser_key "${lines[12]}")
-image_dir_key=$(func_parser_key "${lines[13]}")
-image_dir_value=$(func_parser_value "${lines[13]}")
+inference_py=$(func_parser_value "${lines[12]}")
+use_gpu_key=$(func_parser_key "${lines[13]}")
+use_gpu_list=$(func_parser_value "${lines[13]}")
+det_model_key=$(func_parser_key "${lines[14]}")
+rec_model_key=$(func_parser_key "${lines[15]}")
+image_dir_key=$(func_parser_key "${lines[16]}")
+image_dir_value=$(func_parser_value "${lines[16]}")
 
-
-LOG_PATH="./test_tipc/output"
-mkdir -p ./test_tipc/output
+LOG_PATH="./test_tipc/output/${model_name}/paddle2onnx"
+mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_paddle2onnx.log"
 
@@ -50,24 +54,103 @@ function func_paddle2onnx(){
     _script=$1
 
     # paddle2onnx
-    _save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
-    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
-    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
-    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
-    set_save_model=$(func_set_params "${save_file_key}" "${save_file_value}")
-    set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
-    set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
-    trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
-    eval $trans_model_cmd
-    last_status=${PIPESTATUS[0]}
-    status_check $last_status "${trans_model_cmd}" "${status_log}"
+    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+        # trans det
+        set_dirname=$(func_set_params "--model_dir" "${det_infer_model_dir_value}")
+        set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+        set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+        set_save_model=$(func_set_params "--save_file" "${det_save_file_value}")
+        set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
+        set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
+        trans_det_log="${LOG_PATH}/trans_model_det.log"
+        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_det_log} 2>&1 "
+        eval $trans_model_cmd
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+        # trans rec
+        set_dirname=$(func_set_params "--model_dir" "${rec_infer_model_dir_value}")
+        set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+        set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+        set_save_model=$(func_set_params "--save_file" "${rec_save_file_value}")
+        set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
+        set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
+        trans_rec_log="${LOG_PATH}/trans_model_rec.log"
+        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_rec_log} 2>&1 "
+        eval $trans_model_cmd
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    elif [[ ${model_name} =~ "det" ]]; then
+        # trans det
+        set_dirname=$(func_set_params "--model_dir" "${det_infer_model_dir_value}")
+        set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+        set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+        set_save_model=$(func_set_params "--save_file" "${det_save_file_value}")
+        set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
+        set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
+        trans_det_log="${LOG_PATH}/trans_model_det.log"
+        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_det_log} 2>&1 "
+        eval $trans_model_cmd
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    elif [[ ${model_name} =~ "rec" ]]; then
+        # trans rec
+        set_dirname=$(func_set_params "--model_dir" "${rec_infer_model_dir_value}")
+        set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+        set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+        set_save_model=$(func_set_params "--save_file" "${rec_save_file_value}")
+        set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
+        set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
+        trans_rec_log="${LOG_PATH}/trans_model_rec.log"
+        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_rec_log} 2>&1 "
+        eval $trans_model_cmd
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    fi
+
     # python inference
-    set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu_value}")
-    set_model_dir=$(func_set_params "${det_model_key}" "${save_file_value}")
-    set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
-    infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
-    eval $infer_model_cmd
-    status_check $last_status "${infer_model_cmd}" "${status_log}"
+    for use_gpu in ${use_gpu_list[*]}; do
+        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+            _save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
+            set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu}")
+            set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
+            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+                set_det_model_dir=$(func_set_params "${det_model_key}" "${det_save_file_value}")
+                set_rec_model_dir=$(func_set_params "${rec_model_key}" "${rec_save_file_value}")
+                infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_det_model_dir} ${set_rec_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
+            elif [[ ${model_name} =~ "det" ]]; then
+                set_det_model_dir=$(func_set_params "${det_model_key}" "${det_save_file_value}")
+                infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_det_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
+            elif [[ ${model_name} =~ "rec" ]]; then
+                set_rec_model_dir=$(func_set_params "${rec_model_key}" "${rec_save_file_value}")
+                infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_rec_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
+            fi
+            eval $infer_model_cmd
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
+            _save_log_path="${LOG_PATH}/paddle2onnx_infer_gpu.log"
+            set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu}")
+            set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
+            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+                set_det_model_dir=$(func_set_params "${det_model_key}" "${det_save_file_value}")
+                set_rec_model_dir=$(func_set_params "${rec_model_key}" "${rec_save_file_value}")
+                infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_det_model_dir} ${set_rec_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
+            elif [[ ${model_name} =~ "det" ]]; then
+                set_det_model_dir=$(func_set_params "${det_model_key}" "${det_save_file_value}")
+                infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_det_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
+            elif [[ ${model_name} =~ "rec" ]]; then
+                set_rec_model_dir=$(func_set_params "${rec_model_key}" "${rec_save_file_value}")
+                infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_rec_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
+            fi
+            eval $infer_model_cmd
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+        else
+            echo "Does not support hardware other than CPU and GPU Currently!"
+        fi
+    done
 }
diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c1aa3daa6c0c647d7a9f9980e7698a85ee09ca78
--- /dev/null
+++ b/test_tipc/test_ptq_inference_python.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+
+FILENAME=$1
+# MODE be one of ['whole_infer']
+MODE=$2
+
+IFS=$'\n'
+# parser klquant_infer params
+
+dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)
+lines=(${dataline})
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+export_weight=$(func_parser_key "${lines[3]}")
+save_infer_key=$(func_parser_key "${lines[4]}")
+# parser inference model
+infer_model_dir_list=$(func_parser_value "${lines[5]}")
+infer_export_list=$(func_parser_value "${lines[6]}")
+infer_is_quant=$(func_parser_value "${lines[7]}")
+# parser inference
+inference_py=$(func_parser_value "${lines[8]}")
+use_gpu_key=$(func_parser_key "${lines[9]}")
+use_gpu_list=$(func_parser_value "${lines[9]}")
+use_mkldnn_key=$(func_parser_key "${lines[10]}")
+use_mkldnn_list=$(func_parser_value "${lines[10]}")
+cpu_threads_key=$(func_parser_key "${lines[11]}")
+cpu_threads_list=$(func_parser_value "${lines[11]}")
+batch_size_key=$(func_parser_key "${lines[12]}")
+batch_size_list=$(func_parser_value "${lines[12]}")
+use_trt_key=$(func_parser_key "${lines[13]}")
+use_trt_list=$(func_parser_value "${lines[13]}")
+precision_key=$(func_parser_key "${lines[14]}")
+precision_list=$(func_parser_value "${lines[14]}")
+infer_model_key=$(func_parser_key "${lines[15]}")
+image_dir_key=$(func_parser_key "${lines[16]}")
+infer_img_dir=$(func_parser_value "${lines[16]}")
+save_log_key=$(func_parser_key "${lines[17]}")
+save_log_value=$(func_parser_value "${lines[17]}")
+benchmark_key=$(func_parser_key "${lines[18]}")
+benchmark_value=$(func_parser_value "${lines[18]}")
+infer_key1=$(func_parser_key "${lines[19]}")
+infer_value1=$(func_parser_value "${lines[19]}")
+
+
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_python.log"
+
+
+function func_inference(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    _log_path=$4
+    _img_dir=$5
+    _flag_quant=$6
+    # inference
+    for use_gpu in ${use_gpu_list[*]}; do
+        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+            for use_mkldnn in ${use_mkldnn_list[*]}; do
+                for threads in ${cpu_threads_list[*]}; do
+                    for batch_size in ${batch_size_list[*]}; do
+                        for precision in ${precision_list[*]}; do
+                            if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
+                                continue
+                            fi # skip when enable fp16 but disable mkldnn
+                            if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+                                continue
+                            fi # skip when quant model inference but precision is not int8
+                            set_precision=$(func_set_params "${precision_key}" "${precision}")
+
+                            _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+                            set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                            set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                            set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                            set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
+                            set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
+                            set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                            set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
+                            set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                            eval $command
+                            last_status=${PIPESTATUS[0]}
+                            eval "cat ${_save_log_path}"
+                            status_check $last_status "${command}" "${status_log}" "${model_name}"
+                        done
+                    done
+                done
+            done
+        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
+            for use_trt in ${use_trt_list[*]}; do
+                for precision in ${precision_list[*]}; do
+                    if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+                        continue
+                    fi # skip when quant model inference but precision is not int8
+                    for batch_size in ${batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                        set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
+                        set_precision=$(func_set_params "${precision_key}" "${precision}")
+                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                        set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
+                        set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}" "${model_name}"
+
+                    done
+                done
+            done
+        else
+            echo "Does not support hardware other than CPU and GPU Currently!"
+        fi
+    done
+}
+
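Because this script only drives post-training-quantization models, `func_inference` is always called with `_flag_quant` set to "True", so the two skip rules inside it reduce the CPU sweep to int8 runs. A quick stand-alone check that mirrors those conditions:

```
# Enumerate the CPU combinations that survive the skip rules when
# _flag_quant=True (the list values here are illustrative).
for use_mkldnn in True False; do
    for precision in fp32 fp16 int8; do
        [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ] && continue
        [ ${precision} != "int8" ] && continue  # quant model: int8 only
        echo "kept: use_mkldnn=${use_mkldnn} precision=${precision}"
    done
done
# prints: kept: use_mkldnn=True precision=int8
#         kept: use_mkldnn=False precision=int8
```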
+ fi + done +} + +if [ ${MODE} = "whole_infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir="${infer_model}_klquant" + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_log_path="${LOG_PATH}/_export_${Count}.log" + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 " + echo ${infer_run_exports[Count]} + echo $export_cmd + eval $export_cmd + status_export=$? + status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" + else + save_infer_dir=${infer_model} + fi + #run inference + is_quant="True" + func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} + Count=$(($Count + 1)) + done +fi + diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh deleted file mode 100644 index 260b252f4144b66d42902112708f2e45fa0b7ac1..0000000000000000000000000000000000000000 --- a/test_tipc/test_serving.sh +++ /dev/null @@ -1,175 +0,0 @@ -#!/bin/bash -source test_tipc/common_func.sh - -FILENAME=$1 -dataline=$(awk 'NR==1, NR==18{print}' $FILENAME) - -# parser params -IFS=$'\n' -lines=(${dataline}) - -# parser serving -model_name=$(func_parser_value "${lines[1]}") -python_list=$(func_parser_value "${lines[2]}") -trans_model_py=$(func_parser_value "${lines[3]}") -infer_model_dir_key=$(func_parser_key "${lines[4]}") -infer_model_dir_value=$(func_parser_value "${lines[4]}") -model_filename_key=$(func_parser_key "${lines[5]}") -model_filename_value=$(func_parser_value "${lines[5]}") -params_filename_key=$(func_parser_key "${lines[6]}") -params_filename_value=$(func_parser_value "${lines[6]}") -serving_server_key=$(func_parser_key "${lines[7]}") -serving_server_value=$(func_parser_value "${lines[7]}") -serving_client_key=$(func_parser_key "${lines[8]}") -serving_client_value=$(func_parser_value "${lines[8]}") -serving_dir_value=$(func_parser_value "${lines[9]}") -web_service_py=$(func_parser_value "${lines[10]}") -web_use_gpu_key=$(func_parser_key "${lines[11]}") -web_use_gpu_list=$(func_parser_value "${lines[11]}") -web_use_mkldnn_key=$(func_parser_key "${lines[12]}") -web_use_mkldnn_list=$(func_parser_value "${lines[12]}") -web_cpu_threads_key=$(func_parser_key "${lines[13]}") -web_cpu_threads_list=$(func_parser_value "${lines[13]}") -web_use_trt_key=$(func_parser_key "${lines[14]}") -web_use_trt_list=$(func_parser_value "${lines[14]}") -web_precision_key=$(func_parser_key "${lines[15]}") -web_precision_list=$(func_parser_value "${lines[15]}") -pipeline_py=$(func_parser_value "${lines[16]}") -image_dir_key=$(func_parser_key "${lines[17]}") -image_dir_value=$(func_parser_value "${lines[17]}") - -LOG_PATH="../../test_tipc/output" -mkdir -p ./test_tipc/output -status_log="${LOG_PATH}/results_serving.log" - -function func_serving(){ - IFS='|' - _python=$1 - _script=$2 - _model_dir=$3 - # pdserving - set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}") - set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") - set_params_filename=$(func_set_params 
"${params_filename_key}" "${params_filename_value}") - set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}") - set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}") - set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}") - python_list=(${python_list}) - trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" - eval $trans_model_cmd - cd ${serving_dir_value} - unset https_proxy - unset http_proxy - for python in ${python_list[*]}; do - if [ ${python} = "cpp" ]; then - for use_gpu in ${web_use_gpu_list[*]}; do - if [ ${use_gpu} = "null" ]; then - web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293" - eval $web_service_cpp_cmd - last_status=${PIPESTATUS[0]} - status_check $last_status "${web_service_cpp_cmd}" "${status_log}" - sleep 2s - _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" - pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - else - web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0" - eval $web_service_cpp_cmd - sleep 2s - _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" - pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - fi - done - else - # python serving - for use_gpu in ${web_use_gpu_list[*]}; do - if [ ${use_gpu} = "null" ]; then - for use_mkldnn in ${web_use_mkldnn_list[*]}; do - for threads in ${web_cpu_threads_list[*]}; do - set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}") - web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &" - eval $web_service_cmd - last_status=${PIPESTATUS[0]} - status_check $last_status "${web_service_cmd}" "${status_log}" - sleep 2s - for pipeline in ${pipeline_py[*]}; do - _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log" - pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 " - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - done - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - done - done - elif [ ${use_gpu} = "0" ]; then - for use_trt in ${web_use_trt_list[*]}; do - for precision in ${web_precision_list[*]}; do - if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then - continue - fi - if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then - continue - fi - if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then - 
continue - fi - set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}") - if [ ${use_trt} = True ]; then - device_type=2 - fi - set_precision=$(func_set_params "${web_precision_key}" "${precision}") - web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & " - eval $web_service_cmd - last_status=${PIPESTATUS[0]} - status_check $last_status "${web_service_cmd}" "${status_log}" - - sleep 2s - for pipeline in ${pipeline_py[*]}; do - _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log" - pipeline_cmd="${python} ${pipeline} ${set_image_dir}> ${_save_log_path} 2>&1" - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - done - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - done - done - else - echo "Does not support hardware other than CPU and GPU Currently!" - fi - done - fi - done -} - - -#set cuda device -GPUID=$2 -if [ ${#GPUID} -le 0 ];then - env="export CUDA_VISIBLE_DEVICES=0" -else - env="export CUDA_VISIBLE_DEVICES=${GPUID}" -fi -eval $env -echo $env - - -echo "################### run test ###################" - -export Count=0 -IFS="|" -func_serving "${web_service_cmd}" diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh new file mode 100644 index 0000000000000000000000000000000000000000..4088c66f576187215e5945793868d13fc74005af --- /dev/null +++ b/test_tipc/test_serving_infer_cpp.sh @@ -0,0 +1,138 @@ +#!/bin/bash +source test_tipc/common_func.sh + +function func_parser_model_config(){ + strs=$1 + IFS="/" + array=(${strs}) + tmp=${array[-1]} + echo ${tmp} +} + +FILENAME=$1 +dataline=$(awk 'NR==1, NR==19{print}' $FILENAME) +MODE=$2 + +# parser params +IFS=$'\n' +lines=(${dataline}) + +# parser serving +model_name=$(func_parser_value "${lines[1]}") +python_list=$(func_parser_value "${lines[2]}") +trans_model_py=$(func_parser_value "${lines[3]}") +det_infer_model_dir_key=$(func_parser_key "${lines[4]}") +det_infer_model_dir_value=$(func_parser_value "${lines[4]}") +model_filename_key=$(func_parser_key "${lines[5]}") +model_filename_value=$(func_parser_value "${lines[5]}") +params_filename_key=$(func_parser_key "${lines[6]}") +params_filename_value=$(func_parser_value "${lines[6]}") +det_serving_server_key=$(func_parser_key "${lines[7]}") +det_serving_server_value=$(func_parser_value "${lines[7]}") +det_serving_client_key=$(func_parser_key "${lines[8]}") +det_serving_client_value=$(func_parser_value "${lines[8]}") +rec_infer_model_dir_key=$(func_parser_key "${lines[9]}") +rec_infer_model_dir_value=$(func_parser_value "${lines[9]}") +rec_serving_server_key=$(func_parser_key "${lines[10]}") +rec_serving_server_value=$(func_parser_value "${lines[10]}") +rec_serving_client_key=$(func_parser_key "${lines[11]}") +rec_serving_client_value=$(func_parser_value "${lines[11]}") +det_server_value=$(func_parser_model_config "${lines[7]}") +det_client_value=$(func_parser_model_config "${lines[8]}") +rec_server_value=$(func_parser_model_config "${lines[10]}") +rec_client_value=$(func_parser_model_config "${lines[11]}") +serving_dir_value=$(func_parser_value "${lines[12]}") +web_service_py=$(func_parser_value "${lines[13]}") +op_key=$(func_parser_key "${lines[14]}") +op_value=$(func_parser_value "${lines[14]}") +port_key=$(func_parser_key "${lines[15]}") +port_value=$(func_parser_value "${lines[15]}") 
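`func_parser_model_config` above keeps only the last `/`-separated segment of a config value, e.g. turning `./deploy/pdserving/ppocr_det_serving` into `ppocr_det_serving` (an example value). It complements `func_parser_key` and `func_parser_value` from `test_tipc/common_func.sh`, which split each `key:value` config line on `:`; a minimal sketch of that pair, assumed rather than copied from the real file:

```
function func_parser_key(){
    strs=$1
    IFS=":"            # config lines have the form key:value
    array=(${strs})
    echo ${array[0]}   # the key, e.g. --det_serving_server
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}   # the value, e.g. ./deploy/pdserving/ppocr_det_serving
}
```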
+function func_serving(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    # pdserving
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    # trans det
+    set_dirname=$(func_set_params "--dirname" "${det_infer_model_dir_value}")
+    set_serving_server=$(func_set_params "--serving_server" "${det_serving_server_value}")
+    set_serving_client=$(func_set_params "--serving_client" "${det_serving_client_value}")
+    python_list=(${python_list})
+    trans_det_log="${LOG_PATH}/cpp_trans_model_det.log"
+    trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1 "
+    eval $trans_model_cmd
+    cp "deploy/pdserving/serving_client_conf.prototxt" ${det_serving_client_value}
+    # trans rec
+    set_dirname=$(func_set_params "--dirname" "${rec_infer_model_dir_value}")
+    set_serving_server=$(func_set_params "--serving_server" "${rec_serving_server_value}")
+    set_serving_client=$(func_set_params "--serving_client" "${rec_serving_client_value}")
+    python_list=(${python_list})
+    trans_rec_log="${LOG_PATH}/cpp_trans_model_rec.log"
+    trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_rec_log} 2>&1 "
+    eval $trans_model_cmd
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
+    python_list=(${python_list})
+    cd ${serving_dir_value}
+    # cpp serving
+    for gpu_id in ${gpu_value[*]}; do
+        if [ ${gpu_id} = "null" ]; then
+            server_log_path="${LOG_PATH}/cpp_server_cpu.log"
+            web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} > ${server_log_path} 2>&1 "
+            eval $web_service_cpp_cmd
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            sleep 5s
+            _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
+            cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${det_client_value} ${rec_client_value} > ${_save_log_path} 2>&1"
+            eval $cpp_client_cmd
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
+            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
+        else
+            server_log_path="${LOG_PATH}/cpp_server_gpu.log"
+            web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > ${server_log_path} 2>&1 "
+            eval $web_service_cpp_cmd
+            sleep 5s
+            _save_log_path="${LOG_PATH}/cpp_client_gpu.log"
+            cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${det_client_value} ${rec_client_value} > ${_save_log_path} 2>&1"
+            eval $cpp_client_cmd
+            last_status=${PIPESTATUS[0]}
+            eval "cat ${_save_log_path}"
+            status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
+            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
+        fi
+    done
+}
+
+
+#set cuda device
+GPUID=$3
+if [ ${#GPUID} -le 0 ];then
+    env="export CUDA_VISIBLE_DEVICES=0"
+else
+    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+fi
+eval $env
+echo $env
+
+
+echo "################### run test ###################"
+
+export Count=0
+IFS="|"
+func_serving "${web_service_cmd}"
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
new file mode 100644
index 0000000000000000000000000000000000000000..57dab6aeb5dab31605a56abd072cb1568368a66c
--- /dev/null
+++ b/test_tipc/test_serving_infer_python.sh
@@ -0,0 +1,229 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+
+function func_parser_model_config(){
+    strs=$1
+    IFS="/"
+    array=(${strs})
+    tmp=${array[-1]}
+    echo ${tmp}
+}
+
+FILENAME=$1
+dataline=$(awk 'NR==1, NR==23{print}' $FILENAME)
+MODE=$2
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+# parser serving
+model_name=$(func_parser_value "${lines[1]}")
+python_list=$(func_parser_value "${lines[2]}")
+trans_model_py=$(func_parser_value "${lines[3]}")
+det_infer_model_dir_key=$(func_parser_key "${lines[4]}")
+det_infer_model_dir_value=$(func_parser_value "${lines[4]}")
+model_filename_key=$(func_parser_key "${lines[5]}")
+model_filename_value=$(func_parser_value "${lines[5]}")
+params_filename_key=$(func_parser_key "${lines[6]}")
+params_filename_value=$(func_parser_value "${lines[6]}")
+det_serving_server_key=$(func_parser_key "${lines[7]}")
+det_serving_server_value=$(func_parser_value "${lines[7]}")
+det_serving_client_key=$(func_parser_key "${lines[8]}")
+det_serving_client_value=$(func_parser_value "${lines[8]}")
+rec_infer_model_dir_key=$(func_parser_key "${lines[9]}")
+rec_infer_model_dir_value=$(func_parser_value "${lines[9]}")
+rec_serving_server_key=$(func_parser_key "${lines[10]}")
+rec_serving_server_value=$(func_parser_value "${lines[10]}")
+rec_serving_client_key=$(func_parser_key "${lines[11]}")
+rec_serving_client_value=$(func_parser_value "${lines[11]}")
+serving_dir_value=$(func_parser_value "${lines[12]}")
+web_service_py=$(func_parser_value "${lines[13]}")
+web_use_gpu_key=$(func_parser_key "${lines[14]}")
+web_use_gpu_list=$(func_parser_value "${lines[14]}")
+web_use_mkldnn_key=$(func_parser_key "${lines[15]}")
+web_use_mkldnn_list=$(func_parser_value "${lines[15]}")
+web_cpu_threads_key=$(func_parser_key "${lines[16]}")
+web_cpu_threads_list=$(func_parser_value "${lines[16]}")
+web_use_trt_key=$(func_parser_key "${lines[17]}")
+web_use_trt_list=$(func_parser_value "${lines[17]}")
+web_precision_key=$(func_parser_key "${lines[18]}")
+web_precision_list=$(func_parser_value "${lines[18]}")
+det_server_key=$(func_parser_key "${lines[19]}")
+det_server_value=$(func_parser_model_config "${lines[7]}")
+det_client_value=$(func_parser_model_config "${lines[8]}")
+rec_server_key=$(func_parser_key "${lines[20]}")
+rec_server_value=$(func_parser_model_config "${lines[10]}")
+rec_client_value=$(func_parser_model_config "${lines[11]}")
+pipeline_py=$(func_parser_value "${lines[21]}")
+image_dir_key=$(func_parser_key "${lines[22]}")
+image_dir_value=$(func_parser_value "${lines[22]}")
+
+LOG_PATH="$(pwd)/test_tipc/output/${model_name}/${MODE}/python"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_python_serving.log"
+
+function func_serving(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    # pdserving
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+        # trans det
+        set_dirname=$(func_set_params "--dirname" "${det_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${det_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${det_serving_client_value}")
+        python_list=(${python_list})
+        trans_det_log="${LOG_PATH}/python_trans_model_det.log"
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1 "
+        eval $trans_model_cmd
+        # trans rec
+        set_dirname=$(func_set_params "--dirname" "${rec_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${rec_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${rec_serving_client_value}")
+        python_list=(${python_list})
+        trans_rec_log="${LOG_PATH}/python_trans_model_rec.log"
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_rec_log} 2>&1 "
+        eval $trans_model_cmd
+    elif [[ ${model_name} =~ "det" ]]; then
+        # trans det
+        set_dirname=$(func_set_params "--dirname" "${det_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${det_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${det_serving_client_value}")
+        python_list=(${python_list})
+        trans_det_log="${LOG_PATH}/python_trans_model_det.log"
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1 "
+        eval $trans_model_cmd
+    elif [[ ${model_name} =~ "rec" ]]; then
+        # trans rec
+        set_dirname=$(func_set_params "--dirname" "${rec_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${rec_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${rec_serving_client_value}")
+        python_list=(${python_list})
+        trans_rec_log="${LOG_PATH}/python_trans_model_rec.log"
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_rec_log} 2>&1 "
+        eval $trans_model_cmd
+    fi
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_det_model_config} ${set_rec_model_config} > ${server_log_path} 2>&1 " + eval $web_service_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + elif [[ ${model_name} =~ "det" ]]; then + set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}") + web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_det_model_config} > ${server_log_path} 2>&1 " + eval $web_service_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + elif [[ ${model_name} =~ "rec" ]]; then + set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}") + web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_rec_model_config} > ${server_log_path} 2>&1 " + eval $web_service_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + fi + sleep 2s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log" + pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 " + eval $pipeline_cmd + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" + sleep 2s + done + ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9 + done + done + elif [ ${use_gpu} = "gpu" ]; then + for use_trt in ${web_use_trt_list[*]}; do + for precision in ${web_precision_list[*]}; do + server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log" + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then + continue + fi + set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}") + if [ ${use_trt} = True ]; then + device_type=2 + fi + set_precision=$(func_set_params "${web_precision_key}" "${precision}") + if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then + set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}") + set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}") + web_service_cmd="${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_det_model_config} ${set_rec_model_config} > ${server_log_path} 2>&1 " + eval $web_service_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + elif [[ ${model_name} =~ "det" ]]; then + set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}") + web_service_cmd="${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_det_model_config} > ${server_log_path} 2>&1 " + eval $web_service_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + elif [[ ${model_name} =~ "rec" ]]; then + 
set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}") + web_service_cmd="${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_rec_model_config} > ${server_log_path} 2>&1 " + eval $web_service_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + fi + sleep 2s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log" + pipeline_cmd="${python} ${pipeline} ${set_image_dir}> ${_save_log_path} 2>&1" + eval $pipeline_cmd + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" + sleep 2s + done + ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9 + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" + fi + done +} + + +#set cuda device +GPUID=$3 +if [ ${#GPUID} -le 0 ];then + env="export CUDA_VISIBLE_DEVICES=0" +else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" +fi +eval $env +echo $env + + +echo "################### run test ###################" + +export Count=0 +IFS="|" +func_serving "${web_service_cmd}" diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index fe98cb00f6cc428995d7f91db55895e0f1cd9bfd..fa68cb2632ee69fe361f99093e7a2352006ed283 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -2,7 +2,7 @@ source test_tipc/common_func.sh FILENAME=$1 -# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer'] +# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer'] MODE=$2 dataline=$(awk 'NR==1, NR==51{print}' $FILENAME) @@ -88,44 +88,7 @@ benchmark_value=$(func_parser_value "${lines[49]}") infer_key1=$(func_parser_key "${lines[50]}") infer_value1=$(func_parser_value "${lines[50]}") -# parser klquant_infer -if [ ${MODE} = "klquant_whole_infer" ]; then - dataline=$(awk 'NR==1, NR==17{print}' $FILENAME) - lines=(${dataline}) - model_name=$(func_parser_value "${lines[1]}") - python=$(func_parser_value "${lines[2]}") - export_weight=$(func_parser_key "${lines[3]}") - save_infer_key=$(func_parser_key "${lines[4]}") - # parser inference model - infer_model_dir_list=$(func_parser_value "${lines[5]}") - infer_export_list=$(func_parser_value "${lines[6]}") - infer_is_quant=$(func_parser_value "${lines[7]}") - # parser inference - inference_py=$(func_parser_value "${lines[8]}") - use_gpu_key=$(func_parser_key "${lines[9]}") - use_gpu_list=$(func_parser_value "${lines[9]}") - use_mkldnn_key=$(func_parser_key "${lines[10]}") - use_mkldnn_list=$(func_parser_value "${lines[10]}") - cpu_threads_key=$(func_parser_key "${lines[11]}") - cpu_threads_list=$(func_parser_value "${lines[11]}") - batch_size_key=$(func_parser_key "${lines[12]}") - batch_size_list=$(func_parser_value "${lines[12]}") - use_trt_key=$(func_parser_key "${lines[13]}") - use_trt_list=$(func_parser_value "${lines[13]}") - precision_key=$(func_parser_key "${lines[14]}") - precision_list=$(func_parser_value "${lines[14]}") - infer_model_key=$(func_parser_key "${lines[15]}") - image_dir_key=$(func_parser_key "${lines[16]}") - infer_img_dir=$(func_parser_value "${lines[16]}") - save_log_key=$(func_parser_key "${lines[17]}") - save_log_value=$(func_parser_value "${lines[17]}") - 
benchmark_key=$(func_parser_key "${lines[18]}") - benchmark_value=$(func_parser_value "${lines[18]}") - infer_key1=$(func_parser_key "${lines[19]}") - infer_value1=$(func_parser_value "${lines[19]}") -fi - -LOG_PATH="./test_tipc/output/${model_name}" +LOG_PATH="./test_tipc/output/${model_name}/${MODE}" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results_python.log" @@ -142,9 +105,9 @@ function func_inference(){ for use_gpu in ${use_gpu_list[*]}; do if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then for use_mkldnn in ${use_mkldnn_list[*]}; do - if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then - continue - fi + # if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then + # continue + # fi for threads in ${cpu_threads_list[*]}; do for batch_size in ${batch_size_list[*]}; do for precision in ${precision_list[*]}; do @@ -169,7 +132,7 @@ function func_inference(){ eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" - status_check $last_status "${command}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done done @@ -200,7 +163,7 @@ function func_inference(){ eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" - status_check $last_status "${command}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done @@ -211,7 +174,7 @@ function func_inference(){ done } -if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then +if [ ${MODE} = "whole_infer" ]; then GPUID=$3 if [ ${#GPUID} -le 0 ];then env=" " @@ -226,29 +189,22 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then infer_quant_flag=(${infer_is_quant}) for infer_model in ${infer_model_dir_list[*]}; do # run export - if [ ${infer_run_exports[Count]} != "null" ];then - if [ ${MODE} = "klquant_whole_infer" ]; then - save_infer_dir="${infer_model}_klquant" - fi - if [ ${MODE} = "whole_infer" ]; then - save_infer_dir="${infer_model}" - fi + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir="${infer_model}" set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") - export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" + export_log_path="${LOG_PATH}/_export_${Count}.log" + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 " echo ${infer_run_exports[Count]} echo $export_cmd eval $export_cmd status_export=$? 
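With `klquant_whole_infer` removed from this entry point, quantized-model inference now goes through the new `test_ptq_inference_python.sh`, and both scripts are invoked the same way. The config paths below are illustrative examples, not guaranteed filenames:

```
# Regular whole_infer run (FP models).
bash test_tipc/test_train_inference_python.sh test_tipc/configs/ch_PP-OCRv3_rec/train_infer_python.txt whole_infer
# Post-training-quantization run via the dedicated script.
bash test_tipc/test_ptq_inference_python.sh test_tipc/configs/ch_PP-OCRv3_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt whole_infer
```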
@@ -142,9 +105,9 @@ function func_inference(){
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
             for use_mkldnn in ${use_mkldnn_list[*]}; do
-                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
-                    continue
-                fi
+                # if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
+                #     continue
+                # fi
                 for threads in ${cpu_threads_list[*]}; do
                     for batch_size in ${batch_size_list[*]}; do
                         for precision in ${precision_list[*]}; do
@@ -169,7 +132,7 @@ function func_inference(){
                             eval $command
                             last_status=${PIPESTATUS[0]}
                             eval "cat ${_save_log_path}"
-                            status_check $last_status "${command}" "${status_log}"
+                            status_check $last_status "${command}" "${status_log}" "${model_name}"
                         done
                     done
                 done
@@ -200,7 +163,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
@@ -211,7 +174,7 @@ function func_inference(){
     done
 }
 
-if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
+if [ ${MODE} = "whole_infer" ]; then
     GPUID=$3
     if [ ${#GPUID} -le 0 ];then
         env=" "
@@ -226,29 +189,22 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
     infer_quant_flag=(${infer_is_quant})
     for infer_model in ${infer_model_dir_list[*]}; do
         # run export
-        if [ ${infer_run_exports[Count]} != "null" ];then
-            if [ ${MODE} = "klquant_whole_infer" ]; then
-                save_infer_dir="${infer_model}_klquant"
-            fi
-            if [ ${MODE} = "whole_infer" ]; then
-                save_infer_dir="${infer_model}"
-            fi
+        if [ ${infer_run_exports[Count]} != "null" ];then
+            save_infer_dir="${infer_model}"
             set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
             set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
-            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
+            export_log_path="${LOG_PATH}/_export_${Count}.log"
+            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 "
             echo ${infer_run_exports[Count]}
             echo $export_cmd
             eval $export_cmd
             status_export=$?
-            status_check $status_export "${export_cmd}" "${status_log}"
+            status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
         else
             save_infer_dir=${infer_model}
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
-        if [ ${MODE} = "klquant_whole_infer" ]; then
-            is_quant="True"
-        fi
         func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done
@@ -315,7 +271,9 @@ else
             set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
             set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
             set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
-            if [ ${#ips} -le 26 ];then
+            # if length of ips >= 15, then it is seen as multi-machine
+            # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
+            if [ ${#ips} -le 15 ];then
                 save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
                 nodes=1
             else
@@ -330,14 +288,14 @@ else
             set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
             if [ ${#gpu} -le 2 ];then  # train with cpu or single gpu
                 cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} "
-            elif [ ${#ips} -le 26 ];then  # train with multi-gpu
+            elif [ ${#ips} -le 15 ];then  # train with multi-gpu
                 cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
             else  # train with multi-machine
                 cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
             fi
             # run train
             eval $cmd
-            status_check $? "${cmd}" "${status_log}"
+            status_check $? "${cmd}" "${status_log}" "${model_name}"
 
             set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
 
@@ -345,19 +303,21 @@ else
             if [ ${eval_py} != "null" ]; then
                 eval ${env}
                 set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
-                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
+                eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
+                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1 "
                 eval $eval_cmd
-                status_check $? "${eval_cmd}" "${status_log}"
+                status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
             fi
             # run export model
             if [ ${run_export} != "null" ]; then
                 # run export model
                 save_infer_path="${save_log}"
+                export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
                 set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}")
                 set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
-                export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
+                export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 "
                 eval $export_cmd
-                status_check $? "${export_cmd}" "${status_log}"
+                status_check $? "${export_cmd}" "${status_log}" "${model_name}"
 
                 #run inference
                 eval $env
diff --git a/tools/end2end/convert_ppocr_label.py b/tools/end2end/convert_ppocr_label.py
index 8084cac785125f23885399931f98531326b6fb20..c64b9ed168113879182262b609bea692fcb73165 100644
--- a/tools/end2end/convert_ppocr_label.py
+++ b/tools/end2end/convert_ppocr_label.py
@@ -85,10 +85,16 @@ def convert_label(label_dir, mode="gt", save_dir="./save_results/"):
     print("The convert label saved in {}".format(save_dir))
 
 
-if __name__ == "__main__":
+def parse_args():
+    import argparse
+    parser = argparse.ArgumentParser(description="args")
+    parser.add_argument("--label_path", type=str, required=True)
+    parser.add_argument("--save_folder", type=str, required=True)
+    parser.add_argument("--mode", type=str, default="gt")
+    args = parser.parse_args()
+    return args
 
-    ppocr_label_gt = "/paddle/Datasets/chinese/test_set/Label_refine_310_V2.txt"
-    convert_label(ppocr_label_gt, "gt", "./save_gt_310_V2/")
 
-    ppocr_label_gt = "./infer_results/ch_PPOCRV2_infer.txt"
-    convert_label(ppocr_label_gt_en, "pred", "./save_PPOCRV2_infer/")
+if __name__ == "__main__":
+    args = parse_args()
+    convert_label(args.label_path, args.mode, args.save_folder)
diff --git a/tools/end2end/readme.md b/tools/end2end/readme.md
index 69da06dcdabc92c0b6f1831341e592e674ea7473..636ee764aef35a551f729cce20c16357ddf4f495 100644
--- a/tools/end2end/readme.md
+++ b/tools/end2end/readme.md
@@ -23,19 +23,13 @@ all-sum-510/00224225.jpg [{"transcription": "超赞", "points": [[8.0, 48
 
 **Step 2:** Convert the data saved in step 1 into the format required by the end-to-end evaluation:
 
-Modify the code in `tools/convert_ppocr_label.py`: in the convert_label function, set the input label path, the mode and the output label path to convert the GT labels of the test data and the labels of the prediction results.
-```
-ppocr_label_gt = "gt_label.txt"
-convert_label(ppocr_label_gt, "gt", "./save_gt_label/")
+Modify the code in `tools/end2end/convert_ppocr_label.py`: in the convert_label function, set the input label path, the mode and the output label path to convert the GT labels of the test data and the labels of the prediction results.
 
-ppocr_label_gt = "./ch_PP-OCRv2_results/system_results.txt"
-convert_label(ppocr_label_gt_en, "pred", "./save_PPOCRV2_infer/")
 ```
+python3 tools/end2end/convert_ppocr_label.py --mode=gt --label_path=path/to/label_txt --save_folder=save_gt_label
 
-Run `convert_ppocr_label.py`:
-```
-python3 tools/convert_ppocr_label.py
+python3 tools/end2end/convert_ppocr_label.py --mode=pred --label_path=path/to/pred_txt --save_folder=save_PPOCRV2_infer
 ```
 
 This gives the following results:
diff --git a/tools/export_model.py b/tools/export_model.py
index e971f6cb20025d529d0387d287ec87a76abbdbe7..b10d41d5b288258ad895cefa7d8cc243eff10546 100755
--- a/tools/export_model.py
+++ b/tools/export_model.py
@@ -17,7 +17,7 @@ import sys
 __dir__ = os.path.dirname(os.path.abspath(__file__))
 sys.path.append(__dir__)
-sys.path.append(os.path.abspath(os.path.join(__dir__, "..")))
+sys.path.insert(0, os.path.abspath(os.path.join(__dir__, "..")))
 
 import argparse
 
@@ -31,7 +31,12 @@ from ppocr.utils.logging import get_logger
 from tools.program import load_config, merge_config, ArgsParser
 
 
-def export_single_model(model, arch_config, save_path, logger, quanter=None):
+def export_single_model(model,
+                        arch_config,
+                        save_path,
+                        logger,
+                        input_shape=None,
+                        quanter=None):
     if arch_config["algorithm"] == "SRN":
         max_text_length = arch_config["Head"]["max_text_length"]
         other_shape = [
@@ -64,7 +69,7 @@ def export_single_model(model, arch_config, save_path, logger, quanter=None):
         else:
             other_shape = [
                 paddle.static.InputSpec(
-                    shape=[None, 3, 64, 256], dtype="float32"),
+                    shape=[None] + input_shape, dtype="float32"),
             ]
         model = to_static(model, input_spec=other_shape)
     elif arch_config["algorithm"] == "PREN":
@@ -73,10 +78,29 @@ def export_single_model(model, arch_config, save_path, logger, quanter=None):
                 shape=[None, 3, 64, 512], dtype="float32"),
         ]
         model = to_static(model, input_spec=other_shape)
+    elif arch_config["algorithm"] == "ViTSTR":
+        other_shape = [
+            paddle.static.InputSpec(
+                shape=[None, 1, 224, 224], dtype="float32"),
+        ]
+        model = to_static(model, input_spec=other_shape)
+    elif arch_config["algorithm"] == "ABINet":
+        other_shape = [
+            paddle.static.InputSpec(
+                shape=[None, 3, 32, 128], dtype="float32"),
+        ]
+        # print([None, 3, 32, 128])
+        model = to_static(model, input_spec=other_shape)
+    elif arch_config["algorithm"] == "NRTR":
+        other_shape = [
+            paddle.static.InputSpec(
+                shape=[None, 1, 32, 100], dtype="float32"),
+        ]
+        model = to_static(model, input_spec=other_shape)
     else:
         infer_shape = [3, -1, -1]
         if arch_config["model_type"] == "rec":
-            infer_shape = [3, 32, -1]  # for rec model, H must be 32
+            infer_shape = [3, 48, -1]  # for rec model, H must be 48
             if "Transform" in arch_config and arch_config[
                     "Transform"] is not None and arch_config["Transform"][
                         "name"] == "TPS":
@@ -84,8 +108,6 @@ def export_single_model(model, arch_config, save_path, logger, quanter=None):
                     "When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training"
                 )
                 infer_shape[-1] = 100
-        if arch_config["algorithm"] == "NRTR":
-            infer_shape = [1, 32, 100]
         elif arch_config["model_type"] == "table":
             infer_shape = [3, 488, 488]
         model = to_static(
@@ -157,6 +179,13 @@ def main():
 
     arch_config = config["Architecture"]
 
+    if arch_config["algorithm"] == "SVTR" and arch_config["Head"][
+            "name"] != 'MultiHead':
+        input_shape = config["Eval"]["dataset"]["transforms"][-2][
+            'SVTRRecResizeImg']['image_shape']
+    else:
+        input_shape = None
+
     if arch_config["algorithm"] in ["Distillation", ]:  # distillation model
         archs = list(arch_config["Models"].values())
         for idx, name in enumerate(model.model_name_list):
@@ -165,7 +194,8 @@ def main():
                                 sub_model_save_path, logger)
     else:
         save_path = os.path.join(save_path, "inference")
-        export_single_model(model, arch_config, save_path, logger)
+        export_single_model(
+            model, arch_config, save_path, logger, input_shape=input_shape)
 
 
 if __name__ == "__main__":
diff --git a/tools/export_model.py b/tools/export_model.py
index e971f6cb20025d529d0387d287ec87a76abbdbe7..b10d41d5b288258ad895cefa7d8cc243eff10546 100755
--- a/tools/export_model.py
+++ b/tools/export_model.py
@@ -17,7 +17,7 @@ import sys
 __dir__ = os.path.dirname(os.path.abspath(__file__))
 sys.path.append(__dir__)
-sys.path.append(os.path.abspath(os.path.join(__dir__, "..")))
+sys.path.insert(0, os.path.abspath(os.path.join(__dir__, "..")))
 
 import argparse
@@ -31,7 +31,12 @@
 from ppocr.utils.logging import get_logger
 from tools.program import load_config, merge_config, ArgsParser
 
 
-def export_single_model(model, arch_config, save_path, logger, quanter=None):
+def export_single_model(model,
+                        arch_config,
+                        save_path,
+                        logger,
+                        input_shape=None,
+                        quanter=None):
     if arch_config["algorithm"] == "SRN":
         max_text_length = arch_config["Head"]["max_text_length"]
         other_shape = [
@@ -64,7 +69,7 @@
         else:
             other_shape = [
                 paddle.static.InputSpec(
-                    shape=[None, 3, 64, 256], dtype="float32"),
+                    shape=[None] + input_shape, dtype="float32"),
             ]
         model = to_static(model, input_spec=other_shape)
     elif arch_config["algorithm"] == "PREN":
@@ -73,10 +78,28 @@ def export_single_model(model, arch_config, save_path, logger, quanter=None):
                 shape=[None, 3, 64, 512], dtype="float32"),
         ]
         model = to_static(model, input_spec=other_shape)
+    elif arch_config["algorithm"] == "ViTSTR":
+        other_shape = [
+            paddle.static.InputSpec(
+                shape=[None, 1, 224, 224], dtype="float32"),
+        ]
+        model = to_static(model, input_spec=other_shape)
+    elif arch_config["algorithm"] == "ABINet":
+        other_shape = [
+            paddle.static.InputSpec(
+                shape=[None, 3, 32, 128], dtype="float32"),
+        ]
+        model = to_static(model, input_spec=other_shape)
+    elif arch_config["algorithm"] == "NRTR":
+        other_shape = [
+            paddle.static.InputSpec(
+                shape=[None, 1, 32, 100], dtype="float32"),
+        ]
+        model = to_static(model, input_spec=other_shape)
     else:
         infer_shape = [3, -1, -1]
         if arch_config["model_type"] == "rec":
-            infer_shape = [3, 32, -1]  # for rec model, H must be 32
+            infer_shape = [3, 48, -1]  # for rec model, H must be 48
             if "Transform" in arch_config and arch_config[
                     "Transform"] is not None and arch_config["Transform"][
                         "name"] == "TPS":
@@ -84,8 +108,6 @@
                 "When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training"
             )
             infer_shape[-1] = 100
-        if arch_config["algorithm"] == "NRTR":
-            infer_shape = [1, 32, 100]
         elif arch_config["model_type"] == "table":
             infer_shape = [3, 488, 488]
         model = to_static(
@@ -157,6 +179,13 @@ def main():
 
     arch_config = config["Architecture"]
 
+    if arch_config["algorithm"] == "SVTR" and arch_config["Head"][
+            "name"] != 'MultiHead':
+        input_shape = config["Eval"]["dataset"]["transforms"][-2][
+            'SVTRRecResizeImg']['image_shape']
+    else:
+        input_shape = None
+
     if arch_config["algorithm"] in ["Distillation", ]:  # distillation model
         archs = list(arch_config["Models"].values())
         for idx, name in enumerate(model.model_name_list):
@@ -165,7 +194,8 @@ def main():
                                sub_model_save_path, logger)
     else:
         save_path = os.path.join(save_path, "inference")
-        export_single_model(model, arch_config, save_path, logger)
+        export_single_model(
+            model, arch_config, save_path, logger, input_shape=input_shape)
 
 
 if __name__ == "__main__":
diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py
index 5f2675d667c2aab8186886a60d8d447f43419954..7b6bebf1fbced2de5bb0e4e75840fb8dd7beb374 100755
--- a/tools/infer/predict_det.py
+++ b/tools/infer/predict_det.py
@@ -154,9 +154,10 @@ class TextDetector(object):
         s = pts.sum(axis=1)
         rect[0] = pts[np.argmin(s)]
         rect[2] = pts[np.argmax(s)]
-        diff = np.diff(pts, axis=1)
-        rect[1] = pts[np.argmin(diff)]
-        rect[3] = pts[np.argmax(diff)]
+        tmp = np.delete(pts, (np.argmin(s), np.argmax(s)), axis=0)
+        diff = np.diff(np.array(tmp), axis=1)
+        rect[1] = tmp[np.argmin(diff)]
+        rect[3] = tmp[np.argmax(diff)]
         return rect
 
     def clip_det_res(self, points, img_height, img_width):
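The `predict_det.py` change hardens corner ordering for skewed quadrilaterals: once the top-left (smallest x+y) and bottom-right (largest x+y) points are fixed, only the two remaining points compete for top-right and bottom-left, so a single point can no longer be assigned to two corners. A standalone sketch of the patched `TextDetector` ordering helper (the sample box is ours, chosen to trigger the old failure):

```python
import numpy as np

def order_points_clockwise(pts):
    """Order 4 box points as [top-left, top-right, bottom-right, bottom-left]."""
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]  # smallest x+y -> top-left
    rect[2] = pts[np.argmax(s)]  # largest  x+y -> bottom-right
    # Remove the two already-assigned points before comparing y-x, as in the patch.
    tmp = np.delete(pts, (np.argmin(s), np.argmax(s)), axis=0)
    diff = np.diff(np.array(tmp), axis=1)
    rect[1] = tmp[np.argmin(diff)]  # smallest y-x -> top-right
    rect[3] = tmp[np.argmax(diff)]  # largest  y-x -> bottom-left
    return rect

# On this skewed box, the pre-patch code (argmin/argmax of y-x over all four
# points) picks [140, 60] -- already the bottom-right -- as top-right as well;
# the patched version assigns each point to exactly one corner.
pts = np.array([[0, 50], [60, 0], [140, 60], [50, 130]], dtype="float32")
print(order_points_clockwise(pts))
```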
diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py
index 3664ef2caf4b888d6a3918202256c99cc54c5eb1..a95f55596647acc0eaca9616b5630917d7ebdf3a 100755
--- a/tools/infer/predict_rec.py
+++ b/tools/infer/predict_rec.py
@@ -69,6 +69,18 @@ class TextRecognizer(object):
                 "character_dict_path": args.rec_char_dict_path,
                 "use_space_char": args.use_space_char
             }
+        elif self.rec_algorithm == 'ViTSTR':
+            postprocess_params = {
+                'name': 'ViTSTRLabelDecode',
+                "character_dict_path": args.rec_char_dict_path,
+                "use_space_char": args.use_space_char
+            }
+        elif self.rec_algorithm == 'ABINet':
+            postprocess_params = {
+                'name': 'ABINetLabelDecode',
+                "character_dict_path": args.rec_char_dict_path,
+                "use_space_char": args.use_space_char
+            }
         self.postprocess_op = build_post_process(postprocess_params)
         self.predictor, self.input_tensor, self.output_tensors, self.config = \
             utility.create_predictor(args, 'rec', logger)
@@ -96,15 +108,22 @@ class TextRecognizer(object):
 
     def resize_norm_img(self, img, max_wh_ratio):
         imgC, imgH, imgW = self.rec_image_shape
-        if self.rec_algorithm == 'NRTR':
+        if self.rec_algorithm == 'NRTR' or self.rec_algorithm == 'ViTSTR':
             img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
             # return padding_im
             image_pil = Image.fromarray(np.uint8(img))
-            img = image_pil.resize([100, 32], Image.ANTIALIAS)
+            if self.rec_algorithm == 'ViTSTR':
+                img = image_pil.resize([imgW, imgH], Image.BICUBIC)
+            else:
+                img = image_pil.resize([imgW, imgH], Image.ANTIALIAS)
             img = np.array(img)
             norm_img = np.expand_dims(img, -1)
             norm_img = norm_img.transpose((2, 0, 1))
-            return norm_img.astype(np.float32) / 128. - 1.
+            if self.rec_algorithm == 'ViTSTR':
+                norm_img = norm_img.astype(np.float32) / 255.
+            else:
+                norm_img = norm_img.astype(np.float32) / 128. - 1.
+            return norm_img
 
         assert imgC == img.shape[2]
         imgW = int((imgH * max_wh_ratio))
@@ -132,17 +151,6 @@ class TextRecognizer(object):
             padding_im[:, :, 0:resized_w] = resized_image
         return padding_im
 
-    def resize_norm_img_svtr(self, img, image_shape):
-
-        imgC, imgH, imgW = image_shape
-        resized_image = cv2.resize(
-            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
-        resized_image = resized_image.astype('float32')
-        resized_image = resized_image.transpose((2, 0, 1)) / 255
-        resized_image -= 0.5
-        resized_image /= 0.5
-        return resized_image
-
     def resize_norm_img_srn(self, img, image_shape):
         imgC, imgH, imgW = image_shape
 
@@ -250,6 +258,35 @@ class TextRecognizer(object):
 
         return padding_im, resize_shape, pad_shape, valid_ratio
 
+    def resize_norm_img_svtr(self, img, image_shape):
+
+        imgC, imgH, imgW = image_shape
+        resized_image = cv2.resize(
+            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
+        resized_image = resized_image.astype('float32')
+        resized_image = resized_image.transpose((2, 0, 1)) / 255
+        resized_image -= 0.5
+        resized_image /= 0.5
+        return resized_image
+
+    def resize_norm_img_abinet(self, img, image_shape):
+
+        imgC, imgH, imgW = image_shape
+
+        resized_image = cv2.resize(
+            img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
+        resized_image = resized_image.astype('float32')
+        resized_image = resized_image / 255.
+
+        mean = np.array([0.485, 0.456, 0.406])
+        std = np.array([0.229, 0.224, 0.225])
+        resized_image = (
+            resized_image - mean[None, None, ...]) / std[None, None, ...]
+        resized_image = resized_image.transpose((2, 0, 1))
+        resized_image = resized_image.astype('float32')
+
+        return resized_image
+
     def __call__(self, img_list):
         img_num = len(img_list)
         # Calculate the aspect ratio of all text bars
@@ -300,6 +337,11 @@ class TextRecognizer(object):
                     self.rec_image_shape)
                 norm_img = norm_img[np.newaxis, :]
                 norm_img_batch.append(norm_img)
+            elif self.rec_algorithm == "ABINet":
+                norm_img = self.resize_norm_img_abinet(
+                    img_list[indices[ino]], self.rec_image_shape)
+                norm_img = norm_img[np.newaxis, :]
+                norm_img_batch.append(norm_img)
             else:
                 norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                 max_wh_ratio)
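The recognizers touched above use three different input normalizations, which is easy to mix up when porting crops between models. A minimal side-by-side sketch (a random array stands in for a resized text-line crop; in the real code NRTR and ViTSTR operate on grayscale images):

```python
import numpy as np

# A random HWC uint8 array stands in for a resized text-line crop.
img = np.random.randint(0, 256, (32, 128, 3), dtype=np.uint8).astype("float32")

nrtr = img / 128. - 1.   # NRTR-style: scale to [-1, 1]
vitstr = img / 255.      # ViTSTR-style: scale to [0, 1]

# ABINet-style: [0, 1], then ImageNet per-channel standardization, then HWC -> CHW.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
abinet = ((img / 255. - mean[None, None, :]) / std[None, None, :]).transpose((2, 0, 1))

print(nrtr.min(), nrtr.max(), vitstr.max(), abinet.shape)  # value ranges and CHW shape
```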
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 74ec42ec842abe0f214f13eea6b30a613cfc517b..366212f228eec33f11c825bfaf1e360258af9b2e 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -34,6 +34,7 @@ def init_args():
     parser = argparse.ArgumentParser()
     # params for prediction engine
     parser.add_argument("--use_gpu", type=str2bool, default=True)
+    parser.add_argument("--use_xpu", type=str2bool, default=False)
     parser.add_argument("--ir_optim", type=str2bool, default=True)
     parser.add_argument("--use_tensorrt", type=str2bool, default=False)
     parser.add_argument("--min_subgraph_size", type=int, default=15)
@@ -201,7 +202,8 @@ def create_predictor(args, mode, logger):
                 workspace_size=1 << 30,
                 precision_mode=precision,
                 max_batch_size=args.max_batch_size,
-                min_subgraph_size=args.min_subgraph_size)
+                min_subgraph_size=args.min_subgraph_size,
+                use_calib_mode=False)
             # skip the minimum trt subgraph
             use_dynamic_shape = True
             if mode == "det":
@@ -275,6 +277,7 @@ def create_predictor(args, mode, logger):
                 min_input_shape = {"x": [1, 3, imgH, 10]}
                 max_input_shape = {"x": [args.rec_batch_num, 3, imgH, 2304]}
                 opt_input_shape = {"x": [args.rec_batch_num, 3, imgH, 320]}
+                config.exp_disable_tensorrt_ops(["transpose2"])
             elif mode == "cls":
                 min_input_shape = {"x": [1, 3, 48, 10]}
                 max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]}
@@ -285,6 +288,8 @@ def create_predictor(args, mode, logger):
                 config.set_trt_dynamic_shape_info(
                     min_input_shape, max_input_shape, opt_input_shape)
 
+    elif args.use_xpu:
+        config.enable_xpu(10 * 1024 * 1024)
     else:
         config.disable_gpu()
         if hasattr(args, "cpu_threads"):
diff --git a/tools/infer_rec.py b/tools/infer_rec.py
index 193e24a4de12392130d16b86a3407db74602e1f4..a08fa25b467482da4a2996912ad2cc8cc7c398da 100755
--- a/tools/infer_rec.py
+++ b/tools/infer_rec.py
@@ -157,7 +157,7 @@ def main():
 
                 if info is not None:
                     logger.info("\t result: {}".format(info))
-                    fout.write(file + "\t" + info)
+                    fout.write(file + "\t" + info + "\n")
 
     logger.info("success!")
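The new `--use_xpu` flag threads through `create_predictor` into the Paddle Inference config. A stripped-down sketch of the resulting device branching (the model paths are placeholders; `enable_xpu` takes the L3 workspace size in bytes, and `enable_use_gpu` takes the initial memory pool size in MB plus a device id):

```python
from paddle.inference import Config, create_predictor

def build_config(model_dir, use_gpu=False, use_xpu=False):
    # "model_dir" is a placeholder for an exported inference model directory.
    config = Config(model_dir + "/inference.pdmodel",
                    model_dir + "/inference.pdiparams")
    if use_gpu:
        config.enable_use_gpu(500, 0)        # 500 MB initial pool on GPU 0
    elif use_xpu:
        config.enable_xpu(10 * 1024 * 1024)  # 10 MB L3 workspace, as in the patch
    else:
        config.disable_gpu()
    return config

predictor = create_predictor(build_config("path/to/rec_model", use_xpu=True))
```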
diff --git a/tools/program.py b/tools/program.py
index 7c02dc0149f36085ef05ca378b79d27e92d6dd57..aa3ba82c44d6afba725a8059dc7f8cae41947b3d 100755
--- a/tools/program.py
+++ b/tools/program.py
@@ -112,20 +112,25 @@ def merge_config(config, opts):
     return config
 
 
-def check_gpu(use_gpu):
+def check_device(use_gpu, use_xpu=False):
     """
     Log error and exit when set use_gpu=true in paddlepaddle
     cpu version.
     """
-    err = "Config use_gpu cannot be set as true while you are " \
-          "using paddlepaddle cpu version ! \nPlease try: \n" \
-          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
-          "\t2. Set use_gpu as false in config file to run " \
+    err = "Config {} cannot be set as true while your paddle " \
+          "is not compiled with {} ! \nPlease try: \n" \
+          "\t1. Install paddlepaddle to run model on {} \n" \
+          "\t2. Set {} as false in config file to run " \
           "model on CPU"
 
     try:
+        if use_gpu and use_xpu:
+            print("use_xpu and use_gpu cannot both be true.")
         if use_gpu and not paddle.is_compiled_with_cuda():
-            print(err)
+            print(err.format("use_gpu", "cuda", "gpu", "use_gpu"))
+            sys.exit(1)
+        if use_xpu and not paddle.device.is_compiled_with_xpu():
+            print(err.format("use_xpu", "xpu", "xpu", "use_xpu"))
             sys.exit(1)
     except Exception as e:
         pass
@@ -250,6 +255,8 @@ def train(config,
                 with paddle.amp.auto_cast():
                     if model_type == 'table' or extra_input:
                         preds = model(images, data=batch[1:])
+                    elif model_type in ["kie", 'vqa']:
+                        preds = model(batch)
                     else:
                         preds = model(images)
             else:
@@ -302,7 +309,8 @@ def train(config,
             train_stats.update(stats)
 
             if log_writer is not None and dist.get_rank() == 0:
-                log_writer.log_metrics(metrics=train_stats.get(), prefix="TRAIN", step=global_step)
+                log_writer.log_metrics(
+                    metrics=train_stats.get(), prefix="TRAIN", step=global_step)
 
             if dist.get_rank() == 0 and (
                 (global_step > 0 and global_step % print_batch_step == 0) or
@@ -349,7 +357,8 @@ def train(config,
 
                 # logger metric
                 if log_writer is not None:
-                    log_writer.log_metrics(metrics=cur_metric, prefix="EVAL", step=global_step)
+                    log_writer.log_metrics(
+                        metrics=cur_metric, prefix="EVAL", step=global_step)
 
                 if cur_metric[main_indicator] >= best_model_dict[
                         main_indicator]:
@@ -372,11 +381,18 @@ def train(config,
                     logger.info(best_str)
                     # logger best metric
                     if log_writer is not None:
-                        log_writer.log_metrics(metrics={
-                            "best_{}".format(main_indicator): best_model_dict[main_indicator]
-                        }, prefix="EVAL", step=global_step)
-
-                        log_writer.log_model(is_best=True, prefix="best_accuracy", metadata=best_model_dict)
+                        log_writer.log_metrics(
+                            metrics={
+                                "best_{}".format(main_indicator):
+                                best_model_dict[main_indicator]
+                            },
+                            prefix="EVAL",
+                            step=global_step)
+
+                        log_writer.log_model(
+                            is_best=True,
+                            prefix="best_accuracy",
+                            metadata=best_model_dict)
 
             reader_start = time.time()
         if dist.get_rank() == 0:
@@ -408,7 +424,8 @@ def train(config,
                 epoch=epoch,
                 global_step=global_step)
             if log_writer is not None:
-                log_writer.log_model(is_best=False, prefix='iter_epoch_{}'.format(epoch))
+                log_writer.log_model(
+                    is_best=False, prefix='iter_epoch_{}'.format(epoch))
 
     best_str = 'best metric, {}'.format(', '.join(
         ['{}: {}'.format(k, v) for k, v in best_model_dict.items()]))
@@ -547,7 +564,7 @@ def preprocess(is_train=False):
     # check if set use_gpu=True in paddlepaddle cpu version
     use_gpu = config['Global']['use_gpu']
-    check_gpu(use_gpu)
+    use_xpu = config['Global'].get('use_xpu', False)
 
     # check if set use_xpu=True in paddlepaddle cpu/gpu version
     use_xpu = False
@@ -559,14 +576,17 @@ def preprocess(is_train=False):
     assert alg in [
         'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN',
         'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE',
-        'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM', 'PREN', 'FCE', 'SVTR'
+        'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM', 'PREN', 'FCE', 'SVTR',
+        'ViTSTR', 'ABINet'
     ]
 
-    device = 'cpu'
-    if use_gpu:
-        device = 'gpu:{}'.format(dist.ParallelEnv().dev_id)
     if use_xpu:
-        device = 'xpu'
+        device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
+    else:
+        device = 'gpu:{}'.format(dist.ParallelEnv()
+                                 .dev_id) if use_gpu else 'cpu'
+    check_device(use_gpu, use_xpu)
+
     device = paddle.set_device(device)
 
     config['Global']['distributed'] = dist.get_world_size() != 1
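Outside the diff context, the new device logic in `preprocess` reduces to: XPU first (honoring `FLAGS_selected_xpus`), otherwise GPU if requested, otherwise CPU, with `check_device` aborting when the chosen backend was not compiled in. A minimal single-card sketch (the real code uses `dist.ParallelEnv().dev_id` for the GPU index):

```python
import os
import paddle

def resolve_device(use_gpu: bool, use_xpu: bool) -> str:
    # XPU takes priority; fall through to GPU, then CPU.
    if use_xpu:
        return 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
    return 'gpu:0' if use_gpu else 'cpu'

# Request the GPU only when this paddle build actually supports CUDA.
device = resolve_device(use_gpu=paddle.is_compiled_with_cuda(), use_xpu=False)
paddle.set_device(device)
print(device)
```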
@@ -578,7 +598,8 @@ def preprocess(is_train=False):
         vdl_writer_path = '{}/vdl/'.format(save_model_dir)
         log_writer = VDLLogger(save_model_dir)
         loggers.append(log_writer)
-    if ('use_wandb' in config['Global'] and config['Global']['use_wandb']) or 'wandb' in config:
+    if ('use_wandb' in config['Global'] and
+            config['Global']['use_wandb']) or 'wandb' in config:
         save_dir = config['Global']['save_model_dir']
         wandb_writer_path = "{}/wandb".format(save_dir)
         if "wandb" in config:
diff --git a/tools/train.py b/tools/train.py
index 42aba548d6bf5fc35f033ef2baca0fb54d79e75a..b7c25e34231fb650fd2c7c89dc17320f561962f9 100755
--- a/tools/train.py
+++ b/tools/train.py
@@ -35,6 +35,7 @@ from ppocr.postprocess import build_post_process
 from ppocr.metrics import build_metric
 from ppocr.utils.save_load import load_model
 from ppocr.utils.utility import set_seed
+from ppocr.modeling.architectures import apply_to_static
 import tools.program as program
 
 dist.get_world_size()
@@ -121,6 +122,8 @@ def main(config, device, logger, vdl_writer):
     if config['Global']['distributed']:
         model = paddle.DataParallel(model)
 
+    model = apply_to_static(model, config, logger)
+
     # build loss
     loss_class = build_loss(config['Loss'])
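`apply_to_static` itself is not part of this diff (the real helper lives in `ppocr/modeling/architectures`). A hypothetical minimal version, purely to illustrate where the hook sits; the `to_static` config key and the input spec below are assumptions, not the library's actual behavior:

```python
import paddle
from paddle.jit import to_static
from paddle.static import InputSpec

def apply_to_static(model, config, logger):
    # Hypothetical sketch: only convert the model when the config opts in.
    if not config['Global'].get('to_static', False):
        return model
    # Example input spec for a 48-pixel-high recognition model (an assumption).
    specs = [InputSpec(shape=[None, 3, 48, 320], dtype='float32')]
    model = to_static(model, input_spec=specs)
    logger.info("Applied paddle.jit.to_static with specs: {}".format(specs))
    return model
```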