提交 a9182cbe 编写于 作者:qq_25193841

Merge remote-tracking branch 'origin/dygraph' into dygraph

......@@ -1149,6 +1149,9 @@ class MainWindow(QMainWindow):
for box in self.result_dic:
trans_dic = {"label": box[1][0], "points": box[0], "difficult": False}
if self.kie_mode:
if len(box) == 3:
trans_dic.update({"key_cls": box[2]})
else:
trans_dic.update({"key_cls": "None"})
if trans_dic["label"] == "" and mode == 'Auto':
continue
......@@ -2047,6 +2050,7 @@ class MainWindow(QMainWindow):
rec_flag = 0
for shape in self.canvas.shapes:
box = [[int(p.x()), int(p.y())] for p in shape.points]
kie_cls = shape.key_cls
if len(box) > 4:
box = self.gen_quad_from_poly(np.array(box))
......@@ -2062,15 +2066,25 @@ class MainWindow(QMainWindow):
if shape.line_color == DEFAULT_LOCK_COLOR:
shape.label = result[0][0]
result.insert(0, box)
if self.kie_mode:
result.append(kie_cls)
self.result_dic_locked.append(result)
else:
result.insert(0, box)
if self.kie_mode:
result.append(kie_cls)
self.result_dic.append(result)
else:
print('Can not recognise the box')
if shape.line_color == DEFAULT_LOCK_COLOR:
shape.label = result[0][0]
if self.kie_mode:
self.result_dic_locked.append([box, (self.noLabelText, 0), kie_cls])
else:
self.result_dic_locked.append([box, (self.noLabelText, 0)])
else:
if self.kie_mode:
self.result_dic.append([box, (self.noLabelText, 0), kie_cls])
else:
self.result_dic.append([box, (self.noLabelText, 0)])
try:
......
......@@ -48,6 +48,7 @@ class Shape(object):
def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False):
self.label = label
self.idx = 0
self.points = []
self.fill = False
self.selected = False
......
5ZQ
I4UL
PWL
SNOG
ZL02
1C30
O3H
YHRS
N03S
1U5Y
JTK
EN4F
YKJ
DWNH
R42W
X0V
4OF5
08AM
Y93S
GWE2
0KR
9U2A
DBQ
Y6J
ROZ
K06
KIEY
NZQJ
UN1B
6X4
\ No newline at end of file
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/zcswdt/Color_OCR_image_generator
"""
import os
import random
from PIL import Image, ImageDraw, ImageFont
import json
import argparse
def get_char_lines(txt_root_path):
"""
desc:get corpus line
"""
txt_files = os.listdir(txt_root_path)
char_lines = []
for txt in txt_files:
f = open(os.path.join(txt_root_path, txt), mode='r', encoding='utf-8')
lines = f.readlines()
f.close()
for line in lines:
char_lines.append(line.strip())
return char_lines
def get_horizontal_text_picture(image_file, chars, fonts_list, cf):
"""
desc:gen horizontal text picture
"""
img = Image.open(image_file)
if img.mode != 'RGB':
img = img.convert('RGB')
img_w, img_h = img.size
# random choice font
font_path = random.choice(fonts_list)
# random choice font size
font_size = random.randint(cf.font_min_size, cf.font_max_size)
font = ImageFont.truetype(font_path, font_size)
ch_w = []
ch_h = []
for ch in chars:
wt, ht = font.getsize(ch)
ch_w.append(wt)
ch_h.append(ht)
f_w = sum(ch_w)
f_h = max(ch_h)
# add space
char_space_width = max(ch_w)
f_w += (char_space_width * (len(chars) - 1))
x1 = random.randint(0, img_w - f_w)
y1 = random.randint(0, img_h - f_h)
x2 = x1 + f_w
y2 = y1 + f_h
crop_y1 = y1
crop_x1 = x1
crop_y2 = y2
crop_x2 = x2
best_color = (0, 0, 0)
draw = ImageDraw.Draw(img)
for i, ch in enumerate(chars):
draw.text((x1, y1), ch, best_color, font=font)
x1 += (ch_w[i] + char_space_width)
crop_img = img.crop((crop_x1, crop_y1, crop_x2, crop_y2))
return crop_img, chars
def get_vertical_text_picture(image_file, chars, fonts_list, cf):
"""
desc:gen vertical text picture
"""
img = Image.open(image_file)
if img.mode != 'RGB':
img = img.convert('RGB')
img_w, img_h = img.size
# random choice font
font_path = random.choice(fonts_list)
# random choice font size
font_size = random.randint(cf.font_min_size, cf.font_max_size)
font = ImageFont.truetype(font_path, font_size)
ch_w = []
ch_h = []
for ch in chars:
wt, ht = font.getsize(ch)
ch_w.append(wt)
ch_h.append(ht)
f_w = max(ch_w)
f_h = sum(ch_h)
x1 = random.randint(0, img_w - f_w)
y1 = random.randint(0, img_h - f_h)
x2 = x1 + f_w
y2 = y1 + f_h
crop_y1 = y1
crop_x1 = x1
crop_y2 = y2
crop_x2 = x2
best_color = (0, 0, 0)
draw = ImageDraw.Draw(img)
i = 0
for ch in chars:
draw.text((x1, y1), ch, best_color, font=font)
y1 = y1 + ch_h[i]
i = i + 1
crop_img = img.crop((crop_x1, crop_y1, crop_x2, crop_y2))
crop_img = crop_img.transpose(Image.ROTATE_90)
return crop_img, chars
def get_fonts(fonts_path):
"""
desc: get all fonts
"""
font_files = os.listdir(fonts_path)
fonts_list=[]
for font_file in font_files:
font_path=os.path.join(fonts_path, font_file)
fonts_list.append(font_path)
return fonts_list
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_img', type=int, default=30, help="Number of images to generate")
parser.add_argument('--font_min_size', type=int, default=11)
parser.add_argument('--font_max_size', type=int, default=12,
help="Help adjust the size of the generated text and the size of the picture")
parser.add_argument('--bg_path', type=str, default='./background',
help='The generated text pictures will be pasted onto the pictures of this folder')
parser.add_argument('--det_bg_path', type=str, default='./det_background',
help='The generated text pictures will use the pictures of this folder as the background')
parser.add_argument('--fonts_path', type=str, default='../../StyleText/fonts',
help='The font used to generate the picture')
parser.add_argument('--corpus_path', type=str, default='./corpus',
help='The corpus used to generate the text picture')
parser.add_argument('--output_dir', type=str, default='./output/', help='Images save dir')
cf = parser.parse_args()
# save path
if not os.path.exists(cf.output_dir):
os.mkdir(cf.output_dir)
# get corpus
txt_root_path = cf.corpus_path
char_lines = get_char_lines(txt_root_path=txt_root_path)
# get all fonts
fonts_path = cf.fonts_path
fonts_list = get_fonts(fonts_path)
# rec bg
img_root_path = cf.bg_path
imnames=os.listdir(img_root_path)
# det bg
det_bg_path = cf.det_bg_path
bg_pics = os.listdir(det_bg_path)
# OCR det files
det_val_file = open(cf.output_dir + 'det_gt_val.txt', 'w', encoding='utf-8')
det_train_file = open(cf.output_dir + 'det_gt_train.txt', 'w', encoding='utf-8')
# det imgs
det_save_dir = 'imgs/'
if not os.path.exists(cf.output_dir + det_save_dir):
os.mkdir(cf.output_dir + det_save_dir)
det_val_save_dir = 'imgs_val/'
if not os.path.exists(cf.output_dir + det_val_save_dir):
os.mkdir(cf.output_dir + det_val_save_dir)
# OCR rec files
rec_val_file = open(cf.output_dir + 'rec_gt_val.txt', 'w', encoding='utf-8')
rec_train_file = open(cf.output_dir + 'rec_gt_train.txt', 'w', encoding='utf-8')
# rec imgs
rec_save_dir = 'rec_imgs/'
if not os.path.exists(cf.output_dir + rec_save_dir):
os.mkdir(cf.output_dir + rec_save_dir)
rec_val_save_dir = 'rec_imgs_val/'
if not os.path.exists(cf.output_dir + rec_val_save_dir):
os.mkdir(cf.output_dir + rec_val_save_dir)
    val_ratio = cf.num_img * 0.2  # number of validation images (20% of num_img)
print('start generating...')
for i in range(0, cf.num_img):
imname = random.choice(imnames)
img_path = os.path.join(img_root_path, imname)
rnd = random.random()
# gen horizontal text picture
if rnd < 0.5:
gen_img, chars = get_horizontal_text_picture(img_path, char_lines[i], fonts_list, cf)
ori_w, ori_h = gen_img.size
gen_img = gen_img.crop((0, 3, ori_w, ori_h))
# gen vertical text picture
else:
gen_img, chars = get_vertical_text_picture(img_path, char_lines[i], fonts_list, cf)
ori_w, ori_h = gen_img.size
gen_img = gen_img.crop((3, 0, ori_w, ori_h))
ori_w, ori_h = gen_img.size
# rec imgs
save_img_name = str(i).zfill(4) + '.jpg'
if i < val_ratio:
save_dir = os.path.join(rec_val_save_dir, save_img_name)
line = save_dir + '\t' + char_lines[i] + '\n'
rec_val_file.write(line)
else:
save_dir = os.path.join(rec_save_dir, save_img_name)
line = save_dir + '\t' + char_lines[i] + '\n'
rec_train_file.write(line)
gen_img.save(cf.output_dir + save_dir, quality = 95, subsampling=0)
# det img
# random choice bg
bg_pic = random.sample(bg_pics, 1)[0]
det_img = Image.open(os.path.join(det_bg_path, bg_pic))
# the PCB position is fixed, modify it according to your own scenario
if bg_pic == '1.png':
x1 = 38
y1 = 3
else:
x1 = 34
y1 = 1
det_img.paste(gen_img, (x1, y1))
# text pos
chars_pos = [[x1, y1], [x1 + ori_w, y1], [x1 + ori_w, y1 + ori_h], [x1, y1 + ori_h]]
label = [{"transcription":char_lines[i], "points":chars_pos}]
if i < val_ratio:
save_dir = os.path.join(det_val_save_dir, save_img_name)
det_val_file.write(save_dir + '\t' + json.dumps(
label, ensure_ascii=False) + '\n')
else:
save_dir = os.path.join(det_save_dir, save_img_name)
det_train_file.write(save_dir + '\t' + json.dumps(
label, ensure_ascii=False) + '\n')
det_img.save(cf.output_dir + save_dir, quality = 95, subsampling=0)
# 场景应用
PaddleOCR场景应用覆盖通用、制造、金融、交通行业的主要OCR垂类应用,在PP-OCR、PP-Structure的通用能力基础之上,以notebook的形式展示利用场景数据微调、模型优化方法、数据增广等内容,为开发者快速落地OCR应用提供示范与启发。
> 如需下载全部垂类模型,可以扫描下方二维码,关注公众号填写问卷后,加入PaddleOCR官方交流群获取20G OCR学习大礼包(内含《动手学OCR》电子书、课程回放视频、前沿论文等重磅资料)
<div align="center">
<img src="https://ai-studio-static-online.cdn.bcebos.com/dd721099bd50478f9d5fb13d8dd00fad69c22d6848244fd3a1d3980d7fefc63e" width = "150" height = "150" />
</div>
> 如果您是企业开发者且未在下述场景中找到合适的方案,可以填写[OCR应用合作调研问卷](https://paddle.wjx.cn/vj/QwF7GKw.aspx),免费与官方团队展开不同层次的合作,包括但不限于问题抽象、确定技术方案、项目答疑、共同研发等。如果您已经使用PaddleOCR落地项目,也可以填写此问卷,与飞桨平台共同宣传推广,提升企业技术品宣。期待您的提交!
## 通用
| 类别 | 亮点 | 类别 | 亮点 |
| ---------------------- | -------- | ---------- | ------------ |
| 高精度中文识别模型SVTR | 新增模型 | 手写体识别 | 新增字形支持 |
## 制造
| 类别 | 亮点 | 类别 | 亮点 |
| -------------- | ------------------------------ | -------------- | -------------------- |
| 数码管识别 | 数码管数据合成、漏识别调优 | 电表识别 | 大分辨率图像检测调优 |
| 液晶屏读数识别 | 检测模型蒸馏、Serving部署 | PCB文字识别 | 小尺寸文本检测与识别 |
| 包装生产日期 | 点阵字符合成、过曝过暗文字识别 | 液晶屏缺陷检测 | 非文字形态识别 |
## 金融
| 类别 | 亮点 | 类别 | 亮点 |
| -------------- | ------------------------ | ------------ | --------------------- |
| 表单VQA | 多模态通用表单结构化提取 | 通用卡证识别 | 通用结构化提取 |
| 增值税发票     | 敬请期待                 | 身份证识别   | 结构化提取、图像阴影  |
| 印章检测与识别 | 端到端弯曲文本识别 | 合同比对 | 密集文本检测、NLP串联 |
## 交通
| 类别 | 亮点 | 类别 | 亮点 |
| ----------------- | ------------------------------ | ---------- | -------- |
| 车牌识别          | 多角度图像、轻量模型、端侧部署 | 快递单识别 | 敬请期待 |
| 驾驶证/行驶证识别 | 敬请期待                       |            |          |
\ No newline at end of file
46.39
40.08
89.52
-71.93
23.19
-81.02
-34.09
05.87
-67.80
-51.56
-34.58
37.91
56.98
29.01
-90.13
35.55
66.07
-90.35
-50.93
42.42
21.40
-30.99
-71.78
25.60
-48.69
-72.28
-17.55
-99.93
-47.35
-64.89
-31.28
-90.01
05.17
30.91
30.56
-06.90
79.05
67.74
-32.31
94.22
28.75
51.03
-58.96
# 光功率计数码管字符识别
本案例将使用OCR技术自动识别光功率计显示屏文字,通过本章您可以掌握:
- PaddleOCR快速使用
- 数据合成方法
- 数据挖掘方法
- 基于现有数据微调
## 1. 背景介绍
光功率计(optical power meter)是指用于测量绝对光功率或通过一段光纤的光功率相对损耗的仪器。在光纤系统中,测量光功率是最基本的,非常像电子学中的万用表;在光纤测量中,光功率计是重负荷常用表。
<img src="https://bkimg.cdn.bcebos.com/pic/a08b87d6277f9e2f999f5e3e1c30e924b899f35a?x-bce-process=image/watermark,image_d2F0ZXIvYmFpa2U5Mg==,g_7,xp_5,yp_5/format,f_auto" width="400">
目前光功率计缺少将数据直接输出的功能,需要人工读数。这一项工作单调重复,如果可以使用机器替代人工,将节约大量成本。针对上述问题,希望通过摄像头拍照->智能读数的方式高效地完成此任务。
为实现智能读数,通常会采取文本检测+文本识别的方案:
第一步,使用文本检测模型定位出光功率计中的数字部分;
第二步,使用文本识别模型获得准确的数字和单位信息。
本项目主要介绍如何完成第二步文本识别部分,包括:真实评估集的建立、训练数据的合成、基于 PP-OCRv3 和 SVTR_Tiny 两个模型进行训练,以及评估和推理。
本项目难点如下:
- 光功率计数码管字符数据较少,难以获取。
- 数码管中小数点占像素较少,容易漏识别。
针对以上问题, 本例选用 PP-OCRv3 和 SVTR_Tiny 两个高精度模型训练,同时提供了真实数据挖掘案例和数据合成案例。基于 PP-OCRv3 模型,在构建的真实评估集上精度从 52% 提升至 72%,SVTR_Tiny 模型精度可达到 78.9%。
aistudio项目链接: [光功率计数码管字符识别](https://aistudio.baidu.com/aistudio/projectdetail/4049044?contributionType=1)
## 2. PaddleOCR 快速使用
PaddleOCR 旨在打造一套丰富、领先、且实用的OCR工具库,助力开发者训练出更好的模型,并应用落地。
![](https://github.com/PaddlePaddle/PaddleOCR/raw/release/2.5/doc/imgs_results/ch_ppocr_mobile_v2.0/test_add_91.jpg)
官方提供了适用于通用场景的高精轻量模型,首先使用官方提供的 PP-OCRv3 模型预测图片,验证下当前模型在光功率计场景上的效果。
- 准备环境
```
python3 -m pip install -U pip
python3 -m pip install paddleocr
```
- 测试效果
测试图:
![](https://ai-studio-static-online.cdn.bcebos.com/8dca91f016884e16ad9216d416da72ea08190f97d87b4be883f15079b7ebab9a)
```
paddleocr --lang=ch --det=False --image_dir=data
```
得到如下测试结果:
```
('.7000', 0.6885431408882141)
```
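除命令行外,也可以通过 paddleocr 的 Python 接口完成同样的纯识别预测(以下为示意写法,基于 paddleocr whl 包的常见用法,其中图片路径为假设值):
```python
from paddleocr import PaddleOCR

# 仅做文本识别(det=False),与上面的命令行调用效果相同
ocr = PaddleOCR(lang='ch', use_angle_cls=False)
# 'data/test.png' 为假设的测试图片路径,请替换为实际图片
result = ocr.ocr('data/test.png', det=False, cls=False)
print(result)  # 输出为 (文本, 置信度) 形式的识别结果
```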
发现数字识别较准,然而对负号和小数点识别不准确。 由于PP-OCRv3的训练数据大多为通用场景数据,在特定的场景上效果可能不够好。因此需要基于场景数据进行微调。
下面就主要介绍如何在光功率计(数码管)场景上微调训练。
## 3. 开始训练
### 3.1 数据准备
特定的工业场景往往很难获取开源的真实数据集,光功率计也是如此。在实际工业场景中,可以通过摄像头采集的方法收集大量真实数据,本例中重点介绍数据合成方法和真实数据挖掘方法,如何利用有限的数据优化模型精度。
数据集分为两个部分:合成数据和真实数据。其中合成数据由 text_renderer 工具批量生成得到,真实数据通过爬虫等方式在百度图片中搜索并使用 PPOCRLabel 标注得到。
- 合成数据
本例中数据合成工具使用的是 [text_renderer](https://github.com/Sanster/text_renderer), 该工具可以合成用于文本识别训练的文本行数据:
![](https://github.com/oh-my-ocr/text_renderer/raw/master/example_data/effect_layout_image/char_spacing_compact.jpg)
![](https://github.com/oh-my-ocr/text_renderer/raw/master/example_data/effect_layout_image/color_image.jpg)
```
git clone https://github.com/oh-my-ocr/text_renderer
```
```
cd text_renderer
python3 setup.py develop
python3 -m pip install -r docker/requirements.txt
python3 main.py \
--config example_data/example.py \
--dataset img \
--num_processes 2 \
--log_period 10
```
给定字体和语料,就可以合成较为丰富样式的文本行数据。在光功率计识别场景中,目标是正确识别数码管文本,因此需要收集部分数码管字体和训练语料,用于合成文本识别数据。
将收集好的字体和语料存放在 example_data 路径下:
```
ln -s ./fonts/DS* text_renderer/example_data/font/
ln -s ./corpus/digital.txt text_renderer/example_data/text/
```
修改 text_renderer/example_data/font_list/font_list.txt ,选择需要的字体开始合成:
```
python3 main.py \
--config example_data/digital_example.py \
--dataset img \
--num_processes 2 \
--log_period 10
```
合成图片会保存在 text_renderer/example_data/digital/chn_data 目录下
查看合成的数据样例:
![](https://ai-studio-static-online.cdn.bcebos.com/7d5774a273f84efba5b9ce7fd3f86e9ef24b6473e046444db69fa3ca20ac0986)
- 真实数据挖掘
模型训练需要使用真实数据作为评价指标,否则很容易过拟合到简单的合成数据中。没有开源数据的情况下,可以利用部分无标注数据+标注工具获得真实数据。
1. 数据搜集
使用[爬虫工具](https://github.com/Joeclinton1/google-images-download.git)获得无标注数据
2. [PPOCRLabel](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.5/PPOCRLabel) 完成半自动标注
PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置PP-OCR模型对数据自动标注和重新识别。使用Python3和PyQT5编写,支持矩形框标注、表格标注、不规则文本标注、关键信息标注模式,导出格式可直接用于PaddleOCR检测和识别模型的训练。
![](https://github.com/PaddlePaddle/PaddleOCR/raw/release/2.5/PPOCRLabel/data/gif/steps_en.gif)
收集完数据后就可以进行分配了,验证集中一般都是真实数据,训练集中包含合成数据+真实数据。本例中标注了155张图片,其中训练集和验证集的数目为100和55。
最终 `data` 文件夹应包含以下几部分:
```
|-data
|- synth_train.txt
|- real_train.txt
|- real_eval.txt
|- synthetic_data
|- word_001.png
|- word_002.jpg
|- word_003.jpg
| ...
|- real_data
|- word_001.png
|- word_002.jpg
|- word_003.jpg
| ...
...
```
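其中 real_train.txt、real_eval.txt 等标签文件采用 PaddleOCR 识别任务通用的「图片路径\t识别文本」格式。训练/验证集的划分可以用类似下面的脚本完成(仅为示意,假设已有一份同样格式的标注汇总文件 rec_gt.txt,100/55 为本例的划分数目):
```python
import random

# 示意:将真实标注数据按本例的 100 / 55 划分为训练集和验证集
# 假设 rec_gt.txt 为「图片路径\t识别文本」格式的标注汇总文件(每行一条)
with open('rec_gt.txt', 'r', encoding='utf-8') as f:
    lines = [line.strip() for line in f if line.strip()]

random.shuffle(lines)
train_lines, eval_lines = lines[:100], lines[100:155]

with open('data/real_train.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(train_lines) + '\n')
with open('data/real_eval.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(eval_lines) + '\n')
```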
### 3.2 模型选择
本案例提供了2种文本识别模型:PP-OCRv3 识别模型 和 SVTR_Tiny:
[PP-OCRv3 识别模型](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/doc/doc_ch/PP-OCRv3_introduction.md):PP-OCRv3的识别模块是基于文本识别算法SVTR优化得到的。SVTR不再采用RNN结构,通过引入Transformers结构更加有效地挖掘文本行图像的上下文信息,从而提升文本识别能力,并进行了一系列结构改进以加速模型预测。
[SVTR_Tiny](https://arxiv.org/abs/2205.00159):SVTR提出了一种用于场景文本识别的单视觉模型,该模型在patch-wise image tokenization框架内,完全摒弃了序列建模,在精度具有竞争力的前提下,模型参数量更少,速度更快。
以上两个策略在自建中文数据集上的精度和速度对比如下:
| ID | 策略 | 模型大小 | 精度 | 预测耗时(CPU + MKLDNN)|
|-----|-----|--------|----| --- |
| 01 | PP-OCRv2 | 8M | 74.8% | 8.54ms |
| 02 | SVTR_Tiny | 21M | 80.1% | 97ms |
| 03 | SVTR_LCNet(h32) | 12M | 71.9% | 6.6ms |
| 04 | SVTR_LCNet(h48) | 12M | 73.98% | 7.6ms |
| 05 | + GTC | 12M | 75.8% | 7.6ms |
| 06 | + TextConAug | 12M | 76.3% | 7.6ms |
| 07 | + TextRotNet | 12M | 76.9% | 7.6ms |
| 08 | + UDML | 12M | 78.4% | 7.6ms |
| 09 | + UIM | 12M | 79.4% | 7.6ms |
### 3.3 开始训练
首先下载 PaddleOCR 代码库
```
git clone -b release/2.5 https://github.com/PaddlePaddle/PaddleOCR.git
```
PaddleOCR提供了训练脚本、评估脚本和预测脚本,本节将以 PP-OCRv3 中文识别模型为例:
**Step1:下载预训练模型**
首先下载 pretrain model,您可以下载训练好的模型在自定义数据上进行finetune
```
cd PaddleOCR/
# 下载PP-OCRv3 中文预训练模型
wget -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_train.tar
# 解压模型参数
cd pretrain_models
tar -xf ch_PP-OCRv3_rec_train.tar && rm -rf ch_PP-OCRv3_rec_train.tar
```
**Step2:自定义字典文件**
接下来需要提供一个字典({word_dict_name}.txt),使模型在训练时,可以将所有出现的字符映射为字典的索引。
因此字典需要包含所有希望被正确识别的字符,{word_dict_name}.txt需要写成如下格式,并以 `utf-8` 编码格式保存:
```
0
1
2
3
4
5
6
7
8
9
-
.
```
word_dict.txt 每行有一个单字,将字符与数字索引映射在一起,“3.14” 将被映射成 [3, 11, 1, 4]
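这一映射过程可以用几行 Python 示意(字典内容即上面的 12 个字符,索引从 0 开始):
```python
# 读取字典:每行一个字符,行号即该字符的索引
with open('word_dict.txt', 'r', encoding='utf-8') as f:
    chars = [line.rstrip('\n') for line in f]
char2idx = {c: i for i, c in enumerate(chars)}

print([char2idx[c] for c in '3.14'])  # [3, 11, 1, 4]
```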
* 内置字典
PaddleOCR内置了一部分字典,可以按需使用。
`ppocr/utils/ppocr_keys_v1.txt` 是一个包含6623个字符的中文字典
`ppocr/utils/ic15_dict.txt` 是一个包含36个字符的英文字典
* 自定义字典
内置字典面向通用场景,具体的工业场景中,可能需要识别特殊字符,或者只需识别某几个字符,此时自定义字典更能提升模型精度。例如在光功率计场景中,需要识别数字和单位。
遍历真实数据标签中的字符,制作字典`digital_dict.txt`如下所示:
```
-
.
0
1
2
3
4
5
6
7
8
9
B
E
F
H
L
N
T
W
d
k
m
n
o
z
```
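「遍历真实数据标签中的字符」这一步可以用如下脚本示意(假设标签文件为「图片路径\t识别文本」格式的 real_train.txt,输出文件名为假设值):
```python
# 示意:从识别标签文件中收集出现过的所有字符,生成自定义字典
chars = set()
with open('data/real_train.txt', 'r', encoding='utf-8') as f:
    for line in f:
        if '\t' in line:
            _, text = line.rstrip('\n').split('\t', 1)
            chars.update(text)

with open('digital_dict.txt', 'w', encoding='utf-8') as f:
    for c in sorted(chars):
        f.write(c + '\n')
```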
**Step3:修改配置文件**
为了更好地使用预训练模型,训练推荐使用[ch_PP-OCRv3_rec_distillation.yml](../../configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml)配置文件,并参考下列说明修改配置文件。
以 `ch_PP-OCRv3_rec_distillation.yml` 为例:
```
Global:
...
# 添加自定义字典,如修改字典请将路径指向新字典
character_dict_path: ppocr/utils/dict/digital_dict.txt
...
# 识别空格
use_space_char: True
Optimizer:
...
# 添加学习率衰减策略
lr:
name: Cosine
learning_rate: 0.001
...
...
Train:
dataset:
# 数据集格式,支持LMDBDataSet以及SimpleDataSet
name: SimpleDataSet
# 数据集路径
data_dir: ./data/
# 训练集标签文件
label_file_list:
- ./train_data/digital_img/digital_train.txt #11w
- ./train_data/digital_img/real_train.txt #100
- ./train_data/digital_img/dbm_img/dbm.txt #3w
ratio_list:
- 0.3
- 1.0
- 1.0
transforms:
...
- RecResizeImg:
# 修改 image_shape 以适应长文本
image_shape: [3, 48, 320]
...
loader:
...
# 单卡训练的batch_size
batch_size_per_card: 256
...
Eval:
dataset:
# 数据集格式,支持LMDBDataSet以及SimpleDataSet
name: SimpleDataSet
# 数据集路径
data_dir: ./data
# 验证集标签文件
label_file_list:
- ./train_data/digital_img/real_val.txt
transforms:
...
- RecResizeImg:
# 修改 image_shape 以适应长文本
image_shape: [3, 48, 320]
...
loader:
# 单卡验证的batch_size
batch_size_per_card: 256
...
```
**注意,预测/评估时的配置文件请务必与训练时一致。**
**Step4:启动训练**
*如果您安装的是cpu版本,请将配置文件中的 `use_gpu` 字段修改为false*
```
# GPU训练 支持单卡,多卡训练
# 训练数码管数据 训练日志会自动保存为 "{save_model_dir}" 下的train.log
#单卡训练(训练周期长,不建议)
python3 tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.pretrained_model=./pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy
#多卡训练,通过--gpus参数指定卡号
python3 -m paddle.distributed.launch --gpus '0,1,2,3'  tools/train.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.pretrained_model=./pretrain_models/ch_PP-OCRv3_rec_train/best_accuracy
```
PaddleOCR支持训练和评估交替进行,可以在 `configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml` 中修改 `eval_batch_step` 设置评估频率,默认每500个iter评估一次。评估过程中默认将最佳acc模型保存为 `output/ch_PP-OCRv3_rec_distill/best_accuracy`。
如果验证集很大,测试将会比较耗时,建议减少评估次数,或训练完再进行评估。
### SVTR_Tiny 训练
SVTR_Tiny 训练步骤与上面一致,SVTR支持的配置和模型训练权重可以参考[算法介绍文档](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.5/doc/doc_ch/algorithm_rec_svtr.md)
**Step1:下载预训练模型**
```
# 下载 SVTR_Tiny 中文识别预训练模型和配置文件
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/rec_svtr_tiny_none_ctc_ch_train.tar
# 解压模型参数
tar -xf rec_svtr_tiny_none_ctc_ch_train.tar && rm -rf rec_svtr_tiny_none_ctc_ch_train.tar
```
**Step2:自定义字典文件**
字典依然使用自定义的 digital_dict.txt
**Step3:修改配置文件**
配置文件中对应修改字典路径和数据路径
**Step4:启动训练**
```
## 单卡训练
python tools/train.py -c rec_svtr_tiny_none_ctc_ch_train/rec_svtr_tiny_6local_6global_stn_ch.yml \
-o Global.pretrained_model=./rec_svtr_tiny_none_ctc_ch_train/best_accuracy
```
### 3.4 验证效果
如需获取已训练模型,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁
<div align="left">
<img src="https://ai-studio-static-online.cdn.bcebos.com/dd721099bd50478f9d5fb13d8dd00fad69c22d6848244fd3a1d3980d7fefc63e" width = "150" height = "150" />
</div>
将下载或训练完成的模型放置在对应目录下即可完成模型推理
* 指标评估
训练中模型参数默认保存在`Global.save_model_dir`目录下。在评估指标时,需要设置`Global.checkpoints`指向保存的参数文件。评估数据集可以通过修改 `configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml` 中 Eval 的 `label_file_list` 字段设置。
```
# GPU 评估, Global.checkpoints 为待测权重
python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.checkpoints={path/to/weights}/best_accuracy
```
* 测试识别效果
使用 PaddleOCR 训练好的模型,可以通过以下脚本进行快速预测。
默认预测图片存储在 `infer_img` 里,通过 `-o Global.pretrained_model` 加载训练好的参数文件:
根据配置文件中设置的 `save_model_dir``save_epoch_step` 字段,会有以下几种参数被保存下来:
```
output/rec/
├── best_accuracy.pdopt
├── best_accuracy.pdparams
├── best_accuracy.states
├── config.yml
├── iter_epoch_3.pdopt
├── iter_epoch_3.pdparams
├── iter_epoch_3.states
├── latest.pdopt
├── latest.pdparams
├── latest.states
└── train.log
```
其中 best_accuracy.* 是评估集上的最优模型;iter_epoch_x.* 是以 `save_epoch_step` 为间隔保存下来的模型;latest.* 是最后一个epoch的模型。
```
# 预测数码管识别结果
python3 tools/infer_rec.py -c configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml -o Global.pretrained_model={path/to/weights}/best_accuracy Global.infer_img=test_digital.png
```
预测图片:
![](https://ai-studio-static-online.cdn.bcebos.com/8dca91f016884e16ad9216d416da72ea08190f97d87b4be883f15079b7ebab9a)
得到输入图像的预测结果:
```
infer_img: test_digital.png
result: ('-70.00', 0.9998967)
```
# 高精度中文场景文本识别模型SVTR
## 1. 简介
PP-OCRv3是百度开源的超轻量级场景文本检测识别模型库,其中超轻量的场景中文识别模型SVTR_LCNet使用了SVTR算法结构。为了保证速度,SVTR_LCNet将SVTR模型的Local Blocks替换为LCNet,使用两层Global Blocks。在中文场景中,PP-OCRv3识别主要使用如下优化策略:
- GTC:Attention指导CTC训练策略;
- TextConAug:挖掘文字上下文信息的数据增广策略;
- TextRotNet:自监督的预训练模型;
- UDML:联合互学习策略;
- UIM:无标注数据挖掘方案。
其中 *UIM:无标注数据挖掘方案* 使用了高精度的SVTR中文模型进行无标注文件的刷库,该模型在PP-OCRv3识别的数据集上训练,精度对比如下表。
|中文识别算法|模型|UIM|精度|
| --- | --- | --- |--- |
|PP-OCRv3|SVTR_LCNet| w/o |78.4%|
|PP-OCRv3|SVTR_LCNet| w |79.4%|
|SVTR|SVTR-Tiny|-|82.5%|
aistudio项目链接: [高精度中文场景文本识别模型SVTR](https://aistudio.baidu.com/aistudio/projectdetail/4263032)
## 2. SVTR中文模型使用
### 环境准备
本任务基于Aistudio完成, 具体环境如下:
- 操作系统: Linux
- PaddlePaddle: 2.3
- PaddleOCR: dygraph
下载 PaddleOCR代码
```bash
git clone -b dygraph https://github.com/PaddlePaddle/PaddleOCR
```
安装依赖库
```bash
pip install -r PaddleOCR/requirements.txt -i https://mirror.baidu.com/pypi/simple
```
### 快速使用
获取SVTR中文模型文件,请扫码填写问卷,加入PaddleOCR官方交流群获取全部OCR垂类模型下载链接、《动手学OCR》电子书等全套OCR学习资料🎁
<div align="center">
<img src="https://ai-studio-static-online.cdn.bcebos.com/dd721099bd50478f9d5fb13d8dd00fad69c22d6848244fd3a1d3980d7fefc63e" width = "150" height = "150" />
</div>
```bash
# 解压模型文件
tar xf svtr_ch_high_accuracy.tar
```
预测中文文本,以下图为例:
![](../doc/imgs_words/ch/word_1.jpg)
预测命令:
```bash
# CPU预测
python tools/infer_rec.py -c configs/rec/rec_svtrnet_ch.yml -o Global.pretrained_model=./svtr_ch_high_accuracy/best_accuracy Global.infer_img=./doc/imgs_words/ch/word_1.jpg Global.use_gpu=False
# GPU预测
#python tools/infer_rec.py -c configs/rec/rec_svtrnet_ch.yml -o Global.pretrained_model=./svtr_ch_high_accuracy/best_accuracy Global.infer_img=./doc/imgs_words/ch/word_1.jpg Global.use_gpu=True
```
可以看到最后打印结果为
- result: 韩国小馆 0.9853458404541016
0.9853458404541016为预测置信度。
### 推理模型导出与预测
inference 模型(paddle.jit.save保存的模型)一般是模型训练完成后,把模型结构和模型参数保存在文件中的固化模型,多用于预测部署场景。训练过程中保存的模型是checkpoints模型,保存的只有模型的参数,多用于恢复训练等。与checkpoints模型相比,inference 模型会额外保存模型的结构信息,在预测部署、加速推理上性能优越,灵活方便,适合于实际系统集成。
运行识别模型转inference模型命令,如下:
```bash
python tools/export_model.py -c configs/rec/rec_svtrnet_ch.yml -o Global.pretrained_model=./svtr_ch_high_accuracy/best_accuracy Global.save_inference_dir=./inference/svtr_ch
```
转换成功后,在目录下有三个文件:
```shell
inference/svtr_ch/
├── inference.pdiparams # 识别inference模型的参数文件
├── inference.pdiparams.info # 识别inference模型的参数信息,可忽略
└── inference.pdmodel # 识别inference模型的program文件
```
inference模型预测,命令如下:
```bash
# CPU预测
python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words/ch/word_1.jpg" --rec_algorithm='SVTR' --rec_model_dir=./inference/svtr_ch/ --rec_image_shape='3, 32, 320' --rec_char_dict_path=ppocr/utils/ppocr_keys_v1.txt --use_gpu=False
# GPU预测
#python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words/ch/word_1.jpg" --rec_algorithm='SVTR' --rec_model_dir=./inference/svtr_ch/ --rec_image_shape='3, 32, 320' --rec_char_dict_path=ppocr/utils/ppocr_keys_v1.txt --use_gpu=True
```
**注意**
- 使用SVTR算法时,需要指定--rec_algorithm='SVTR'
- 如果使用自定义字典训练的模型,需要将--rec_char_dict_path=ppocr/utils/ppocr_keys_v1.txt修改为自定义的字典
- --rec_image_shape='3, 32, 320' 该参数不能去掉
......@@ -63,8 +63,7 @@ Train:
- DecodeImage:
img_mode: BGR
channel_first: false
- RecAug:
use_tia: False
- BaseDataAugmentation:
- RandAugment:
- SSLRotateResize:
image_shape: [3, 48, 320]
......
......@@ -60,8 +60,7 @@ Train:
img_mode: BGR
channel_first: False
- ClsLabelEncode: # Class handling label
- RecAug:
use_tia: False
- BaseDataAugmentation:
- RandAugment:
- ClsResizeImg:
image_shape: [3, 48, 192]
......
......@@ -49,7 +49,7 @@ Architecture:
Loss:
name: NRTRLoss
name: CELoss
smoothing: True
PostProcess:
......@@ -68,8 +68,8 @@ Train:
img_mode: BGR
channel_first: False
- NRTRLabelEncode: # Class handling label
- NRTRRecResizeImg:
image_shape: [100, 32]
- GrayRecResizeImg:
image_shape: [100, 32] # W H
resize_type: PIL # PIL or OpenCV
- KeepKeys:
keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
......@@ -82,14 +82,14 @@ Train:
Eval:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/evaluation/
data_dir: ./train_data/data_lmdb_release/validation/
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- NRTRLabelEncode: # Class handling label
- NRTRRecResizeImg:
image_shape: [100, 32]
- GrayRecResizeImg:
image_shape: [100, 32] # W H
resize_type: PIL # PIL or OpenCV
- KeepKeys:
keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
......@@ -97,5 +97,5 @@ Eval:
shuffle: False
drop_last: False
batch_size_per_card: 256
num_workers: 1
num_workers: 4
use_shared_memory: False
Global:
use_gpu: True
epoch_num: 10
log_smooth_window: 20
print_batch_step: 10
save_model_dir: ./output/rec/r45_abinet/
save_epoch_step: 1
# evaluation is run every 2000 iterations
eval_batch_step: [0, 2000]
cal_metric_during_train: True
pretrained_model:
checkpoints:
save_inference_dir:
use_visualdl: False
infer_img: doc/imgs_words_en/word_10.png
# for data or label process
character_dict_path:
character_type: en
max_text_length: 25
infer_mode: False
use_space_char: False
save_res_path: ./output/rec/predicts_abinet.txt
Optimizer:
name: Adam
beta1: 0.9
beta2: 0.99
clip_norm: 20.0
lr:
name: Piecewise
decay_epochs: [6]
values: [0.0001, 0.00001]
regularizer:
name: 'L2'
factor: 0.
Architecture:
model_type: rec
algorithm: ABINet
in_channels: 3
Transform:
Backbone:
name: ResNet45
Head:
name: ABINetHead
use_lang: True
iter_size: 3
Loss:
name: CELoss
ignore_index: &ignore_index 100 # Must be greater than the number of character classes
PostProcess:
name: ABINetLabelDecode
Metric:
name: RecMetric
main_indicator: acc
Train:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/training/
transforms:
- DecodeImage: # load image
img_mode: RGB
channel_first: False
- ABINetRecAug:
- ABINetLabelEncode: # Class handling label
ignore_index: *ignore_index
- ABINetRecResizeImg:
image_shape: [3, 32, 128]
- KeepKeys:
keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
loader:
shuffle: True
batch_size_per_card: 96
drop_last: True
num_workers: 4
Eval:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/validation/
transforms:
- DecodeImage: # load image
img_mode: RGB
channel_first: False
- ABINetLabelEncode: # Class handling label
ignore_index: *ignore_index
- ABINetRecResizeImg:
image_shape: [3, 32, 128]
- KeepKeys:
keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
loader:
shuffle: False
drop_last: False
batch_size_per_card: 256
num_workers: 4
use_shared_memory: False
......@@ -26,7 +26,7 @@ Optimizer:
name: AdamW
beta1: 0.9
beta2: 0.99
epsilon: 0.00000008
epsilon: 8.e-8
weight_decay: 0.05
no_weight_decay_name: norm pos_embed
one_dim_param_no_weight_decay: true
......@@ -77,14 +77,13 @@ Metric:
Train:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/training/
data_dir: ./train_data/data_lmdb_release/training
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- CTCLabelEncode: # Class handling label
- RecResizeImg:
character_dict_path:
- SVTRRecResizeImg:
image_shape: [3, 64, 256]
padding: False
- KeepKeys:
......@@ -98,14 +97,13 @@ Train:
Eval:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/validation/
data_dir: ./train_data/data_lmdb_release/validation
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- CTCLabelEncode: # Class handling label
- RecResizeImg:
character_dict_path:
- SVTRRecResizeImg:
image_shape: [3, 64, 256]
padding: False
- KeepKeys:
......
Global:
use_gpu: true
epoch_num: 100
log_smooth_window: 20
print_batch_step: 10
save_model_dir: ./output/rec/svtr_ch_all/
save_epoch_step: 10
eval_batch_step:
- 0
- 2000
cal_metric_during_train: true
pretrained_model: null
checkpoints: null
save_inference_dir: null
use_visualdl: false
infer_img: doc/imgs_words/ch/word_1.jpg
character_dict_path: ppocr/utils/ppocr_keys_v1.txt
max_text_length: 25
infer_mode: false
use_space_char: true
save_res_path: ./output/rec/predicts_svtr_tiny_ch_all.txt
Optimizer:
name: AdamW
beta1: 0.9
beta2: 0.99
epsilon: 8.0e-08
weight_decay: 0.05
no_weight_decay_name: norm pos_embed
one_dim_param_no_weight_decay: true
lr:
name: Cosine
learning_rate: 0.0005
warmup_epoch: 2
Architecture:
model_type: rec
algorithm: SVTR
Transform: null
Backbone:
name: SVTRNet
img_size:
- 32
- 320
out_char_num: 40
out_channels: 96
patch_merging: Conv
embed_dim:
- 64
- 128
- 256
depth:
- 3
- 6
- 3
num_heads:
- 2
- 4
- 8
mixer:
- Local
- Local
- Local
- Local
- Local
- Local
- Global
- Global
- Global
- Global
- Global
- Global
local_mixer:
- - 7
- 11
- - 7
- 11
- - 7
- 11
last_stage: true
prenorm: false
Neck:
name: SequenceEncoder
encoder_type: reshape
Head:
name: CTCHead
Loss:
name: CTCLoss
PostProcess:
name: CTCLabelDecode
Metric:
name: RecMetric
main_indicator: acc
Train:
dataset:
name: SimpleDataSet
data_dir: ./train_data
label_file_list:
- ./train_data/train_list.txt
ext_op_transform_idx: 1
transforms:
- DecodeImage:
img_mode: BGR
channel_first: false
- RecConAug:
prob: 0.5
ext_data_num: 2
image_shape:
- 32
- 320
- 3
- RecAug: null
- CTCLabelEncode: null
- SVTRRecResizeImg:
image_shape:
- 3
- 32
- 320
padding: true
- KeepKeys:
keep_keys:
- image
- label
- length
loader:
shuffle: true
batch_size_per_card: 256
drop_last: true
num_workers: 8
Eval:
dataset:
name: SimpleDataSet
data_dir: ./train_data
label_file_list:
- ./train_data/val_list.txt
transforms:
- DecodeImage:
img_mode: BGR
channel_first: false
- CTCLabelEncode: null
- SVTRRecResizeImg:
image_shape:
- 3
- 32
- 320
padding: true
- KeepKeys:
keep_keys:
- image
- label
- length
loader:
shuffle: false
drop_last: false
batch_size_per_card: 256
num_workers: 2
profiler_options: null
Global:
use_gpu: True
epoch_num: 20
log_smooth_window: 20
print_batch_step: 10
save_model_dir: ./output/rec/vitstr_none_ce/
save_epoch_step: 1
# evaluation is run every 2000 iterations after the 0th iteration#
eval_batch_step: [0, 2000]
cal_metric_during_train: True
pretrained_model:
checkpoints:
save_inference_dir:
use_visualdl: False
infer_img: doc/imgs_words_en/word_10.png
# for data or label process
character_dict_path: ppocr/utils/EN_symbol_dict.txt
max_text_length: 25
infer_mode: False
use_space_char: False
save_res_path: ./output/rec/predicts_vitstr.txt
Optimizer:
name: Adadelta
epsilon: 1.e-8
rho: 0.95
clip_norm: 5.0
lr:
learning_rate: 1.0
Architecture:
model_type: rec
algorithm: ViTSTR
in_channels: 1
Transform:
Backbone:
name: ViTSTR
scale: tiny
Neck:
name: SequenceEncoder
encoder_type: reshape
Head:
name: CTCHead
Loss:
name: CELoss
with_all: True
ignore_index: &ignore_index 0 # Must be zero or greater than the number of character classes
PostProcess:
name: ViTSTRLabelDecode
Metric:
name: RecMetric
main_indicator: acc
Train:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/training/
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- ViTSTRLabelEncode: # Class handling label
ignore_index: *ignore_index
- GrayRecResizeImg:
image_shape: [224, 224] # W H
resize_type: PIL # PIL or OpenCV
inter_type: 'Image.BICUBIC'
scale: false
- KeepKeys:
keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
loader:
shuffle: True
batch_size_per_card: 48
drop_last: True
num_workers: 8
Eval:
dataset:
name: LMDBDataSet
data_dir: ./train_data/data_lmdb_release/validation/
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- ViTSTRLabelEncode: # Class handling label
ignore_index: *ignore_index
- GrayRecResizeImg:
image_shape: [224, 224] # W H
resize_type: PIL # PIL or OpenCV
inter_type: 'Image.BICUBIC'
scale: false
- KeepKeys:
keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
loader:
shuffle: False
drop_last: False
batch_size_per_card: 256
num_workers: 2
include/inputs.h
include/outputs.h
__pycache__/
build/
\ No newline at end of file
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Makefile to build demo
# Setup build environment
BUILD_DIR := build
ARM_CPU = ARMCM55
ETHOSU_PATH = /opt/arm/ethosu
CMSIS_PATH ?= ${ETHOSU_PATH}/cmsis
ETHOSU_PLATFORM_PATH ?= ${ETHOSU_PATH}/core_platform
STANDALONE_CRT_PATH := $(abspath $(BUILD_DIR))/runtime
CORSTONE_300_PATH = ${ETHOSU_PLATFORM_PATH}/targets/corstone-300
PKG_COMPILE_OPTS = -g -Wall -O2 -Wno-incompatible-pointer-types -Wno-format -mcpu=cortex-m55 -mthumb -mfloat-abi=hard -std=gnu99
CMAKE ?= cmake
CC = arm-none-eabi-gcc
AR = arm-none-eabi-ar
RANLIB = arm-none-eabi-ranlib
PKG_CFLAGS = ${PKG_COMPILE_OPTS} \
-I${STANDALONE_CRT_PATH}/include \
-I${STANDALONE_CRT_PATH}/src/runtime/crt/include \
-I${PWD}/include \
-I${CORSTONE_300_PATH} \
-I${CMSIS_PATH}/Device/ARM/${ARM_CPU}/Include/ \
-I${CMSIS_PATH}/CMSIS/Core/Include \
-I${CMSIS_PATH}/CMSIS/NN/Include \
-I${CMSIS_PATH}/CMSIS/DSP/Include \
-I$(abspath $(BUILD_DIR))/codegen/host/include
CMSIS_NN_CMAKE_FLAGS = -DCMAKE_TOOLCHAIN_FILE=$(abspath $(BUILD_DIR))/../arm-none-eabi-gcc.cmake \
-DTARGET_CPU=cortex-m55 \
-DBUILD_CMSIS_NN_FUNCTIONS=YES
PKG_LDFLAGS = -lm -specs=nosys.specs -static -T corstone300.ld
ifeq ($(VERBOSE),1)
QUIET ?=
else
QUIET ?= @
endif
DEMO_MAIN = src/demo_bare_metal.c
CODEGEN_SRCS = $(wildcard $(abspath $(BUILD_DIR))/codegen/host/src/*.c)
CODEGEN_OBJS = $(subst .c,.o,$(CODEGEN_SRCS))
CMSIS_STARTUP_SRCS = $(wildcard ${CMSIS_PATH}/Device/ARM/${ARM_CPU}/Source/*.c)
UART_SRCS = $(wildcard ${CORSTONE_300_PATH}/*.c)
demo: $(BUILD_DIR)/demo
$(BUILD_DIR)/stack_allocator.o: $(STANDALONE_CRT_PATH)/src/runtime/crt/memory/stack_allocator.c
$(QUIET)mkdir -p $(@D)
$(QUIET)$(CC) -c $(PKG_CFLAGS) -o $@ $^
$(BUILD_DIR)/crt_backend_api.o: $(STANDALONE_CRT_PATH)/src/runtime/crt/common/crt_backend_api.c
$(QUIET)mkdir -p $(@D)
$(QUIET)$(CC) -c $(PKG_CFLAGS) -o $@ $^
# Build generated code
$(BUILD_DIR)/libcodegen.a: $(CODEGEN_SRCS)
$(QUIET)cd $(abspath $(BUILD_DIR)/codegen/host/src) && $(CC) -c $(PKG_CFLAGS) $(CODEGEN_SRCS)
$(QUIET)$(AR) -cr $(abspath $(BUILD_DIR)/libcodegen.a) $(CODEGEN_OBJS)
$(QUIET)$(RANLIB) $(abspath $(BUILD_DIR)/libcodegen.a)
# Build CMSIS startup code
${BUILD_DIR}/libcmsis_startup.a: $(CMSIS_STARTUP_SRCS)
$(QUIET)mkdir -p $(abspath $(BUILD_DIR)/libcmsis_startup)
$(QUIET)cd $(abspath $(BUILD_DIR)/libcmsis_startup) && $(CC) -c $(PKG_CFLAGS) -D${ARM_CPU} $^
$(QUIET)$(AR) -cr $(abspath $(BUILD_DIR)/libcmsis_startup.a) $(abspath $(BUILD_DIR))/libcmsis_startup/*.o
$(QUIET)$(RANLIB) $(abspath $(BUILD_DIR)/libcmsis_startup.a)
CMSIS_SHA_FILE=${CMSIS_PATH}/977abe9849781a2e788b02282986480ff4e25ea6.sha
ifneq ("$(wildcard $(CMSIS_SHA_FILE))","")
${BUILD_DIR}/cmsis_nn/Source/libcmsis-nn.a:
$(QUIET)mkdir -p $(@D)
$(QUIET)cd $(CMSIS_PATH)/CMSIS/NN && $(CMAKE) -B $(abspath $(BUILD_DIR)/cmsis_nn) $(CMSIS_NN_CMAKE_FLAGS)
$(QUIET)cd $(abspath $(BUILD_DIR)/cmsis_nn) && $(MAKE) all
else
# Build CMSIS-NN
${BUILD_DIR}/cmsis_nn/Source/SoftmaxFunctions/libCMSISNNSoftmax.a:
$(QUIET)mkdir -p $(@D)
$(QUIET)cd $(CMSIS_PATH)/CMSIS/NN && $(CMAKE) -B $(abspath $(BUILD_DIR)/cmsis_nn) $(CMSIS_NN_CMAKE_FLAGS)
$(QUIET)cd $(abspath $(BUILD_DIR)/cmsis_nn) && $(MAKE) all
endif
# Build demo application
ifneq ("$(wildcard $(CMSIS_SHA_FILE))","")
$(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BUILD_DIR)/crt_backend_api.o \
${BUILD_DIR}/libcodegen.a ${BUILD_DIR}/libcmsis_startup.a ${BUILD_DIR}/cmsis_nn/Source/libcmsis-nn.a
$(QUIET)mkdir -p $(@D)
$(QUIET)$(CC) $(PKG_CFLAGS) $(FREERTOS_FLAGS) -o $@ -Wl,--whole-archive $^ -Wl,--no-whole-archive $(PKG_LDFLAGS)
else
$(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BUILD_DIR)/crt_backend_api.o \
${BUILD_DIR}/libcodegen.a ${BUILD_DIR}/libcmsis_startup.a \
${BUILD_DIR}/cmsis_nn/Source/SoftmaxFunctions/libCMSISNNSoftmax.a \
${BUILD_DIR}/cmsis_nn/Source/FullyConnectedFunctions/libCMSISNNFullyConnected.a \
${BUILD_DIR}/cmsis_nn/Source/SVDFunctions/libCMSISNNSVDF.a \
${BUILD_DIR}/cmsis_nn/Source/ReshapeFunctions/libCMSISNNReshape.a \
${BUILD_DIR}/cmsis_nn/Source/ActivationFunctions/libCMSISNNActivation.a \
${BUILD_DIR}/cmsis_nn/Source/NNSupportFunctions/libCMSISNNSupport.a \
${BUILD_DIR}/cmsis_nn/Source/ConcatenationFunctions/libCMSISNNConcatenation.a \
${BUILD_DIR}/cmsis_nn/Source/BasicMathFunctions/libCMSISNNBasicMaths.a \
${BUILD_DIR}/cmsis_nn/Source/ConvolutionFunctions/libCMSISNNConvolutions.a \
${BUILD_DIR}/cmsis_nn/Source/PoolingFunctions/libCMSISNNPooling.a
$(QUIET)mkdir -p $(@D)
$(QUIET)$(CC) $(PKG_CFLAGS) $(FREERTOS_FLAGS) -o $@ -Wl,--whole-archive $^ -Wl,--no-whole-archive $(PKG_LDFLAGS)
endif
clean:
$(QUIET)rm -rf $(BUILD_DIR)/codegen
cleanall:
$(QUIET)rm -rf $(BUILD_DIR)
.SUFFIXES:
.DEFAULT: demo
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->
<!--- http://www.apache.org/licenses/LICENSE-2.0 -->
<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
Running PaddleOCR text recognition model via TVM on bare metal Arm(R) Cortex(R)-M55 CPU and CMSIS-NN
===============================================================
This folder contains an example of how to use TVM to run a PaddleOCR model
on bare metal Cortex(R)-M55 CPU and CMSIS-NN.
Prerequisites
-------------
If the demo is run in the ci_cpu Docker container provided with TVM, then the following
software will already be installed.
If the demo is not run in the ci_cpu Docker container, then you will need the following:
- Software required to build and run the demo (These can all be installed by running
https://github.com/apache/tvm/blob/main/docker/install/ubuntu_install_ethosu_driver_stack.sh .)
- [Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps)
- [cmake 3.19.5](https://github.com/Kitware/CMake/releases/)
- [GCC toolchain from Arm(R)](https://developer.arm.com/-/media/Files/downloads/gnu-rm/10-2020q4/gcc-arm-none-eabi-10-2020-q4-major-x86_64-linux.tar.bz2)
- [Arm(R) Ethos(TM)-U NPU driver stack](https://review.mlplatform.org)
- [CMSIS](https://github.com/ARM-software/CMSIS_5)
- The python libraries listed in the requirements.txt of this directory
- These can be installed by running the following from the current directory:
```bash
pip install -r ./requirements.txt
```
You will also need TVM which can either be:
- Built from source (see [Install from Source](https://tvm.apache.org/docs/install/from_source.html))
- When building from source, the following need to be set in config.cmake:
- set(USE_CMSISNN ON)
- set(USE_MICRO ON)
- set(USE_LLVM ON)
- Installed from TLCPack nightly(see [TLCPack](https://tlcpack.ai/))
You will need to update your PATH environment variable to include the path to cmake 3.19.5 and the FVP.
For example if you've installed these in ```/opt/arm``` , then you would do the following:
```bash
export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH
```
Running the demo application
----------------------------
Type the following command to run the bare metal text recognition application ([src/demo_bare_metal.c](./src/demo_bare_metal.c)):
```bash
./run_demo.sh
```
If the Ethos(TM)-U platform and/or CMSIS have not been installed in /opt/arm/ethosu then
the locations for these can be specified as arguments to run_demo.sh, for example:
```bash
./run_demo.sh --cmsis_path /home/tvm-user/cmsis \
--ethosu_platform_path /home/tvm-user/ethosu/core_platform
```
This will:
- Download a PaddleOCR text recognition model
- Use tvmc to compile the text recognition model for Cortex(R)-M55 CPU and CMSIS-NN
- Create a C header file inputs.h containing the image data as a C array
- Create a C header file outputs.h containing a C array where the output of inference will be stored
- Build the demo application
- Run the demo application on a Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software
- The application will report the text on the image and the corresponding score.
Using your own image
--------------------
The convert_image.py script takes a single argument on the command line which is the path of the
image to be converted into an array of bytes for consumption by the model.
The demo can be modified to use an image of your choice by changing the following line in run_demo.sh
```bash
python3 ./convert_image.py path/to/image
```
Model description
-----------------
In this demo, the model we use is an English recognition model based on [PP-OCRv3](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/PP-OCRv3_introduction.md). PP-OCRv3 is the third version of the PP-OCR series model released by [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR). This series of models has the following features:
- PP-OCRv3: ultra-lightweight OCR system: detection (3.6M) + direction classifier (1.4M) + recognition (12M) = 17.0M
- Support more than 80 kinds of multi-language recognition models, including English, Chinese, French, German, Arabic, Korean, Japanese and so on
- Support vertical text recognition, and long text recognition
The text recognition model in PP-OCRv3 supports more than 80 languages. In the process of model development, since the Arm(R) Cortex(R)-M55 CPU does not support the RNN operator, we removed the unsupported operators from the PP-OCRv3 text recognition model to obtain the current model.
\ No newline at end of file
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->
<!--- http://www.apache.org/licenses/LICENSE-2.0 -->
<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
通过TVM在 Arm(R) Cortex(R)-M55 CPU 上运行 PaddleOCR 文本识别模型
===============================================================
此文件夹包含如何使用 TVM 在 Cortex(R)-M55 CPU 上运行 PaddleOCR 模型的示例。
依赖
-------------
本demo运行在TVM提供的docker环境上,该环境中已经安装好必需的软件。
在非docker环境中,需要手动安装如下依赖项:
- 软件可通过[安装脚本](https://github.com/apache/tvm/blob/main/docker/install/ubuntu_install_ethosu_driver_stack.sh)一键安装
- [Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps)
- [cmake 3.19.5](https://github.com/Kitware/CMake/releases/)
- [GCC toolchain from Arm(R)](https://developer.arm.com/-/media/Files/downloads/gnu-rm/10-2020q4/gcc-arm-none-eabi-10-2020-q4-major-x86_64-linux.tar.bz2)
- [Arm(R) Ethos(TM)-U NPU driver stack](https://review.mlplatform.org)
- [CMSIS](https://github.com/ARM-software/CMSIS_5)
- python 依赖
```bash
pip install -r ./requirements.txt
```
- TVM
- 从源码安装([Install from Source](https://tvm.apache.org/docs/install/from_source.html))
从源码安装时,需要设置如下字段
- set(USE_CMSISNN ON)
- set(USE_MICRO ON)
- set(USE_LLVM ON)
- 从TLCPack 安装([TLCPack](https://tlcpack.ai/))
安装完成后需要更新环境变量,以软件安装地址为`/opt/arm`为例:
```bash
export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH
```
运行demo
----------------------------
使用如下命令可以一键运行demo
```bash
./run_demo.sh
```
如果 Ethos(TM)-U 平台或 CMSIS 没有安装在 `/opt/arm/ethosu` 中,可通过参数进行设置,例如:
```bash
./run_demo.sh --cmsis_path /home/tvm-user/cmsis \
--ethosu_platform_path /home/tvm-user/ethosu/core_platform
```
`./run_demo.sh`脚本会执行如下步骤:
- 下载 PaddleOCR 文字识别模型
- 使用tvm将PaddleOCR 文字识别模型编译为 Cortex(R)-M55 CPU 和 CMSIS-NN 后端的可执行文件
- 创建一个包含输入图像数据的头文件`inputs.h`
- 创建一个用于存放推理输出结果的头文件`outputs.h`
- 编译可执行程序
- 运行程序
- 输出图片上的文字和置信度
使用自己的图片
--------------------
替换 `run_demo.sh` 中140行处的图片地址即可
使用自己的模型
--------------------
替换 `run_demo.sh` 中130行处的模型地址即可
模型描述
-----------------
在这个demo中,我们使用的模型是基于[PP-OCRv3](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/PP-OCRv3_introduction.md)的英文识别模型。 PP-OCRv3是[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)发布的PP-OCR系列模型的第三个版本。 该系列模型具有以下特点:
- 超轻量级OCR系统:检测(3.6M)+方向分类器(1.4M)+识别(12M)=17.0M。
- 支持80多种多语言识别模型,包括英文、中文、法文、德文、阿拉伯文、韩文、日文等。
- 支持竖排文本识别,长文本识别。
PP-OCRv3 中的文本识别模型支持 80 多种语言。 在模型开发过程中,由于Arm(R) Cortex(R)-M55 CPU不支持rnn算子,我们在PP-OCRv3文本识别模型的基础上删除了不支持的算子,得到当前模型。
\ No newline at end of file
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
if (__TOOLCHAIN_LOADED)
return()
endif()
set(__TOOLCHAIN_LOADED TRUE)
set(CMAKE_SYSTEM_NAME Generic)
set(CMAKE_C_COMPILER "arm-none-eabi-gcc")
set(CMAKE_CXX_COMPILER "arm-none-eabi-g++")
set(CMAKE_SYSTEM_PROCESSOR "cortex-m55" CACHE STRING "Select Arm(R) Cortex(R)-M architecture. (cortex-m0, cortex-m3, cortex-m33, cortex-m4, cortex-m55, cortex-m7, etc)")
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_C_STANDARD 99)
set(CMAKE_CXX_STANDARD 14)
# The system processor could for example be set to cortex-m33+nodsp+nofp.
set(__CPU_COMPILE_TARGET ${CMAKE_SYSTEM_PROCESSOR})
string(REPLACE "+" ";" __CPU_FEATURES ${__CPU_COMPILE_TARGET})
list(POP_FRONT __CPU_FEATURES CMAKE_SYSTEM_PROCESSOR)
string(FIND ${__CPU_COMPILE_TARGET} "+" __OFFSET)
if(__OFFSET GREATER_EQUAL 0)
string(SUBSTRING ${__CPU_COMPILE_TARGET} ${__OFFSET} -1 CPU_FEATURES)
endif()
# Add -mcpu to the compile options to override the -mcpu the CMake toolchain adds
add_compile_options(-mcpu=${__CPU_COMPILE_TARGET})
# Set floating point unit
if("${__CPU_COMPILE_TARGET}" MATCHES "\\+fp")
set(FLOAT hard)
elseif("${__CPU_COMPILE_TARGET}" MATCHES "\\+nofp")
set(FLOAT soft)
elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m33" OR
"${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m55")
set(FLOAT hard)
else()
set(FLOAT soft)
endif()
add_compile_options(-mfloat-abi=${FLOAT})
add_link_options(-mfloat-abi=${FLOAT})
# Link target
add_link_options(-mcpu=${__CPU_COMPILE_TARGET})
add_link_options(-Xlinker -Map=output.map)
#
# Compile options
#
set(cxx_flags "-fno-unwind-tables;-fno-rtti;-fno-exceptions")
add_compile_options("-Wall;-Wextra;-Wsign-compare;-Wunused;-Wswitch-default;\
-Wdouble-promotion;-Wredundant-decls;-Wshadow;-Wnull-dereference;\
-Wno-format-extra-args;-Wno-unused-function;-Wno-unused-label;\
-Wno-missing-field-initializers;-Wno-return-type;-Wno-format;-Wno-int-conversion"
"$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import re
import sys
import cv2
import math
from PIL import Image
import numpy as np
def resize_norm_img(img, image_shape, padding=True):
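    """Resize `img` to `image_shape` (C, H, W) and normalize it.

    With padding=True the aspect ratio is kept and the result is right-padded
    with zeros up to width imgW; 3-channel input is scaled to [-1, 1], while
    single-channel input is only scaled to [0, 1].
    """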
imgC, imgH, imgW = image_shape
h = img.shape[0]
w = img.shape[1]
if not padding:
resized_image = cv2.resize(
img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)
resized_w = imgW
else:
ratio = w / float(h)
if math.ceil(imgH * ratio) > imgW:
resized_w = imgW
else:
resized_w = int(math.ceil(imgH * ratio))
resized_image = cv2.resize(img, (resized_w, imgH))
resized_image = resized_image.astype('float32')
if image_shape[0] == 1:
resized_image = resized_image / 255
resized_image = resized_image[np.newaxis, :]
else:
resized_image = resized_image.transpose((2, 0, 1)) / 255
resized_image -= 0.5
resized_image /= 0.5
padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
padding_im[:, :, 0:resized_w] = resized_image
return padding_im
def create_header_file(name, tensor_name, tensor_data, output_path):
"""
This function generates a header file containing the data from the numpy array provided.
"""
file_path = pathlib.Path(f"{output_path}/" + name).resolve()
# Create header file with npy_data as a C array
raw_path = file_path.with_suffix(".h").resolve()
with open(raw_path, "w") as header_file:
header_file.write(
"\n"
+ f"const size_t {tensor_name}_len = {tensor_data.size};\n"
+ f'__attribute__((section(".data.tvm"), aligned(16))) float {tensor_name}[] = '
)
header_file.write("{")
for i in np.ndindex(tensor_data.shape):
header_file.write(f"{tensor_data[i]}, ")
header_file.write("};\n\n")
def create_headers(image_name):
"""
This function generates C header files for the input and output arrays required to run inferences
"""
img_path = os.path.join("./", f"{image_name}")
# Resize image to 32x320
img = cv2.imread(img_path)
img = resize_norm_img(img, [3,32,320])
img_data = img.astype("float32")
# # Add the batch dimension, as we are expecting 4-dimensional input: NCHW.
img_data = np.expand_dims(img_data, axis=0)
# Create input header file
create_header_file("inputs", "input", img_data, "./include")
# Create output header file
    output_data = np.zeros([7760], np.float32)
create_header_file(
"outputs",
"output",
output_data,
"./include",
)
if __name__ == "__main__":
create_headers(sys.argv[1])
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*------------------ Reference System Memories -------------
+===================+============+=======+============+============+
| Memory | Address | Size | CPU Access | NPU Access |
+===================+============+=======+============+============+
| ITCM | 0x00000000 | 512KB | Yes (RO) | No |
+-------------------+------------+-------+------------+------------+
| DTCM | 0x20000000 | 512KB | Yes (R/W) | No |
+-------------------+------------+-------+------------+------------+
| SSE-300 SRAM | 0x21000000 | 2MB | Yes (R/W) | Yes (R/W) |
+-------------------+------------+-------+------------+------------+
| Data SRAM | 0x01000000 | 2MB | Yes (R/W) | Yes (R/W) |
+-------------------+------------+-------+------------+------------+
| DDR | 0x60000000 | 32MB | Yes (R/W) | Yes (R/W) |
+-------------------+------------+-------+------------+------------+ */
/*---------------------- ITCM Configuration ----------------------------------
<h> Flash Configuration
<o0> Flash Base Address <0x0-0xFFFFFFFF:8>
<o1> Flash Size (in Bytes) <0x0-0xFFFFFFFF:8>
</h>
-----------------------------------------------------------------------------*/
__ROM_BASE = 0x00000000;
__ROM_SIZE = 0x00080000;
/*--------------------- DTCM RAM Configuration ----------------------------
<h> RAM Configuration
<o0> RAM Base Address <0x0-0xFFFFFFFF:8>
<o1> RAM Size (in Bytes) <0x0-0xFFFFFFFF:8>
</h>
-----------------------------------------------------------------------------*/
__RAM_BASE = 0x20000000;
__RAM_SIZE = 0x00080000;
/*----------------------- Data SRAM Configuration ------------------------------
<h> Data SRAM Configuration
<o0> DATA_SRAM Base Address <0x0-0xFFFFFFFF:8>
<o1> DATA_SRAM Size (in Bytes) <0x0-0xFFFFFFFF:8>
</h>
-----------------------------------------------------------------------------*/
__DATA_SRAM_BASE = 0x01000000;
__DATA_SRAM_SIZE = 0x00200000;
/*--------------------- Embedded SRAM Configuration ----------------------------
<h> SRAM Configuration
<o0> SRAM Base Address <0x0-0xFFFFFFFF:8>
<o1> SRAM Size (in Bytes) <0x0-0xFFFFFFFF:8>
</h>
-----------------------------------------------------------------------------*/
__SRAM_BASE = 0x21000000;
__SRAM_SIZE = 0x00200000;
/*--------------------- Stack / Heap Configuration ----------------------------
<h> Stack / Heap Configuration
<o0> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
<o1> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
</h>
-----------------------------------------------------------------------------*/
__STACK_SIZE = 0x00008000;
__HEAP_SIZE = 0x00008000;
/*--------------------- Embedded RAM Configuration ----------------------------
<h> DDR Configuration
<o0> DDR Base Address <0x0-0xFFFFFFFF:8>
<o1> DDR Size (in Bytes) <0x0-0xFFFFFFFF:8>
</h>
-----------------------------------------------------------------------------*/
__DDR_BASE = 0x60000000;
__DDR_SIZE = 0x02000000;
/*
*-------------------- <<< end of configuration section >>> -------------------
*/
MEMORY
{
ITCM (rx) : ORIGIN = __ROM_BASE, LENGTH = __ROM_SIZE
DTCM (rwx) : ORIGIN = __RAM_BASE, LENGTH = __RAM_SIZE
DATA_SRAM (rwx) : ORIGIN = __DATA_SRAM_BASE, LENGTH = __DATA_SRAM_SIZE
SRAM (rwx) : ORIGIN = __SRAM_BASE, LENGTH = __SRAM_SIZE
DDR (rwx) : ORIGIN = __DDR_BASE, LENGTH = __DDR_SIZE
}
/* Linker script to place sections and symbol values. Should be used together
* with other linker script that defines memory regions ITCM and RAM.
* It references following symbols, which must be defined in code:
* Reset_Handler : Entry of reset handler
*
* It defines following symbols, which code can use without definition:
* __exidx_start
* __exidx_end
* __copy_table_start__
* __copy_table_end__
* __zero_table_start__
* __zero_table_end__
* __etext
* __data_start__
* __preinit_array_start
* __preinit_array_end
* __init_array_start
* __init_array_end
* __fini_array_start
* __fini_array_end
* __data_end__
* __bss_start__
* __bss_end__
* __end__
* end
* __HeapLimit
* __StackLimit
* __StackTop
* __stack
*/
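/* For reference (an illustrative sketch, not part of this script): CMSIS-style
 * startup code typically consumes the copy/zero tables defined below roughly as
 *   for (t = __copy_table_start__; t < __copy_table_end__; t++)  copy t->len bytes from t->src to t->dest;
 *   for (z = __zero_table_start__; z < __zero_table_end__; z++)  zero z->len bytes at z->dest;
 * where each copy-table entry is (source LMA, destination VMA, length in bytes)
 * as emitted by the LONG() directives in .copy.table / .zero.table.
 */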
ENTRY(Reset_Handler)
SECTIONS
{
/* .ddr is placed before .text so that .rodata.tvm is encountered before .rodata* */
.ddr :
{
. = ALIGN (16);
*(.rodata.tvm)
. = ALIGN (16);
*(.data.tvm);
. = ALIGN(16);
} > DDR
.text :
{
KEEP(*(.vectors))
*(.text*)
KEEP(*(.init))
KEEP(*(.fini))
/* .ctors */
*crtbegin.o(.ctors)
*crtbegin?.o(.ctors)
*(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors)
*(SORT(.ctors.*))
*(.ctors)
/* .dtors */
*crtbegin.o(.dtors)
*crtbegin?.o(.dtors)
*(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors)
*(SORT(.dtors.*))
*(.dtors)
*(.rodata*)
KEEP(*(.eh_frame*))
} > ITCM
.ARM.extab :
{
*(.ARM.extab* .gnu.linkonce.armextab.*)
} > ITCM
__exidx_start = .;
.ARM.exidx :
{
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
} > ITCM
__exidx_end = .;
.copy.table :
{
. = ALIGN(4);
__copy_table_start__ = .;
LONG (__etext)
LONG (__data_start__)
LONG (__data_end__ - __data_start__)
/* Add each additional data section here */
__copy_table_end__ = .;
} > ITCM
.zero.table :
{
. = ALIGN(4);
__zero_table_start__ = .;
__zero_table_end__ = .;
} > ITCM
/**
* Location counter can end up 2byte aligned with narrow Thumb code but
* __etext is assumed by startup code to be the LMA of a section in DTCM
* which must be 4byte aligned
*/
__etext = ALIGN (4);
.sram :
{
. = ALIGN(16);
} > SRAM AT > SRAM
.data : AT (__etext)
{
__data_start__ = .;
*(vtable)
*(.data)
*(.data.*)
. = ALIGN(4);
/* preinit data */
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP(*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
. = ALIGN(4);
/* init data */
PROVIDE_HIDDEN (__init_array_start = .);
KEEP(*(SORT(.init_array.*)))
KEEP(*(.init_array))
PROVIDE_HIDDEN (__init_array_end = .);
. = ALIGN(4);
/* finit data */
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP(*(SORT(.fini_array.*)))
KEEP(*(.fini_array))
PROVIDE_HIDDEN (__fini_array_end = .);
KEEP(*(.jcr*))
. = ALIGN(4);
/* All data end */
__data_end__ = .;
} > DTCM
.bss.noinit (NOLOAD):
{
. = ALIGN(16);
*(.bss.noinit.*)
. = ALIGN(16);
} > SRAM AT > SRAM
.bss :
{
. = ALIGN(4);
__bss_start__ = .;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(4);
__bss_end__ = .;
} > DTCM AT > DTCM
.data_sram :
{
. = ALIGN(16);
} > DATA_SRAM
.heap (COPY) :
{
. = ALIGN(8);
__end__ = .;
PROVIDE(end = .);
. = . + __HEAP_SIZE;
. = ALIGN(8);
__HeapLimit = .;
} > DTCM
.stack (ORIGIN(DTCM) + LENGTH(DTCM) - __STACK_SIZE) (COPY) :
{
. = ALIGN(8);
__StackLimit = .;
. = . + __STACK_SIZE;
. = ALIGN(8);
__StackTop = .;
} > DTCM
PROVIDE(__stack = __StackTop);
/* Check if data + stack exceeds DTCM limit */
ASSERT(__StackLimit >= __bss_end__, "region DTCM overflowed with stack")
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CRT_CONFIG_H_
#define TVM_RUNTIME_CRT_CONFIG_H_
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
#endif // TVM_RUNTIME_CRT_CONFIG_H_
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/stack_allocator.h>
#ifdef __cplusplus
extern "C" {
#endif
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
printf("TVMPlatformAbort: %d\n", error_code);
printf("EXITTHESIM\n");
exit(-1);
}
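// Dynamic platform allocation is not needed in this demo: the model is compiled
// with the AoT executor and USMP enabled (see run_demo.sh), so workspace memory
// is allocated statically and the two hooks below can remain unimplemented.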
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
return kTvmErrorFunctionCallNotImplemented;
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return kTvmErrorFunctionCallNotImplemented;
}
void TVMLogf(const char* msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stdout, msg, args);
va_end(args);
}
TVM_DLL int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) { return 0; }
#ifdef __cplusplus
}
#endif
paddlepaddle
numpy
opencv-python
\ No newline at end of file
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
set -u
set -o pipefail
# Show usage
function show_usage() {
cat <<EOF
Usage: run_demo.sh
-h, --help
Display this help message.
--cmsis_path CMSIS_PATH
Set path to CMSIS.
--ethosu_platform_path ETHOSU_PLATFORM_PATH
Set path to Arm(R) Ethos(TM)-U core platform.
--fvp_path FVP_PATH
Set path to FVP.
--cmake_path
Set path to cmake.
EOF
}
# Parse arguments
while (( $# )); do
case "$1" in
-h|--help)
show_usage
exit 0
;;
--cmsis_path)
if [ $# -gt 1 ]
then
export CMSIS_PATH="$2"
shift 2
else
echo 'ERROR: --cmsis_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--ethosu_platform_path)
if [ $# -gt 1 ]
then
export ETHOSU_PLATFORM_PATH="$2"
shift 2
else
echo 'ERROR: --ethosu_platform_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--fvp_path)
if [ $# -gt 1 ]
then
export PATH="$2/models/Linux64_GCC-6.4:$PATH"
shift 2
else
echo 'ERROR: --fvp_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--cmake_path)
if [ $# -gt 1 ]
then
export CMAKE="$2"
shift 2
else
echo 'ERROR: --cmake_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
-*|--*)
echo "Error: Unknown flag: $1" >&2
show_usage >&2
exit 1
;;
esac
done
# Directories
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Make build directory
rm -rf build
make cleanall
mkdir -p build
cd build
wget https://paddleocr.bj.bcebos.com/tvm/ocr_en.tar
tar -xf ocr_en.tar
# Compile model for Arm(R) Cortex(R)-M55 CPU and CMSIS-NN
# An alternative to using "python3 -m tvm.driver.tvmc" is to call
# "tvmc" directly once TVM has been pip installed.
python3 -m tvm.driver.tvmc compile --target=cmsis-nn,c \
--target-cmsis-nn-mcpu=cortex-m55 \
--target-c-mcpu=cortex-m55 \
--runtime=crt \
--executor=aot \
--executor-aot-interface-api=c \
--executor-aot-unpacked-api=1 \
--pass-config tir.usmp.enable=1 \
--pass-config tir.usmp.algorithm=hill_climb \
--pass-config tir.disable_storage_rewrite=1 \
--pass-config tir.disable_vectorize=1 ocr_en/inference.pdmodel \
--output-format=mlf \
--model-format=paddle \
--module-name=rec \
--input-shapes x:[1,3,32,320] \
--output=rec.tar
tar -xf rec.tar
# Create C header files
cd ..
python3 ./convert_image.py imgs_words_en/word_116.png
# Build demo executable
cd ${script_dir}
echo ${script_dir}
make
# Run demo executable on the FVP
FVP_Corstone_SSE-300_Ethos-U55 -C cpu0.CFGDTCMSZ=15 \
-C cpu0.CFGITCMSZ=15 -C mps3_board.uart0.out_file=\"-\" -C mps3_board.uart0.shutdown_tag=\"EXITTHESIM\" \
-C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 \
-C mps3_board.telnetterminal1.start_telnet=0 -C mps3_board.telnetterminal2.start_telnet=0 -C mps3_board.telnetterminal5.start_telnet=0 \
./build/demo
\ No newline at end of file
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdio.h>
#include <tvm_runtime.h>
#include <tvmgen_rec.h>
#include "uart.h"
// Header files generated by convert_image.py
#include "inputs.h"
#include "outputs.h"
int main(int argc, char** argv) {
char dict[]={"#0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./ "};
int char_dict_nums = 97;
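// dict[0] ('#') is the CTC blank token; char_dict_nums is the number of classes
// per time step in the model output (used by the decode loop below).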
uart_init();
printf("Starting ocr rec inference\n");
struct tvmgen_rec_outputs rec_outputs = {
.output = output,
};
struct tvmgen_rec_inputs rec_inputs = {
.x = input,
};
tvmgen_rec_run(&rec_inputs, &rec_outputs);
// Post-process: CTC greedy decoding - take the argmax class at each time step,
// then collapse consecutive repeats and drop the blank class (index 0)
int char_nums = output_len / char_dict_nums;
int last_index = 0;
float score = 0.f;
int count = 0;
printf("text: ");
for (int i = 0; i < char_nums; i++) {
int argmax_idx = 0;
float max_value = 0.0f;
for (int j = 0; j < char_dict_nums; j++){
if (output[i * char_dict_nums + j] > max_value){
max_value = output[i * char_dict_nums + j];
argmax_idx = j;
}
}
if (argmax_idx > 0 && (!(i > 0 && argmax_idx == last_index))) {
score += max_value;
count += 1;
// printf("%d,%f,%c\n", argmax_idx, max_value, dict[argmax_idx]);
printf("%c", dict[argmax_idx]);
}
last_index = argmax_idx;
}
score /= count;
printf(", score: %f\n", score);
// The FVP will shut down when it receives "EXITTHESIM" on the UART
printf("EXITTHESIM\n");
while (1 == 1)
;
return 0;
}
......@@ -92,6 +92,8 @@ include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/xxhash/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
include_directories("${PADDLE_LIB}/third_party/install/onnxruntime/include")
include_directories("${PADDLE_LIB}/third_party/install/paddle2onnx/include")
include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")
......@@ -110,6 +112,8 @@ link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib")
link_directories("${PADDLE_LIB}/third_party/install/onnxruntime/lib")
link_directories("${PADDLE_LIB}/third_party/install/paddle2onnx/lib")
link_directories("${PADDLE_LIB}/paddle/lib")
......
......@@ -208,7 +208,7 @@ Execute the built executable file:
./build/ppocr [--param1] [--param2] [...]
```
**Note**:ppocr uses the `PP-OCRv3` model by default, and the input shape used by the recognition model is `3, 48, 320`, so if you use the recognition function, you need to add the parameter `--rec_img_h=48`, if you do not use the default `PP-OCRv3` model, you do not need to set this parameter.
**Note**: ppocr uses the `PP-OCRv3` model by default, and the recognition model uses an input shape of `3, 48, 320`. If you want to use the older model, add the parameter `--rec_img_h=32`.
Specifically,
......@@ -222,7 +222,6 @@ Specifically,
--det=true \
--rec=true \
--cls=true \
--rec_img_h=48\
```
##### 2. det+rec:
......@@ -234,7 +233,6 @@ Specifically,
--det=true \
--rec=true \
--cls=false \
--rec_img_h=48\
```
##### 3. det
......@@ -254,7 +252,6 @@ Specifically,
--det=false \
--rec=true \
--cls=true \
--rec_img_h=48\
```
##### 5. rec
......@@ -265,7 +262,6 @@ Specifically,
--det=false \
--rec=true \
--cls=false \
--rec_img_h=48\
```
##### 6. cls
......@@ -330,7 +326,7 @@ More parameters are as follows,
|rec_model_dir|string|-|Path of the recognition inference model|
|rec_char_dict_path|string|../../ppocr/utils/ppocr_keys_v1.txt|dictionary file|
|rec_batch_num|int|6|batch size of recognition|
|rec_img_h|int|32|image height of recognition|
|rec_img_h|int|48|image height of recognition|
|rec_img_w|int|320|image width of recognition|
* Multi-language inference is also supported in PaddleOCR; see the [recognition tutorial](../../doc/doc_en/recognition_en.md) for the supported languages and models. Specifically, to run inference with a multi-language model, you only need to change the values of `rec_char_dict_path` and `rec_model_dir`, for example as shown below.
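A hedged sketch of such a run (the German model directory and dictionary path below are placeholders chosen for illustration, not values taken from this document):

```
./build/ppocr --det=false \
    --rec=true \
    --cls=false \
    --rec_model_dir=./german_PP-OCRv3_rec_infer/ \
    --rec_char_dict_path=../../ppocr/utils/dict/german_dict.txt
```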
......
......@@ -213,7 +213,7 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
This demo supports calling the full system pipeline as well as individual functions, e.g. detection only or recognition only.
**Note** ppocr uses the `PP-OCRv3` model by default, and the recognition model uses an input shape of `3,48,320`; therefore, if you use the recognition function, you need to add the parameter `--rec_img_h=48`. If you do not use the default `PP-OCRv3` model, this parameter does not need to be set.
**Note** ppocr uses the `PP-OCRv3` model by default, and the recognition model uses an input shape of `3,48,320`; if you need to use an older PP-OCR model, set the parameter `--rec_img_h=32`.
How to run:
......@@ -232,7 +232,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
--det=true \
--rec=true \
--cls=true \
--rec_img_h=48\
```
##### 2. Detection + recognition:
......@@ -244,7 +243,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
--det=true \
--rec=true \
--cls=false \
--rec_img_h=48\
```
##### 3. Detection:
......@@ -264,7 +262,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
--det=false \
--rec=true \
--cls=true \
--rec_img_h=48\
```
##### 5. Recognition:
......@@ -275,7 +272,6 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
--det=false \
--rec=true \
--cls=false \
--rec_img_h=48\
```
##### 6. Classification:
......@@ -339,7 +335,7 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
|rec_model_dir|string|-|Path of the recognition inference model|
|rec_char_dict_path|string|../../ppocr/utils/ppocr_keys_v1.txt|Dictionary file|
|rec_batch_num|int|6|Batch size of the recognition model|
|rec_img_h|int|32|Input image height of the recognition model|
|rec_img_h|int|48|Input image height of the recognition model|
|rec_img_w|int|320|Input image width of the recognition model|
......
......@@ -47,7 +47,7 @@ DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
DEFINE_int32(rec_batch_num, 6, "rec_batch_num.");
DEFINE_string(rec_char_dict_path, "../../ppocr/utils/ppocr_keys_v1.txt",
"Path of dictionary.");
DEFINE_int32(rec_img_h, 32, "rec image height");
DEFINE_int32(rec_img_h, 48, "rec image height");
DEFINE_int32(rec_img_w, 320, "rec image width");
// ocr forward related
......
......@@ -77,6 +77,9 @@ int main(int argc, char **argv) {
for (int i = 0; i < cv_all_img_names.size(); ++i) {
if (FLAGS_benchmark) {
cout << cv_all_img_names[i] << '\t';
if (FLAGS_rec && FLAGS_det) {
Utility::print_result(ocr_results[i]);
} else if (FLAGS_det) {
for (int n = 0; n < ocr_results[i].size(); n++) {
for (int m = 0; m < ocr_results[i][n].box.size(); m++) {
cout << ocr_results[i][n].box[m][0] << ' '
......@@ -84,6 +87,9 @@ int main(int argc, char **argv) {
}
}
cout << endl;
} else {
Utility::print_result(ocr_results[i]);
}
} else {
cout << cv_all_img_names[i] << "\n";
Utility::print_result(ocr_results[i]);
......
......@@ -32,40 +32,46 @@ void DBDetector::LoadModel(const std::string &model_dir) {
if (this->precision_ == "int8") {
precision = paddle_infer::Config::Precision::kInt8;
}
config.EnableTensorRtEngine(1 << 20, 10, 3, precision, false, false);
config.EnableTensorRtEngine(1 << 20, 1, 20, precision, false, false);
std::map<std::string, std::vector<int>> min_input_shape = {
{"x", {1, 3, 50, 50}},
{"conv2d_92.tmp_0", {1, 96, 20, 20}},
{"conv2d_91.tmp_0", {1, 96, 10, 10}},
{"nearest_interp_v2_1.tmp_0", {1, 96, 10, 10}},
{"nearest_interp_v2_2.tmp_0", {1, 96, 20, 20}},
{"nearest_interp_v2_3.tmp_0", {1, 24, 20, 20}},
{"nearest_interp_v2_4.tmp_0", {1, 24, 20, 20}},
{"nearest_interp_v2_5.tmp_0", {1, 24, 20, 20}},
{"conv2d_92.tmp_0", {1, 120, 20, 20}},
{"conv2d_91.tmp_0", {1, 24, 10, 10}},
{"conv2d_59.tmp_0", {1, 96, 20, 20}},
{"nearest_interp_v2_1.tmp_0", {1, 256, 10, 10}},
{"nearest_interp_v2_2.tmp_0", {1, 256, 20, 20}},
{"conv2d_124.tmp_0", {1, 256, 20, 20}},
{"nearest_interp_v2_3.tmp_0", {1, 64, 20, 20}},
{"nearest_interp_v2_4.tmp_0", {1, 64, 20, 20}},
{"nearest_interp_v2_5.tmp_0", {1, 64, 20, 20}},
{"elementwise_add_7", {1, 56, 2, 2}},
{"nearest_interp_v2_0.tmp_0", {1, 96, 2, 2}}};
{"nearest_interp_v2_0.tmp_0", {1, 256, 2, 2}}};
std::map<std::string, std::vector<int>> max_input_shape = {
{"x", {1, 3, this->max_side_len_, this->max_side_len_}},
{"conv2d_92.tmp_0", {1, 96, 400, 400}},
{"conv2d_91.tmp_0", {1, 96, 200, 200}},
{"nearest_interp_v2_1.tmp_0", {1, 96, 200, 200}},
{"nearest_interp_v2_2.tmp_0", {1, 96, 400, 400}},
{"nearest_interp_v2_3.tmp_0", {1, 24, 400, 400}},
{"nearest_interp_v2_4.tmp_0", {1, 24, 400, 400}},
{"nearest_interp_v2_5.tmp_0", {1, 24, 400, 400}},
{"conv2d_92.tmp_0", {1, 120, 400, 400}},
{"conv2d_91.tmp_0", {1, 24, 200, 200}},
{"conv2d_59.tmp_0", {1, 96, 400, 400}},
{"nearest_interp_v2_1.tmp_0", {1, 256, 200, 200}},
{"nearest_interp_v2_2.tmp_0", {1, 256, 400, 400}},
{"conv2d_124.tmp_0", {1, 256, 400, 400}},
{"nearest_interp_v2_3.tmp_0", {1, 64, 400, 400}},
{"nearest_interp_v2_4.tmp_0", {1, 64, 400, 400}},
{"nearest_interp_v2_5.tmp_0", {1, 64, 400, 400}},
{"elementwise_add_7", {1, 56, 400, 400}},
{"nearest_interp_v2_0.tmp_0", {1, 96, 400, 400}}};
{"nearest_interp_v2_0.tmp_0", {1, 256, 400, 400}}};
std::map<std::string, std::vector<int>> opt_input_shape = {
{"x", {1, 3, 640, 640}},
{"conv2d_92.tmp_0", {1, 96, 160, 160}},
{"conv2d_91.tmp_0", {1, 96, 80, 80}},
{"nearest_interp_v2_1.tmp_0", {1, 96, 80, 80}},
{"nearest_interp_v2_2.tmp_0", {1, 96, 160, 160}},
{"nearest_interp_v2_3.tmp_0", {1, 24, 160, 160}},
{"nearest_interp_v2_4.tmp_0", {1, 24, 160, 160}},
{"nearest_interp_v2_5.tmp_0", {1, 24, 160, 160}},
{"conv2d_92.tmp_0", {1, 120, 160, 160}},
{"conv2d_91.tmp_0", {1, 24, 80, 80}},
{"conv2d_59.tmp_0", {1, 96, 160, 160}},
{"nearest_interp_v2_1.tmp_0", {1, 256, 80, 80}},
{"nearest_interp_v2_2.tmp_0", {1, 256, 160, 160}},
{"conv2d_124.tmp_0", {1, 256, 160, 160}},
{"nearest_interp_v2_3.tmp_0", {1, 64, 160, 160}},
{"nearest_interp_v2_4.tmp_0", {1, 64, 160, 160}},
{"nearest_interp_v2_5.tmp_0", {1, 64, 160, 160}},
{"elementwise_add_7", {1, 56, 40, 40}},
{"nearest_interp_v2_0.tmp_0", {1, 96, 40, 40}}};
{"nearest_interp_v2_0.tmp_0", {1, 256, 40, 40}}};
config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
opt_input_shape);
......
......@@ -83,7 +83,7 @@ void CRNNRecognizer::Run(std::vector<cv::Mat> img_list,
int out_num = std::accumulate(predict_shape.begin(), predict_shape.end(), 1,
std::multiplies<int>());
predict_batch.resize(out_num);
// predict_batch holds the output of the last FC layer followed by softmax
output_t->CopyToCpu(predict_batch.data());
auto inference_end = std::chrono::steady_clock::now();
inference_diff += inference_end - inference_start;
......@@ -98,9 +98,11 @@ void CRNNRecognizer::Run(std::vector<cv::Mat> img_list,
float max_value = 0.0f;
for (int n = 0; n < predict_shape[1]; n++) {
// get idx
argmax_idx = int(Utility::argmax(
&predict_batch[(m * predict_shape[1] + n) * predict_shape[2]],
&predict_batch[(m * predict_shape[1] + n + 1) * predict_shape[2]]));
// get score
max_value = float(*std::max_element(
&predict_batch[(m * predict_shape[1] + n) * predict_shape[2]],
&predict_batch[(m * predict_shape[1] + n + 1) * predict_shape[2]]));
......@@ -132,7 +134,9 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
paddle_infer::Config config;
config.SetModel(model_dir + "/inference.pdmodel",
model_dir + "/inference.pdiparams");
std::cout << "In PP-OCRv3, default rec_img_h is 48,"
<< "if you use other model, you should set the param rec_img_h=32"
<< std::endl;
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) {
......@@ -143,15 +147,17 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
if (this->precision_ == "int8") {
precision = paddle_infer::Config::Precision::kInt8;
}
config.EnableTensorRtEngine(1 << 20, 10, 3, precision, false, false);
config.EnableTensorRtEngine(1 << 20, 10, 15, precision, false, false);
int imgH = this->rec_image_shape_[1];
int imgW = this->rec_image_shape_[2];
std::map<std::string, std::vector<int>> min_input_shape = {
{"x", {1, 3, imgH, 10}}, {"lstm_0.tmp_0", {10, 1, 96}}};
std::map<std::string, std::vector<int>> max_input_shape = {
{"x", {1, 3, imgH, 2000}}, {"lstm_0.tmp_0", {1000, 1, 96}}};
{"x", {this->rec_batch_num_, 3, imgH, 2500}},
{"lstm_0.tmp_0", {1000, 1, 96}}};
std::map<std::string, std::vector<int>> opt_input_shape = {
{"x", {1, 3, imgH, imgW}}, {"lstm_0.tmp_0", {25, 1, 96}}};
{"x", {this->rec_batch_num_, 3, imgH, imgW}},
{"lstm_0.tmp_0", {25, 1, 96}}};
config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
opt_input_shape);
......
......@@ -5,3 +5,4 @@ det_db_unclip_ratio 1.6
det_db_use_dilate 0
det_use_polygon_score 1
use_direction_classify 1
rec_image_height 32
\ No newline at end of file
......@@ -19,25 +19,27 @@
const std::vector<int> rec_image_shape{3, 32, 320};
cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio) {
cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio, int rec_image_height) {
int imgC, imgH, imgW;
imgC = rec_image_shape[0];
imgH = rec_image_height;
imgW = rec_image_shape[2];
imgH = rec_image_shape[1];
imgW = int(32 * wh_ratio);
imgW = int(imgH * wh_ratio);
float ratio = static_cast<float>(img.cols) / static_cast<float>(img.rows);
float ratio = float(img.cols) / float(img.rows);
int resize_w, resize_h;
if (ceilf(imgH * ratio) > imgW)
resize_w = imgW;
else
resize_w = static_cast<int>(ceilf(imgH * ratio));
cv::Mat resize_img;
resize_w = int(ceilf(imgH * ratio));
cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
cv::INTER_LINEAR);
return resize_img;
cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0,
int(imgW - resize_img.cols), cv::BORDER_CONSTANT,
{127, 127, 127});
}
std::vector<std::string> ReadDict(std::string path) {
......
......@@ -26,7 +26,7 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio);
cv::Mat CrnnResizeImg(cv::Mat img, float wh_ratio, int rec_image_height);
std::vector<std::string> ReadDict(std::string path);
......
......@@ -162,7 +162,8 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
std::vector<std::string> charactor_dict,
std::shared_ptr<PaddlePredictor> predictor_cls,
int use_direction_classify,
std::vector<double> *times) {
std::vector<double> *times,
int rec_image_height) {
std::vector<float> mean = {0.5f, 0.5f, 0.5f};
std::vector<float> scale = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
......@@ -183,7 +184,7 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
float wh_ratio =
static_cast<float>(crop_img.cols) / static_cast<float>(crop_img.rows);
resize_img = CrnnResizeImg(crop_img, wh_ratio);
resize_img = CrnnResizeImg(crop_img, wh_ratio, rec_image_height);
resize_img.convertTo(resize_img, CV_32FC3, 1 / 255.f);
const float *dimg = reinterpret_cast<const float *>(resize_img.data);
......@@ -444,7 +445,7 @@ void system(char **argv){
//// load config from txt file
auto Config = LoadConfigTxt(det_config_path);
int use_direction_classify = int(Config["use_direction_classify"]);
int rec_image_height = int(Config["rec_image_height"]);
auto charactor_dict = ReadDict(dict_path);
charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
charactor_dict.push_back(" ");
......@@ -590,12 +591,16 @@ void rec(int argc, char **argv) {
std::string batchsize = argv[6];
std::string img_dir = argv[7];
std::string dict_path = argv[8];
std::string config_path = argv[9];
if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) {
std::cerr << "Only support FP32 or INT8." << std::endl;
exit(1);
}
auto Config = LoadConfigTxt(config_path);
int rec_image_height = int(Config["rec_image_height"]);
std::vector<cv::String> cv_all_img_names;
cv::glob(img_dir, cv_all_img_names);
......@@ -630,7 +635,7 @@ void rec(int argc, char **argv) {
std::vector<float> rec_text_score;
std::vector<double> times;
RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
charactor_dict, cls_predictor, 0, &times);
charactor_dict, cls_predictor, 0, &times, rec_image_height);
//// print recognized text
for (int i = 0; i < rec_text.size(); i++) {
......
......@@ -34,7 +34,7 @@ For the compilation process of different development environments, please refer
### 1.2 Prepare Paddle-Lite library
There are two ways to obtain the Paddle-Lite library:
- 1. Download directly, the download link of the Paddle-Lite library is as follows:
- 1. [Recommended] Download directly; the download links for the Paddle-Lite library are as follows:
| Platform | Paddle-Lite library download link |
|---|---|
......@@ -43,7 +43,9 @@ There are two ways to obtain the Paddle-Lite library:
Note: 1. The above Paddle-Lite library is compiled from the Paddle-Lite 2.10 branch. For more information about Paddle-Lite 2.10, please refer to [link](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.10).
- 2. [Recommended] Compile Paddle-Lite to get the prediction library. The compilation method of Paddle-Lite is as follows:
**Note: It is recommended to use a prediction library of paddlelite >= 2.10; other prediction library versions are available at the [download link](https://github.com/PaddlePaddle/Paddle-Lite/tags).**
- 2. Compile Paddle-Lite to get the prediction library. The compilation method of Paddle-Lite is as follows:
```
git clone https://github.com/PaddlePaddle/Paddle-Lite.git
cd Paddle-Lite
......@@ -104,21 +106,17 @@ If you directly use the model in the above table for deployment, you can skip th
If the model to be deployed is not in the above table, you need to follow the steps below to obtain the optimized model.
The `opt` tool can be obtained by compiling Paddle Lite.
- Step 1: Refer to the [document](https://www.paddlepaddle.org.cn/lite/v2.10/user_guides/opt/opt_python.html) to install paddlelite, which is used to convert the Paddle inference model into the nb model format required by Paddle-Lite at runtime.
```
git clone https://github.com/PaddlePaddle/Paddle-Lite.git
cd Paddle-Lite
git checkout release/v2.10
./lite/tools/build.sh build_optimize_tool
pip install paddlelite==2.10 # The paddlelite version should be the same as the prediction library version
```
After the compilation is complete, the opt file is located under build.opt/lite/api/, You can view the operating options and usage of opt in the following ways:
After installation, the following command shows the help information:
```
cd build.opt/lite/api/
./opt
paddle_lite_opt
```
Introduction to paddle_lite_opt parameters:
|Options|Description|
|---|---|
|--model_dir|The path of the PaddlePaddle model to be optimized (non-combined form)|
......@@ -131,6 +129,8 @@ cd build.opt/lite/api/
`--model_dir` applies to models in the non-combined format; the PaddleOCR inference model is in the combined format, i.e. the model structure and the model parameters are each stored in a single file, so `--model_file` and `--param_file` should be used instead.
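For orientation, a minimal `paddle_lite_opt` invocation for such a combined model might look like the sketch below (model paths and the output name are placeholders); Step 2 then walks through the full PaddleOCR example.

```
paddle_lite_opt --model_file=./ch_PP-OCRv3_rec_infer/inference.pdmodel \
                --param_file=./ch_PP-OCRv3_rec_infer/inference.pdiparams \
                --optimize_out=./ch_PP-OCRv3_rec_opt \
                --optimize_out_type=naive_buffer \
                --valid_targets=arm
```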
- Step 2: Use paddle_lite_opt to convert the inference model to the mobile model format.
The following takes the ultra-lightweight Chinese model of PaddleOCR as an example to show how to use `paddle_lite_opt` to convert the inference model into the Paddle-Lite optimized (nb) model.
```
......@@ -240,6 +240,7 @@ det_db_thresh 0.3 # Used to filter the binarized image of DB prediction,
det_db_box_thresh 0.5 # DB post-processing box-filtering threshold; if boxes are missed, it can be lowered as appropriate
det_db_unclip_ratio 1.6 # Controls the compactness of the text box; the smaller the value, the tighter the box fits the text
use_direction_classify 0 # Whether to use the direction classifier; 0 means no, 1 means yes
rec_image_height 32 # Input image height of the recognition model; set to 48 for the PP-OCRv3 model and 32 for the PP-OCRv2 model
```
5. Run Model on phone
......@@ -258,8 +259,15 @@ After the above steps are completed, you can use adb to push the file to the pho
cd /data/local/tmp/debug
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
# The use of ocr_db_crnn is:
# ./ocr_db_crnn Detection model file Orientation classifier model file Recognition model file Test image path Dictionary file path
./ocr_db_crnn ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_opt.nb ./11.jpg ppocr_keys_v1.txt
# Usage: ./ocr_db_crnn <mode> <det model> <rec model> <cls model> <hardware> <precision> <threads> <batchsize> <image path> <config path> <dict path>
./ocr_db_crnn system ch_PP-OCRv2_det_slim_opt.nb ch_PP-OCRv2_rec_slim_opt.nb ch_ppocr_mobile_v2.0_cls_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt ppocr_keys_v1.txt True
# precision can be INT8 for a quantized model or FP32 for a normal model.
# Only using detection model
./ocr_db_crnn det ch_PP-OCRv2_det_slim_opt.nb arm8 INT8 10 1 ./11.jpg config.txt
# Only using recognition model
./ocr_db_crnn rec ch_PP-OCRv2_rec_slim_opt.nb arm8 INT8 10 1 word_1.jpg ppocr_keys_v1.txt config.txt
```
If you modify the code, you need to recompile and push to the phone.
......@@ -283,3 +291,7 @@ A2: Replace the .jpg test image under ./debug with the image you want to test, a
Q3: How can it be packaged into a mobile app?
A3: This demo aims to provide the core algorithm part that can run OCR on mobile phones. Further, PaddleOCR/deploy/android_demo is an example of encapsulating this demo into a mobile app for reference.
Q4: When running the demo, an error is reported `Error: This model is not supported, because kernel for 'io_copy' is not supported by Paddle-Lite.`
A4: The problem is that the installed paddlelite version does not match the version of the downloaded prediction library. Make sure that the `paddle_lite_opt` tool matches your prediction library version, then regenerate the nb model and try again.
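As a quick sanity check (assuming paddlelite was installed via pip as in Step 1 above), confirm that the installed version matches the prediction library version:

```
# Should report the same version as the prediction library, e.g. 2.10
pip show paddlelite | grep -i version
```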
......@@ -136,7 +136,7 @@ The recognition model is the same.
2. Run the following command to start the service.
```
# Start the service and save the running log in log.txt
python3 web_service.py &>log.txt &
python3 web_service.py --config=config.yml &>log.txt &
```
After the service is successfully started, a log similar to the following will be printed in log.txt
![](./imgs/start_server.png)
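Once the service is up, it can be smoke-tested with the pipeline HTTP client that accompanies `web_service.py` (assuming the repository's default `pipeline_http_client.py` script and its bundled test images):

```
python3 pipeline_http_client.py
```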
......@@ -217,7 +217,7 @@ The C++ service deployment is the same as python in the environment setup and da
2. Run the following command to start the service.
```
# Start the service and save the running log in log.txt
python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 9293 &>log.txt &
python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt &
```
After the service is successfully started, a log similar to the following will be printed in log.txt
![](./imgs/start_server.png)
......
......@@ -135,7 +135,7 @@ python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv3_rec_infer/ \
2. Run the following command to start the service:
```
# Start the service and save the running log in log.txt
python3 web_service.py &>log.txt &
python3 web_service.py --config=config.yml &>log.txt &
```
After the service is successfully started, a log similar to the following will be printed in log.txt
![](./imgs/start_server.png)
......@@ -230,7 +230,7 @@ cp -rf general_detection_op.cpp Serving/core/general-server/op
```
# Start the service and save the running log in log.txt
python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 9293 &>log.txt &
python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt &
```
After the service is successfully started, a log similar to the following will be printed in log.txt
![](./imgs/start_server.png)
......