Commit adafa3e6, authored by WenmuZhou

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into dygraph_rc

......@@ -64,6 +64,7 @@ from libs.colorDialog import ColorDialog
from libs.toolBar import ToolBar
from libs.ustr import ustr
from libs.hashableQListWidgetItem import HashableQListWidgetItem
from libs.editinlist import EditInList
__appname__ = 'PPOCRLabel'
......@@ -147,7 +148,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.prevLabelText = getStr('tempLabel')
self.model = 'paddle'
self.PPreader = None
self.autoSaveNum = 10
self.autoSaveNum = 5
################# file list ###############
self.fileListWidget = QListWidget()
......@@ -201,12 +202,12 @@ class MainWindow(QMainWindow, WindowMixin):
################## label list ####################
# Create and add a widget for showing current label items
self.labelList = QListWidget()
self.labelList = EditInList()
labelListContainer = QWidget()
labelListContainer.setLayout(listLayout)
self.labelList.itemActivated.connect(self.labelSelectionChanged)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
self.labelList.clicked.connect(self.labelList.item_clicked)
# Connect to itemChanged to detect checkbox changes.
self.labelList.itemChanged.connect(self.labelItemChanged)
self.labelListDock = QDockWidget(getStr('recognitionResult'),self)
......@@ -316,7 +317,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.newShape.connect(partial(self.newShape, False))
self.canvas.shapeMoved.connect(self.updateBoxlist) # self.setDirty
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
......@@ -354,13 +355,9 @@ class MainWindow(QMainWindow, WindowMixin):
quit = action(getStr('quit'), self.close,
'Ctrl+Q', 'quit', getStr('quitApp'))
open = action(getStr('openFile'), self.openFile,
'Ctrl+O', 'open', getStr('openFileDetail'))
opendir = action(getStr('openDir'), self.openDirDialog,
'Ctrl+u', 'open', getStr('openDir'))
save = action(getStr('save'), self.saveFile,
'Ctrl+V', 'verify', getStr('saveDetail'), enabled=False)
......@@ -506,7 +503,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
# Store actions for further handling.
self.actions = struct(save=save, open=open, resetAll=resetAll, deleteImg=deleteImg,
self.actions = struct(save=save, resetAll=resetAll, deleteImg=deleteImg,
lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
saveRec=saveRec, singleRere=singleRere,AutoRec=AutoRec,reRec=reRec,
createMode=createMode, editMode=editMode,
......@@ -515,7 +512,7 @@ class MainWindow(QMainWindow, WindowMixin):
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions, saveLabel=saveLabel,
fileMenuActions=(
open, opendir, saveLabel, resetAll, quit),
opendir, saveLabel, resetAll, quit),
beginner=(), advanced=(),
editMenu=(createpoly, edit, copy, delete,singleRere,
None, color1, self.drawSquaresOption),
......@@ -537,11 +534,6 @@ class MainWindow(QMainWindow, WindowMixin):
labelList=labelMenu)
# Sync single class mode from PR#106
self.singleClassMode = QAction(getStr('singleClsMode'), self)
self.singleClassMode.setShortcut("Ctrl+Shift+S")
self.singleClassMode.setCheckable(True)
self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
self.lastLabel = None
# Add option to enable/disable labels being displayed at the top of bounding boxes
self.displayLabelOption = QAction(getStr('displayLabel'), self)
......@@ -550,12 +542,18 @@ class MainWindow(QMainWindow, WindowMixin):
self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
self.labelDialogOption = QAction(getStr('labelDialogOption'), self)
self.labelDialogOption.setShortcut("Ctrl+Shift+L")
self.labelDialogOption.setCheckable(True)
self.labelDialogOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.labelDialogOption.triggered.connect(self.speedChoose)
addActions(self.menus.file,
(opendir, None, saveLabel, saveRec, None, resetAll, deleteImg, quit))
addActions(self.menus.help, (showSteps, showInfo))
addActions(self.menus.view, (
self.displayLabelOption, # labels,
self.displayLabelOption, self.labelDialogOption,
None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
......@@ -1062,6 +1060,7 @@ class MainWindow(QMainWindow, WindowMixin):
def labelSelectionChanged(self):
item = self.currentItem()
self.labelList.scrollToItem(item, QAbstractItemView.EnsureVisible)
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapes[item])
......@@ -1069,6 +1068,7 @@ class MainWindow(QMainWindow, WindowMixin):
def boxSelectionChanged(self):
item = self.currentBox()
self.BoxList.scrollToItem(item, QAbstractItemView.EnsureVisible)
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapesbox[item])
......@@ -1089,7 +1089,7 @@ class MainWindow(QMainWindow, WindowMixin):
# self.actions.save.setEnabled(True)
# Callback functions:
def newShape(self):
def newShape(self, value=True):
"""Pop-up and give focus to the label editor.
position MUST be in global coordinates.
......@@ -1098,12 +1098,11 @@ class MainWindow(QMainWindow, WindowMixin):
self.labelDialog = LabelDialog(
parent=self, listItem=self.labelHist)
# Sync single class mode from PR#106
if self.singleClassMode.isChecked() and self.lastLabel:
text = self.lastLabel
else:
if value:
text = self.labelDialog.popUp(text=self.prevLabelText)
self.lastLabel = text
else:
text = self.prevLabelText
if text is not None:
self.prevLabelText = self.stringBundle.getString('tempLabel')
......@@ -1364,7 +1363,6 @@ class MainWindow(QMainWindow, WindowMixin):
else:
settings[SETTING_LAST_OPEN_DIR] = ''
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
settings.save()
......@@ -1497,35 +1495,6 @@ class MainWindow(QMainWindow, WindowMixin):
print('file name in openNext is ',filename)
self.loadFile(filename)
def openFile(self, _value=False):
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
# print('filename in openfile is ', self.filePath)
self.filePath = None
self.fileListWidget.clear()
self.iconlist.clear()
self.mImgList = [filename]
self.openNextImg()
if self.validFilestate(filename) is True:
item = QListWidgetItem(newIcon('done'), filename)
self.setClean()
elif self.validFilestate(filename) is None:
item = QListWidgetItem(newIcon('close'), filename)
else:
item = QListWidgetItem(newIcon('close'), filename)
self.setDirty()
self.fileListWidget.addItem(filename)
self.additems5(None)
print('opened image is', filename)
def updateFileListIcon(self, filename):
pass
......@@ -1963,6 +1932,16 @@ class MainWindow(QMainWindow, WindowMixin):
QMessageBox.information(self, "Information", "Cropped images have been saved in "+str(crop_img_dir))
def speedChoose(self):
if self.labelDialogOption.isChecked():
self.canvas.newShape.disconnect()
self.canvas.newShape.connect(partial(self.newShape, True))
else:
self.canvas.newShape.disconnect()
self.canvas.newShape.connect(partial(self.newShape, False))
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
......
......@@ -2,19 +2,27 @@ English | [简体中文](README_ch.md)
# PPOCRLabel
PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field. It is written in python3 and pyqt5, supporting rectangular box annotation and four-point annotation modes. Annotations can be directly used for the training of PPOCR detection and recognition models.
PPOCRLabel is a semi-automatic graphic annotation tool suitable for the OCR field, with a built-in PPOCR model that automatically detects and re-recognizes data. It is written in Python3 and PyQT5, supporting rectangular box annotation and four-point annotation modes. Annotations can be used directly for training PPOCR detection and recognition models.
<img src="./data/gif/steps_en.gif" width="100%"/>
### Recent Update
- 2021.1.11: Optimized the labeling experience (by [edencfc](https://github.com/edencfc)):
- Users can choose whether to pop up the label input dialog after drawing the detection box in "View - Pop-up Label Input Dialog".
- The recognition result scrolls synchronously when the user clicks the related detection box.
- Single-click to modify the recognition result. (If you cannot change the result, please switch to the system default input method, or switch back to the original input method again.)
- 2020.12.18: Support re-recognition of a single label box (by [ninetailskim](https://github.com/ninetailskim) ), perfect shortcut keys.
### TODO:
- Lock box mode: for data from the same scene, the size and position of locked detection boxes can be transferred between different pictures.
- Experience optimization: add undo and batch operations such as move, copy, and delete; optimize the annotation process.
## Installation
### 1. Install PaddleOCR
Refer to [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR
PaddleOCR models have been built into PPOCRLabel; please refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR and make sure it works.
### 2. Install PPOCRLabel
......@@ -60,7 +68,7 @@ python3 PPOCRLabel.py
4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangular detection box. Click and release the left mouse button to select a region to annotate the text area.
4.2 Press 'P' to enter four-point labeling mode which enables you to create any four-point shape by clicking four points with the left mouse button in succession and DOUBLE CLICK the left mouse as the signal of labeling completion.
4.2 Press 'Q' to enter four-point labeling mode, which enables you to create any four-point shape by clicking four points in succession with the left mouse button; DOUBLE CLICK the left button to signal that labeling is complete.
5. After the marking frame is drawn, the user clicks "OK", and the detection frame will be pre-assigned a "TEMPORARY" label.
......@@ -72,7 +80,7 @@ python3 PPOCRLabel.py
9. Click "Delete Image" and the image will be moved to the recycle bin.
10. Labeling result: the user can save manually through the menu "File - Save Label", while the program will also save automatically after every 10 images confirmed by the user.the manually checked label will be stored in *Label.txt* under the opened picture folder.
10. Labeling result: the user can save manually through the menu "File - Save Label", and the program also saves automatically after every 5 images confirmed by the user. The manually checked labels are stored in *Label.txt* under the opened picture folder.
Click "PaddleOCR"-"Save Recognition Results" in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*<sup>[4]</sup>.
### Note
......@@ -88,7 +96,7 @@ Therefore, if the recognition result has been manually changed before, it may ch
| File name | Description |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | The detection label file can be directly used for PPOCR detection model training. After the user saves 10 label results, the file will be automatically saved. It will also be written when the user closes the application or changes the file folder. |
| Label.txt | The detection label file can be directly used for PPOCR detection model training. After the user saves 5 label results, the file will be automatically saved. It will also be written when the user closes the application or changes the file folder. |
| fileState.txt | The picture status file saves the images in the current folder that have been manually confirmed by the user. |
| Cache.cach | Cache files to save the results of model recognition. |
| rec_gt.txt | The recognition label file, which can be used directly for PPOCR recognition model training, is generated after the user clicks "File" - "Save recognition result" in the menu bar. |
......@@ -124,6 +132,15 @@ Therefore, if the recognition result has been manually changed before, it may ch
- Custom model: users can replace the built-in model with their own by modifying the [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) in PPOCRLabel.py, referring to the [Custom Model Code](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/whl_en.md#use-custom-model).
### Save
PPOCRLabel supports three ways to save Label.txt:
- Automatic save: when it detects that the user has manually confirmed 5 pictures, the program automatically writes the annotations into Label.txt. The user can change ``self.autoSaveNum`` in ``PPOCRLabel.py`` to set how many confirmed images trigger an automatic save; a sketch of this counter pattern follows the list below.
- Manual save: click "File - Save Label" to save the labels manually.
- Save on close: annotations are also written when the application is closed.
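As an illustration only, and not the actual PPOCRLabel implementation, a counter-based autosave of this kind can be sketched as follows; `AutoSaver`, `confirm_image`, and `save_label_file` are hypothetical names:

```python
# Hypothetical sketch of counter-based autosaving; the real logic lives in
# PPOCRLabel.py and differs in detail.
class AutoSaver:
    def __init__(self, auto_save_num=5):
        self.auto_save_num = auto_save_num  # plays the role of self.autoSaveNum
        self.confirmed = 0

    def confirm_image(self, save_label_file):
        """Call once per confirmed image; triggers a save every N images."""
        self.confirmed += 1
        if self.confirmed % self.auto_save_num == 0:
            save_label_file()  # e.g. write annotations to Label.txt

saver = AutoSaver(auto_save_num=5)
for _ in range(10):
    saver.confirm_image(lambda: print("saving Label.txt"))
```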
### Export partial recognition results
For data that are difficult to recognize, **uncheck** the corresponding entries in the recognition-result checkboxes; their recognition results will then not be exported.
......
......@@ -2,18 +2,30 @@
# PPOCRLabel
PPOCRLabel is a semi-automatic graphic annotation tool for the OCR field, written in Python3 and PyQT5. It supports rectangular box annotation and four-point annotation modes, and the exported format can be used directly to train PPOCR detection and recognition models.
PPOCRLabel is a semi-automatic graphic annotation tool for the OCR field, with a built-in PPOCR model that automatically annotates and re-recognizes data. Written in Python3 and PyQT5, it supports rectangular box annotation and four-point annotation modes, and the exported format can be used directly to train PPOCR detection and recognition models.
<img src="./data/gif/steps.gif" width="100%"/>
#### Recent Updates
- 2020.12.18: Support re-recognition of a single label box (by [ninetailskim](https://github.com/ninetailskim) ); improved shortcut keys.
- 2021.1.11: Optimized the annotation experience (by [edencfc](https://github.com/edencfc)):
- Users can choose in "View - Pop-up Label Input Dialog" whether the label input dialog pops up after a detection box is drawn.
- Recognition results scroll synchronously with the clicked detection box.
- Recognition results can be modified with a single click. (If they cannot be modified, switch to the system default input method, or switch back to the original input method again.)
- 2020.12.18: Support re-recognition of a single label box (by [ninetailskim](https://github.com/ninetailskim)); improved shortcut keys.
#### Coming Soon
- Lock box mode: for data from the same scene, the size and position of locked detection boxes can be transferred between different images.
- Experience optimization: add undo and batch operations such as move, copy, and delete; optimize the annotation workflow.
If you are interested in the above, or have different ideas for improving the tool, you are welcome to join our team and develop it together with us.
## Installation
### 1. Install PaddleOCR
Refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR.
PPOCRLabel has PaddleOCR models built in, so please refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR and make sure it is installed successfully.
### 2. Install PPOCRLabel
#### Windows + Anaconda
......@@ -49,13 +61,13 @@ python3 PPOCRLabel.py --lang ch
1. Installation and running: install and run the program with the commands above.
2. Open folder: click "File" - "Open Dir" in the menu bar and select the folder of images to annotate<sup>[1]</sup>.
3. Auto annotation: click "Auto Annotation"; the PPOCR ultra-lightweight model automatically annotates images whose status<sup>[2]</sup> (shown before the file name) is "X".
4. Manual annotation: click "Create RectBox" (or simply press "W" in English keyboard mode) to draw boxes for regions the model missed in the current image. Press "P" (or click "Edit" - "Four-point Annotation") for four-point annotation mode: click four points in succession, then double-click the left button to finish.
4. Manual annotation: click "Create RectBox" (or simply press "W" in English keyboard mode) to draw boxes for regions the model missed in the current image. Press "Q" (or click "Edit" - "Four-point Annotation") for four-point annotation mode: click four points in succession, then double-click the left button to finish.
5. After a box is drawn, click "Confirm"; the detection box is first pre-assigned a "TEMPORARY" label.
6. Re-recognition: after drawing/adjusting all detection boxes in the image, click "Re-recognition"; the PPOCR model will re-recognize **all detection boxes** in the current image<sup>[3]</sup>.
7. Content editing: double-click a recognition result to manually correct inaccurate text.
8. Confirm annotation: click "Confirm"; the image status switches to "√" and the tool jumps to the next image (results are not written to file at this point).
9. Delete: click "Delete Image" and the image is moved to the recycle bin.
10. Save results: the user can save manually via "File - Save Label" in the menu, and the program also saves automatically after every 10 confirmed images. Manually confirmed labels are stored in *Label.txt* under the opened image folder. Clicking "File" - "Save Recognition Results" in the menu bar saves the recognition training data of such images in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.
10. Save results: the user can save manually via "File - Save Label" in the menu, and the program also saves automatically after every 5 confirmed images. Manually confirmed labels are stored in *Label.txt* under the opened image folder. Clicking "File" - "Save Recognition Results" in the menu bar saves the recognition training data of such images in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.
### Note
......@@ -69,7 +81,7 @@ python3 PPOCRLabel.py --lang ch
| File name | Description |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | Detection labels, directly usable for PPOCR detection model training. The program writes the file automatically after every 10 detection results are saved, and also when the user closes the application or switches the file path. |
| Label.txt | Detection labels, directly usable for PPOCR detection model training. The program writes the file automatically after every 5 detection results are saved, and also when the user closes the application or switches the file path. |
| fileState.txt | Image status file; records the names of images in the current folder that have been manually confirmed by the user. |
| Cache.cach | Cache file storing the results of automatic model recognition. |
| rec_gt.txt | Recognition labels, directly usable for PPOCR recognition model training; generated after the user clicks "File" - "Save Recognition Results" in the menu bar. |
......@@ -104,6 +116,14 @@ python3 PPOCRLabel.py --lang ch
- Custom model: following [using a custom model](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md#%E8%87%AA%E5%AE%9A%E4%B9%89%E6%A8%A1%E5%9E%8B), users can replace the built-in model with their own by modifying the [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) in PPOCRLabel.py.
### Saving
PPOCRLabel supports three ways of saving:
- Automatic save: when the program detects that the user has manually confirmed 5 images, it automatically writes the annotations to Label.txt. The user can change ```self.autoSaveNum``` in ```PPOCRLabel.py``` to set how many confirmed images trigger an automatic save.
- Manual save: click "File - Save Label" to save annotations manually.
- Save on application close.
### Export partial recognition results
For data that are difficult to recognize, **uncheck** the corresponding entries in the recognition-result checkboxes and their recognition results will not be exported.
......@@ -115,7 +135,7 @@ python3 PPOCRLabel.py --lang ch
- PPOCRLabel **does not support automatic annotation** of images with **Chinese file names**.
- For Linux users: if an error starting with **objc[XXXXX]** appears when opening the software, your opencv version is too high; installing version 4.2 is recommended:
```
pip install opencv-python==4.2.0.32
```
......@@ -129,6 +149,7 @@ python3 PPOCRLabel.py --lang ch
```
pip install opencv-contrib-python-headless
```
### References
1. [Tzutalin. LabelImg. Git code (2015)](https://github.com/tzutalin/labelImg)
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class EditInList(QListWidget):
def __init__(self):
super(EditInList,self).__init__()
# click to edit
self.clicked.connect(self.item_clicked)
def item_clicked(self, modelindex: QModelIndex) -> None:
self.edited_item = self.currentItem()
self.closePersistentEditor(self.edited_item)
item = self.item(modelindex.row())
self.edited_item = item
self.openPersistentEditor(item)
self.editItem(item)
def mouseDoubleClickEvent(self, event):
# close edit
for i in range(self.count()):
self.closePersistentEditor(self.item(i))
def leaveEvent(self, event):
# close edit
for i in range(self.count()):
self.closePersistentEditor(self.item(i))
\ No newline at end of file
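A minimal usage sketch for the widget added above; the item labels are placeholders, and the editable flag is set explicitly since plain QListWidget items are not editable by default:

```python
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
from libs.editinlist import EditInList  # the module added in this commit

if __name__ == "__main__":
    app = QApplication(sys.argv)
    label_list = EditInList()
    label_list.addItems(["text line 1", "text line 2"])  # placeholder labels
    for i in range(label_list.count()):
        item = label_list.item(i)
        item.setFlags(item.flags() | Qt.ItemIsEditable)
    # a single click now opens an in-place editor (wired in __init__);
    # double-clicking or leaving the widget closes it again
    label_list.show()
    sys.exit(app.exec_())
```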
(The source diff for this file is too large to display; view the blob instead.)
......@@ -42,7 +42,7 @@ zoomin=放大画面
info=信息
openAnnotation=开启标签
prevImgDetail=上一个图像
fitWidth=缩放到跟当前画面一样宽
fitWidth=缩放到当前画面宽度
zoomout=缩小画面
changeSavedAnnotationDir=更改保存标签文件的预设目录
nextImgDetail=下一个图像
......@@ -96,3 +96,4 @@ hideBox=隐藏所有标注
showBox=显示所有标注
saveLabel=保存标记结果
singleRe=重识别此区块
labelDialogOption=弹出标记输入框
\ No newline at end of file
......@@ -96,3 +96,4 @@ hideBox=Hide All Box
showBox=Show All Box
saveLabel=Save Label
singleRe=Re-recognition RectBox
labelDialogOption=Pop-up Label Input Dialog
\ No newline at end of file
......@@ -173,7 +173,7 @@ This project is released under <a href="https://github.com/PaddlePaddle/PaddleOC
We welcome all contributions to PaddleOCR and appreciate your feedback very much.
- Many thanks to [Khanh Tran](https://github.com/xxxpsyduck) and [Karl Horky](https://github.com/karlhorky) for contributing and revising the English documentation.
- Many thanks to [zhangxin](https://github.com/ZhangXinNan) for contributing the new visualization function, adding .gitgnore, and removing the need to set PYTHONPATH manually.
- Many thanks to [zhangxin](https://github.com/ZhangXinNan) for contributing the new visualization function, adding .gitignore, and removing the need to set PYTHONPATH manually.
- Many thanks to [lyl120117](https://github.com/lyl120117) for contributing the code for printing the network structure.
- Thanks [xiangyubo](https://github.com/xiangyubo) for contributing the handwritten Chinese OCR datasets.
- Thanks [authorfu](https://github.com/authorfu) for contributing Android demo and [xiadeye](https://github.com/xiadeye) contributing iOS demo, respectively.
......
......@@ -8,7 +8,7 @@ PaddleOCR supports both dynamic graph and static graph programming paradigms
- Static graph version: develop branch
**Recent updates**
- 2020.12.28 [FAQ](./doc/doc_ch/FAQ.md) added 5 new frequently asked questions, for a total of 137; updated every Monday, stay tuned.
- 2021.1.18 [FAQ](./doc/doc_ch/FAQ.md) added 5 new frequently asked questions, for a total of 152; updated every Monday, stay tuned.
- 2020.12.15 Updated the data synthesis tool [Style-Text](./StyleText/README_ch.md), which can batch-synthesize large numbers of images similar to a target scene; validated in multiple scenarios with clear gains.
- 2020.11.25 Updated the semi-automatic annotation tool [PPOCRLabel](./PPOCRLabel/README_ch.md) to help developers complete annotation efficiently, with output that plugs directly into PP-OCR training.
- 2020.9.22 Published the PP-OCR technical article, https://arxiv.org/abs/2009.09941
......@@ -101,8 +101,8 @@ PaddleOCR supports both dynamic graph and static graph programming paradigms
- [Visual Results](#效果展示)
- FAQ
- [[Selected] 10 selected OCR questions](./doc/doc_ch/FAQ.md)
- [[Theory] 31 general OCR questions](./doc/doc_ch/FAQ.md)
- [[Practice] 96 PaddleOCR practical questions](./doc/doc_ch/FAQ.md)
- [[Theory] 32 general OCR questions](./doc/doc_ch/FAQ.md)
- [[Practice] 110 PaddleOCR practical questions](./doc/doc_ch/FAQ.md)
- [Technical Exchange Group](#欢迎加入PaddleOCR技术交流群)
- [References](./doc/doc_ch/reference.md)
- [License](#许可证书)
......@@ -149,7 +149,7 @@ PP-OCR is a practical ultra-lightweight OCR system, mainly composed of DB text detection [2], detection
- Many thanks to [Khanh Tran](https://github.com/xxxpsyduck) and [Karl Horky](https://github.com/karlhorky) for contributing and revising the English documentation
- Many thanks to [zhangxin](https://github.com/ZhangXinNan) ([Blog](https://blog.csdn.net/sdlypyzq)) for contributing a new visualization method, adding .gitgnore, and handling the manual PYTHONPATH setup
- Many thanks to [zhangxin](https://github.com/ZhangXinNan) ([Blog](https://blog.csdn.net/sdlypyzq)) for contributing a new visualization method, adding .gitignore, and handling the manual PYTHONPATH setup
- Many thanks to [lyl120117](https://github.com/lyl120117) for contributing the code that prints the network structure
- Many thanks to [xiangyubo](https://github.com/xiangyubo) for contributing the handwritten Chinese OCR dataset
- Many thanks to [authorfu](https://github.com/authorfu) for contributing the Android demo and [xiadeye](https://github.com/xiadeye) for contributing the iOS demo
......
......@@ -52,7 +52,7 @@ If you save the model in another location, please modify the address of the mode
```
bg_generator:
pretrain: style_text_rec/bg_generator
pretrain: style_text_models/bg_generator
...
text_generator:
pretrain: style_text_models/text_generator
......
......@@ -102,6 +102,7 @@ Train:
drop_last: False
batch_size_per_card: 16
num_workers: 8
use_shared_memory: False
Eval:
dataset:
......@@ -129,3 +130,4 @@ Eval:
drop_last: False
batch_size_per_card: 1 # must be 1
num_workers: 8
use_shared_memory: False
\ No newline at end of file
......@@ -76,6 +76,7 @@ Train:
batch_size_per_card: 256
drop_last: True
num_workers: 8
use_shared_memory: False
Eval:
dataset:
......@@ -96,3 +97,4 @@ Eval:
drop_last: False
batch_size_per_card: 256
num_workers: 4
use_shared_memory: False
......@@ -138,12 +138,22 @@ endif()
# Note: libpaddle_inference_api.so/a must be put before libpaddle_fluid.so/a
if(WITH_STATIC_LIB)
if(WIN32)
set(DEPS
${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
else()
if(WIN32)
set(DEPS
${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif()
endif(WITH_STATIC_LIB)
if (NOT WIN32)
set(DEPS ${DEPS}
......
......@@ -62,6 +62,10 @@ public:
this->cls_thresh = stod(config_map_["cls_thresh"]);
this->visualize = bool(stoi(config_map_["visualize"]));
this->use_tensorrt = bool(stoi(config_map_["use_tensorrt"]));
this->use_fp16 = bool(stoi(config_map_["use_fp16"]));
}
bool use_gpu = false;
......@@ -96,6 +100,10 @@ public:
bool visualize = true;
bool use_tensorrt = false;
bool use_fp16 = false;
void PrintConfigInfo();
private:
......
......@@ -39,7 +39,8 @@ public:
explicit Classifier(const std::string &model_dir, const bool &use_gpu,
const int &gpu_id, const int &gpu_mem,
const int &cpu_math_library_num_threads,
const bool &use_mkldnn, const double &cls_thresh) {
const bool &use_mkldnn, const double &cls_thresh,
const bool &use_tensorrt, const bool &use_fp16) {
this->use_gpu_ = use_gpu;
this->gpu_id_ = gpu_id;
this->gpu_mem_ = gpu_mem;
......@@ -47,6 +48,8 @@ public:
this->use_mkldnn_ = use_mkldnn;
this->cls_thresh = cls_thresh;
this->use_tensorrt_ = use_tensorrt;
this->use_fp16_ = use_fp16;
LoadModel(model_dir);
}
......@@ -69,7 +72,8 @@ private:
std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
bool is_scale_ = true;
bool use_tensorrt_ = false;
bool use_fp16_ = false;
// pre-process
ClsResizeImg resize_op_;
Normalize normalize_op_;
......
......@@ -44,8 +44,8 @@ public:
const bool &use_mkldnn, const int &max_side_len,
const double &det_db_thresh,
const double &det_db_box_thresh,
const double &det_db_unclip_ratio,
const bool &visualize) {
const double &det_db_unclip_ratio, const bool &visualize,
const bool &use_tensorrt, const bool &use_fp16) {
this->use_gpu_ = use_gpu;
this->gpu_id_ = gpu_id;
this->gpu_mem_ = gpu_mem;
......@@ -59,6 +59,8 @@ public:
this->det_db_unclip_ratio_ = det_db_unclip_ratio;
this->visualize_ = visualize;
this->use_tensorrt_ = use_tensorrt;
this->use_fp16_ = use_fp16;
LoadModel(model_dir);
}
......@@ -85,6 +87,8 @@ private:
double det_db_unclip_ratio_ = 2.0;
bool visualize_ = true;
bool use_tensorrt_ = false;
bool use_fp16_ = false;
std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
......
......@@ -41,12 +41,15 @@ public:
explicit CRNNRecognizer(const std::string &model_dir, const bool &use_gpu,
const int &gpu_id, const int &gpu_mem,
const int &cpu_math_library_num_threads,
const bool &use_mkldnn, const string &label_path) {
const bool &use_mkldnn, const string &label_path,
const bool &use_tensorrt, const bool &use_fp16) {
this->use_gpu_ = use_gpu;
this->gpu_id_ = gpu_id;
this->gpu_mem_ = gpu_mem;
this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
this->use_mkldnn_ = use_mkldnn;
this->use_tensorrt_ = use_tensorrt;
this->use_fp16_ = use_fp16;
this->label_list_ = Utility::ReadDict(label_path);
this->label_list_.insert(this->label_list_.begin(),
......@@ -76,7 +79,8 @@ private:
std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
bool is_scale_ = true;
bool use_tensorrt_ = false;
bool use_fp16_ = false;
// pre-process
CrnnResizeImg resize_op_;
Normalize normalize_op_;
......
......@@ -47,18 +47,20 @@ public:
class ResizeImgType0 {
public:
virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len,
float &ratio_h, float &ratio_w);
float &ratio_h, float &ratio_w, bool use_tensorrt);
};
class CrnnResizeImg {
public:
virtual void Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
bool use_tensorrt = false,
const std::vector<int> &rec_image_shape = {3, 32, 320});
};
class ClsResizeImg {
public:
virtual void Run(const cv::Mat &img, cv::Mat &resize_img,
bool use_tensorrt = false,
const std::vector<int> &rec_image_shape = {3, 48, 192});
};
......
......@@ -54,18 +54,20 @@ int main(int argc, char **argv) {
config.gpu_mem, config.cpu_math_library_num_threads,
config.use_mkldnn, config.max_side_len, config.det_db_thresh,
config.det_db_box_thresh, config.det_db_unclip_ratio,
config.visualize);
config.visualize, config.use_tensorrt, config.use_fp16);
Classifier *cls = nullptr;
if (config.use_angle_cls == true) {
cls = new Classifier(config.cls_model_dir, config.use_gpu, config.gpu_id,
config.gpu_mem, config.cpu_math_library_num_threads,
config.use_mkldnn, config.cls_thresh);
config.use_mkldnn, config.cls_thresh,
config.use_tensorrt, config.use_fp16);
}
CRNNRecognizer rec(config.rec_model_dir, config.use_gpu, config.gpu_id,
config.gpu_mem, config.cpu_math_library_num_threads,
config.use_mkldnn, config.char_list_file);
config.use_mkldnn, config.char_list_file,
config.use_tensorrt, config.use_fp16);
auto start = std::chrono::system_clock::now();
std::vector<std::vector<std::vector<int>>> boxes;
......@@ -75,11 +77,11 @@ int main(int argc, char **argv) {
auto end = std::chrono::system_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::microseconds>(end - start);
std::cout << "花费了"
std::cout << "Cost "
<< double(duration.count()) *
std::chrono::microseconds::period::num /
std::chrono::microseconds::period::den
<< "" << std::endl;
<< "s" << std::endl;
return 0;
}
......@@ -25,7 +25,7 @@ cv::Mat Classifier::Run(cv::Mat &img) {
int index = 0;
float wh_ratio = float(img.cols) / float(img.rows);
this->resize_op_.Run(img, resize_img, cls_image_shape);
this->resize_op_.Run(img, resize_img, this->use_tensorrt_, cls_image_shape);
this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
this->is_scale_);
......@@ -76,6 +76,13 @@ void Classifier::LoadModel(const std::string &model_dir) {
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 20, 10, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, false);
}
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
......
......@@ -24,10 +24,13 @@ void DBDetector::LoadModel(const std::string &model_dir) {
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
// config.EnableTensorRtEngine(
// 1 << 20, 1, 3,
// AnalysisConfig::Precision::kFloat32,
// false, false);
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 20, 10, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, false);
}
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
......@@ -58,7 +61,8 @@ void DBDetector::Run(cv::Mat &img,
cv::Mat srcimg;
cv::Mat resize_img;
img.copyTo(srcimg);
this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w);
this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w,
this->use_tensorrt_);
this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
this->is_scale_);
......
......@@ -33,7 +33,7 @@ void CRNNRecognizer::Run(std::vector<std::vector<std::vector<int>>> boxes,
float wh_ratio = float(crop_img.cols) / float(crop_img.rows);
this->resize_op_.Run(crop_img, resize_img, wh_ratio);
this->resize_op_.Run(crop_img, resize_img, wh_ratio, this->use_tensorrt_);
this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
this->is_scale_);
......@@ -76,7 +76,7 @@ void CRNNRecognizer::Run(std::vector<std::vector<std::vector<int>>> boxes,
float(*std::max_element(&predict_batch[n * predict_shape[2]],
&predict_batch[(n + 1) * predict_shape[2]]));
if (argmax_idx > 0 && (not(i > 0 && argmax_idx == last_index))) {
if (argmax_idx > 0 && (!(i > 0 && argmax_idx == last_index))) {
score += max_value;
count += 1;
str_res.push_back(label_list_[argmax_idx]);
......@@ -99,6 +99,13 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 20, 10, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, false);
}
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
......
......@@ -60,7 +60,8 @@ void Normalize::Run(cv::Mat *im, const std::vector<float> &mean,
}
void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
int max_size_len, float &ratio_h, float &ratio_w) {
int max_size_len, float &ratio_h, float &ratio_w,
bool use_tensorrt) {
int w = img.cols;
int h = img.rows;
......@@ -89,14 +90,19 @@ void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
resize_w = 32;
else
resize_w = (resize_w / 32) * 32;
if (!use_tensorrt) {
cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
ratio_h = float(resize_h) / float(h);
ratio_w = float(resize_w) / float(w);
} else {
cv::resize(img, resize_img, cv::Size(640, 640));
ratio_h = float(640) / float(h);
ratio_w = float(640) / float(w);
}
}
void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
bool use_tensorrt,
const std::vector<int> &rec_image_shape) {
int imgC, imgH, imgW;
imgC = rec_image_shape[0];
......@@ -111,12 +117,27 @@ void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
resize_w = imgW;
else
resize_w = int(ceilf(imgH * ratio));
if (!use_tensorrt) {
cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
cv::INTER_LINEAR);
cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0,
int(imgW - resize_img.cols), cv::BORDER_CONSTANT,
{127, 127, 127});
} else {
int k = int(img.cols * 32 / img.rows);
if (k >= 100) {
cv::resize(img, resize_img, cv::Size(100, 32), 0.f, 0.f,
cv::INTER_LINEAR);
} else {
cv::resize(img, resize_img, cv::Size(k, 32), 0.f, 0.f, cv::INTER_LINEAR);
cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0, int(100 - k),
cv::BORDER_CONSTANT, {127, 127, 127});
}
}
}
void ClsResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,
bool use_tensorrt,
const std::vector<int> &rec_image_shape) {
int imgC, imgH, imgW;
imgC = rec_image_shape[0];
......@@ -130,12 +151,16 @@ void ClsResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,
else
resize_w = int(ceilf(imgH * ratio));
if (!use_tensorrt) {
cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
cv::INTER_LINEAR);
if (resize_w < imgW) {
cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0, imgW - resize_w,
cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
}
} else {
cv::resize(img, resize_img, cv::Size(100, 32), 0.f, 0.f, cv::INTER_LINEAR);
}
}
} // namespace PaddleOCR
......@@ -24,3 +24,7 @@ char_list_file ../../ppocr/utils/ppocr_keys_v1.txt
# show the detection results
visualize 1
# use_tensorrt
use_tensorrt 0
use_fp16 0
## Introduction
Complex models help improve performance, but they also introduce redundancy. Model quantization reduces this redundancy by reducing full-precision values to fixed-point numbers, lowering the model's computational complexity and improving its inference performance.
Model quantization converts FP32 model parameters to Int8 precision with essentially no loss of accuracy, shrinking the parameter size and accelerating computation; quantized models have a clear speed advantage when deployed on mobile devices.
This tutorial introduces how to compress PaddleOCR models with PaddleSlim, the PaddlePaddle model compression library.
[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim) integrates pruning, quantization (both quantization-aware training and post-training quantization), distillation, neural architecture search, and other widely used, industry-leading model compression techniques; follow the project if you are interested.
Before starting this tutorial, it is recommended to understand [how PaddleOCR models are trained](../../../doc/doc_ch/quickstart.md) as well as [PaddleSlim](https://paddleslim.readthedocs.io/zh_CN/latest/index.html).
## Quick Start
Quantization is mostly used to deploy lightweight models on mobile devices. After training a model, if you want to further compress its size and speed up prediction, quantization can be used to compress the model.
Model quantization consists of five steps:
1. Install PaddleSlim
2. Prepare a trained model
3. Quantization-aware training
4. Export the quantized inference model
5. Deploy the quantized model
### 1. Install PaddleSlim
```bash
git clone https://github.com/PaddlePaddle/PaddleSlim.git
cd PaddleSlim
python setup.py install
```
### 2. Prepare a trained model
PaddleOCR provides a series of trained [models](../../../doc/doc_ch/models_list.md). If the model to be quantized is not in the list, train one following the [regular training](../../../doc/doc_ch/quickstart.md) method.
### 3. Quantization-aware training
Quantization includes post-training quantization and quantization-aware training; the latter works better. It requires loading a pretrained model; once the quantization strategy is defined, the model can be quantized.
The quantization training code is in slim/quantization/quant.py. For example, to train a detection model, the commands are as follows:
```bash
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights='your trained model' Global.save_model_dir=./output/quant_model
# e.g. download the provided trained model
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
tar -xf ch_ppocr_mobile_v2.0_det_train.tar
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_model_dir=./output/quant_model
```
To quantize a recognition model, simply change the configuration file and the loaded model parameters.
### 4. Export the model
After obtaining the model saved by quantization training, we can export it as an inference_model for prediction deployment:
```bash
python deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=output/quant_model/best_accuracy Global.save_model_dir=./output/quant_inference_model
```
### 5. Deploy the quantized model
The parameters of the quantized model exported above are still stored as FP32, but their value range is int8; the exported model can be converted with PaddleLite's opt model conversion tool.
For quantized model deployment, refer to [Mobile model deployment](../../lite/readme.md).
## Introduction
Generally, a more complex model achieves better performance on a task, but it also introduces some redundancy.
Quantization is a technique that reduces this redundancy by reducing full-precision data to fixed-point numbers,
so as to reduce model computational complexity and improve model inference performance.
This example uses PaddleSlim provided [APIs of Quantization](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/) to compress the OCR model.
It is recommended that you understand the following pages before reading this example:
- [The training strategy of OCR model](../../../doc/doc_en/quickstart_en.md)
- [PaddleSlim Document](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/)
## Quick Start
Quantization is mostly suitable for the deployment of lightweight models on mobile terminals.
After training, if you want to further compress the model size and accelerate the prediction, you can use quantization methods to compress the model according to the following steps.
1. Install PaddleSlim
2. Prepare trained model
3. Quantization-Aware Training
4. Export inference model
5. Deploy quantization inference model
### 1. Install PaddleSlim
```bash
git clone https://github.com/PaddlePaddle/PaddleSlim.git
cd PaddleSlim
python setup.py install
```
### 2. Download Pretrain Model
PaddleOCR provides a series of trained [models](../../../doc/doc_en/models_list_en.md).
If the model to be quantized is not in the list, you need to follow the [Regular Training](../../../doc/doc_en/quickstart_en.md) method to get a trained model.
### 3. Quant-Aware Training
Quantization includes post-training quantization and quantization-aware training; the latter is more effective.
It requires loading a pretrained model.
After the quantization strategy is defined, the model can be quantized.
The code for quantization training is located in `slim/quantization/quant.py`. For example, to train a detection model, the training instructions are as follows:
```bash
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights='your trained model' Global.save_model_dir=./output/quant_model
# download provided model
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
tar -xf ch_ppocr_mobile_v2.0_det_train.tar
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_model_dir=./output/quant_model
```
### 4. Export inference model
After quantization training and finetuning, we can export the model as an inference_model for predictive deployment:
```bash
python deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=output/quant_model/best_accuracy Global.save_model_dir=./output/quant_inference_model
```
### 5. Deploy
The parameters of the quantized model exported by the above steps are still stored as FP32, but their numerical range is int8.
The exported model can be converted with the `opt tool` of PaddleLite.
For quantized model deployment, please refer to [Mobile terminal model deployment](../../lite/readme_en.md)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..', '..', '..')))
sys.path.append(
os.path.abspath(os.path.join(__dir__, '..', '..', '..', 'tools')))
import argparse
import paddle
from paddle.jit import to_static
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import init_model
from ppocr.utils.logging import get_logger
from tools.program import load_config, merge_config, ArgsParser
from ppocr.metrics import build_metric
import tools.program as program
from paddleslim.dygraph.quant import QAT
from ppocr.data import build_dataloader
def main():
############################################################################################################
# 1. quantization configs
############################################################################################################
quant_config = {
# weight preprocess type, default is None and no preprocessing is performed.
'weight_preprocess_type': None,
# activation preprocess type, default is None and no preprocessing is performed.
'activation_preprocess_type': None,
# weight quantize type, default is 'channel_wise_abs_max'
'weight_quantize_type': 'channel_wise_abs_max',
# activation quantize type, default is 'moving_average_abs_max'
'activation_quantize_type': 'moving_average_abs_max',
# weight quantize bit num, default is 8
'weight_bits': 8,
# activation quantize bit num, default is 8
'activation_bits': 8,
# data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
'dtype': 'int8',
# window size for 'range_abs_max' quantization. default is 10000
'window_size': 10000,
# The decay coefficient of moving average, default is 0.9
'moving_rate': 0.9,
# for dygraph quantization, layers of type in quantizable_layer_type will be quantized
'quantizable_layer_type': ['Conv2D', 'Linear'],
}
FLAGS = ArgsParser().parse_args()
config = load_config(FLAGS.config)
merge_config(FLAGS.opt)
logger = get_logger()
# build post process
post_process_class = build_post_process(config['PostProcess'],
config['Global'])
# build model
# for rec algorithm
if hasattr(post_process_class, 'character'):
char_num = len(getattr(post_process_class, 'character'))
config['Architecture']["Head"]['out_channels'] = char_num
model = build_model(config['Architecture'])
# get QAT model
quanter = QAT(config=quant_config)
quanter.quantize(model)
init_model(config, model, logger)
model.eval()
# build metric
eval_class = build_metric(config['Metric'])
# build dataloader
valid_dataloader = build_dataloader(config, 'Eval', device, logger)
# start eval
metric = program.eval(model, valid_dataloader, post_process_class,
eval_class)
logger.info('metric eval ***************')
for k, v in metric.items():
logger.info('{}:{}'.format(k, v))
save_path = '{}/inference'.format(config['Global']['save_inference_dir'])
infer_shape = [3, 32, 100] if config['Architecture'][
'model_type'] != "det" else [3, 640, 640]
quanter.save_quantized_model(
model,
save_path,
input_spec=[
paddle.static.InputSpec(
shape=[None] + infer_shape, dtype='float32')
])
logger.info('inference QAT model is saved to {}'.format(save_path))
if __name__ == "__main__":
config, device, logger, vdl_writer = program.preprocess()
main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..', '..', '..')))
sys.path.append(
os.path.abspath(os.path.join(__dir__, '..', '..', '..', 'tools')))
import yaml
import paddle
import paddle.distributed as dist
paddle.seed(2)
from ppocr.data import build_dataloader
from ppocr.modeling.architectures import build_model
from ppocr.losses import build_loss
from ppocr.optimizer import build_optimizer
from ppocr.postprocess import build_post_process
from ppocr.metrics import build_metric
from ppocr.utils.save_load import init_model
import tools.program as program
from paddleslim.dygraph.quant import QAT
dist.get_world_size()
class PACT(paddle.nn.Layer):
def __init__(self):
super(PACT, self).__init__()
alpha_attr = paddle.ParamAttr(
name=self.full_name() + ".pact",
initializer=paddle.nn.initializer.Constant(value=20),
learning_rate=1.0,
regularizer=paddle.regularizer.L2Decay(2e-5))
self.alpha = self.create_parameter(
shape=[1], attr=alpha_attr, dtype='float32')
def forward(self, x):
out_left = paddle.nn.functional.relu(x - self.alpha)
out_right = paddle.nn.functional.relu(-self.alpha - x)
x = x - out_left + out_right
return x
quant_config = {
# weight preprocess type, default is None and no preprocessing is performed.
'weight_preprocess_type': None,
# activation preprocess type, default is None and no preprocessing is performed.
'activation_preprocess_type': None,
# weight quantize type, default is 'channel_wise_abs_max'
'weight_quantize_type': 'channel_wise_abs_max',
# activation quantize type, default is 'moving_average_abs_max'
'activation_quantize_type': 'moving_average_abs_max',
# weight quantize bit num, default is 8
'weight_bits': 8,
# activation quantize bit num, default is 8
'activation_bits': 8,
# data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
'dtype': 'int8',
# window size for 'range_abs_max' quantization. default is 10000
'window_size': 10000,
# The decay coefficient of moving average, default is 0.9
'moving_rate': 0.9,
# for dygraph quantization, layers of type in quantizable_layer_type will be quantized
'quantizable_layer_type': ['Conv2D', 'Linear'],
}
def main(config, device, logger, vdl_writer):
# init dist environment
if config['Global']['distributed']:
dist.init_parallel_env()
global_config = config['Global']
# build dataloader
train_dataloader = build_dataloader(config, 'Train', device, logger)
if config['Eval']:
valid_dataloader = build_dataloader(config, 'Eval', device, logger)
else:
valid_dataloader = None
# build post process
post_process_class = build_post_process(config['PostProcess'],
global_config)
# build model
# for rec algorithm
if hasattr(post_process_class, 'character'):
char_num = len(getattr(post_process_class, 'character'))
config['Architecture']["Head"]['out_channels'] = char_num
model = build_model(config['Architecture'])
# prepare to quant
quanter = QAT(config=quant_config, act_preprocess=PACT)
quanter.quantize(model)
if config['Global']['distributed']:
model = paddle.DataParallel(model)
# build loss
loss_class = build_loss(config['Loss'])
# build optim
optimizer, lr_scheduler = build_optimizer(
config['Optimizer'],
epochs=config['Global']['epoch_num'],
step_each_epoch=len(train_dataloader),
parameters=model.parameters())
# build metric
eval_class = build_metric(config['Metric'])
# load pretrain model
pre_best_model_dict = init_model(config, model, logger, optimizer)
logger.info('train dataloader has {} iters, valid dataloader has {} iters'.
format(len(train_dataloader), len(valid_dataloader)))
# start train
program.train(config, train_dataloader, valid_dataloader, device, model,
loss_class, optimizer, lr_scheduler, post_process_class,
eval_class, pre_best_model_dict, logger, vdl_writer)
def test_reader(config, device, logger):
loader = build_dataloader(config, 'Train', device, logger)
import time
starttime = time.time()
count = 0
try:
for data in loader():
count += 1
if count % 1 == 0:
batch_time = time.time() - starttime
starttime = time.time()
logger.info("reader: {}, {}, {}".format(
count, len(data[0]), batch_time))
except Exception as e:
logger.info(e)
logger.info("finish reader: {}, Success!".format(count))
if __name__ == '__main__':
config, device, logger, vdl_writer = program.preprocess(is_train=True)
main(config, device, logger, vdl_writer)
# test_reader(config, device, logger)
......@@ -9,45 +9,42 @@
## PaddleOCR FAQ (continuously updated)
* [Recent updates (2020.12.28)](#近期更新)
* [Recent updates (2021.1.18)](#近期更新)
* [[Selected] 10 selected OCR questions](#OCR精选10个问题)
* [[Theory] 31 general OCR questions](#OCR通用问题)
* [[Theory] 32 general OCR questions](#OCR通用问题)
* [7 questions on basics](#基础知识)
* [7 questions on datasets](#数据集2)
* [17 questions on model training/tuning](#模型训练调优2)
* [[Practice] 96 PaddleOCR practical questions](#PaddleOCR实战问题)
* [28 questions on usage](#使用咨询)
* [18 questions on model training/tuning](#模型训练调优2)
* [[Practice] 110 PaddleOCR practical questions](#PaddleOCR实战问题)
* [36 questions on usage](#使用咨询)
* [17 questions on datasets](#数据集3)
* [26 questions on model training/tuning](#模型训练调优3)
* [25 questions on prediction/deployment](#预测部署3)
* [28 questions on model training/tuning](#模型训练调优3)
* [29 questions on prediction/deployment](#预测部署3)
<a name="近期更新"></a>
## Recent updates (2020.12.28)
## Recent updates (2021.1.18)
#### Q3.1.25: On the dygraph branch, when training PaddleOCR in docker the data path is fine, but the error `reader rasied an exception` keeps appearing. Why?
**A**: When a docker container is created, `/dev/shm` defaults to 64M; with multi-process data loading the shared memory may be insufficient, so allocate more space for `/dev/shm`, e.g. pass `--shm-size=8g` when creating the container to give `/dev/shm` 8g of space.
#### Q2.3.18: In the PP-OCR system, why doesn't the text detection backbone use the SE module?
#### Q3.1.26: I cannot find Lite and PaddleServing deployment tutorials in the repo. Where are they?
**A**: The SE module is an important module of the MobileNetV3 network; it estimates the importance of each feature channel, assigns weights to channels, and improves the network's expressive power. For text detection, however, the input resolution is large, typically 640\*640, so estimating channel importance with SE is difficult, the gain is limited, and the module is time-consuming; therefore the PP-OCR text detection backbone does not use SE. Experiments also show that removing SE shrinks the ultra-lightweight model by 40% with essentially no effect on detection quality. See the PP-OCR technical article for details, https://arxiv.org/abs/2009.09941.
**A**: The default PaddleOCR branch is currently dygraph; dynamic-graph deployment for Lite and PaddleServing is still being adapted. To deploy on Lite or with PaddleServing, use the develop branch (static graph).
#### Q3.3.27: Which data augmentation methods does PaddleOCR support when training text recognition models?
#### Q3.1.27: How can I visualize acc/loss curves, the network structure, and so on?
**A**: Supported augmentations for text recognition include small random cropping, image balancing, white-noise addition, color drift, image inversion, and Text Image Augmentation (TIA). See the warp function in the [code](../../ppocr/data/imaug/rec_img_aug.py).
**A**: The configuration file has a `use_visualdl` parameter; set it to True. For more usage, see the [VisualDL guide](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/guides/03_VisualDL/visualdl.html).
#### Q3.1.28: When using the StyleText data synthesis tool, the error `ModuleNotFoundError: No module named 'utils.config'` appears. Why?
#### Q3.3.28: On the dygraph branch, how do I enable data augmentation when training a text recognition model?
**A**: There are two solutions:
- set PYTHONPATH under the StyleText path: `export PYTHONPATH=./`
- pull the latest code
**A**: Referring to the [configuration file](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml), add a RecAug field under Train['dataset']['transforms'] to enable augmentation. aug_prob sets the probability of applying each augmentation and defaults to 0.4. Because TIA augmentation is special, it is disabled by default; enable it with use_tia. See [ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744) for details.
#### Q3.3.26: Why does PaddleOCR always use the cosine_decay learning-rate schedule during training?
#### Q3.4.28: In the PP-OCR system, do text detection results have confidence scores?
**A**: With cosine_decay the learning rate gradually decays to 0 following a cosine curve; with longer training it converges better than a constant learning rate, so cosine_decay is used in practice to obtain higher-accuracy models.
**A**: Text detection results do have confidence scores, but since they are not used during inference they are not returned explicitly. To obtain them, add the scores information at line 155 of the [DB post-processing code](../../ppocr/postprocess/db_postprocess.py); then the detection scores can be retrieved at line 197 of the [detection inference code](../../tools/infer/predict_det.py).
#### Q3.4.29: For DB text detection, where is the code that builds the feature pyramid?
**A**: The feature-pyramid construction is [here](../../ppocr/modeling/necks/db_fpn.py). The ppocr/modeling folder holds the network-building code: architectures contains the overall text detection/recognition pipelines; backbones the backbone networks; necks the FPN-like neck functions; heads the head functions that produce detection/recognition predictions; transforms the TPS-like feature preprocessing modules. See [code organization](./tree.md) for more.
<a name="OCR精选10个问题"></a>
## [Selected] 10 selected OCR questions
......@@ -156,7 +153,7 @@
**A**: End-to-end approaches have a clear efficiency advantage in scenes with dense text; accuracy depends on your accumulated business data. If you have plenty of line-level recognition data, two-stage will be better. Baidu's production scenarios, such as industrial meter reading and license plate recognition, use end-to-end solutions.
#### Q2.1.4 How can seals/stamps be recognized?
**A**: 1. Use a recognition network with TPS, or ABCNet. 2. Use a polar-coordinate transform to flatten the image, then use CRNN.
#### Q2.1.5 The multilingual dictionary mixes different languages. Is there a reason for this, and how much accuracy is lost by unifying everything into one dictionary?
**A**: A unified dictionary makes the final FC layer very large, increasing model size. For special needs you can merge the dictionaries of the required languages and train a model; if the merge introduces too many similar-looking characters, accuracy may drop, and character balance may also need consideration. For now, PaddleOCR keeps the language dictionaries separate.
......@@ -193,11 +190,11 @@
#### Q2.2.6: When training data is scarce, how can more data be obtained?
**A**: Three options: (1) manually collect more training data, the most direct and effective way; (2) basic image processing or transforms with PIL and opencv, e.g. PIL's ImageFont, Image, and ImageDraw modules to write text onto backgrounds, opencv's rotation/affine transforms, Gaussian filtering, etc. (a PIL sketch follows below); (3) synthesize data with generation algorithms such as pix2pix.
#### Q2.2.7: What characterizes SRNet, the text synthesis method from the paper "Editing Text in the Wild"?
**A**: SRNet synthesizes text data by borrowing ideas from GAN image-to-image translation and style transfer. Unlike generic GAN methods that choose a single branch, SRNet decomposes text synthesis into three simple sub-modules to improve the quality of the synthesized data: a background-free text style-transfer module, a background extraction module, and a fusion module. PaddleOCR plans to open-source a practical SRNet-based model in mid-December 2020.
<a name="模型训练调优2"></a>
### Model training and tuning
......@@ -289,6 +286,9 @@
**A**: Data generated by the StyleText model is mainly used to train OCR recognition models. Since PaddleOCR recognition models currently take 32 x N input, the current version is mainly suited to data of height 32.
It is recommended to set the synthesized data size to 32 x N. Data of similar sizes can also be generated; very large or very small sizes indeed work poorly.
#### Q2.3.18: In the PP-OCR system, why doesn't the text detection backbone use the SE module?
**A**: The SE module is an important module of the MobileNetV3 network; it estimates the importance of each feature channel, assigns weights to channels, and improves the network's expressive power. For text detection, however, the input resolution is large, typically 640\*640, so estimating channel importance with SE is difficult, the gain is limited, and the module is time-consuming; therefore the PP-OCR text detection backbone does not use SE. Experiments also show that removing SE shrinks the ultra-lightweight model by 40% with essentially no effect on detection quality. See the PP-OCR technical article for details, https://arxiv.org/abs/2009.09941.
<a name="PaddleOCR实战问题"></a>
## [Practice] PaddleOCR practical questions
......@@ -346,7 +346,7 @@
#### Q3.1.13: The boxes from the detection model are too tight and lose edge characters, causing recognition errors
**A**: Add --det_db_unclip_ratio to the command; the parameter is [defined here](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/tools/infer/utility.py#L48). It controls the size of text boxes in detection post-processing and defaults to 1.6; try 2.5 or larger. Conversely, if the boxes are not tight enough, decrease it.
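With the pip-installed whl package, the same parameter can also be passed when constructing the predictor; a sketch, assuming the paddleocr whl package accepts det_db_unclip_ratio as its command-line tools do:

```python
from paddleocr import PaddleOCR

# enlarge the detected boxes; the default det_db_unclip_ratio is 1.6
ocr = PaddleOCR(det_db_unclip_ratio=2.5)
result = ocr.ocr("test.jpg")
```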
#### Q3.1.14: Is there a plan to provide a pretrained model for English handwriting recognition?
......@@ -410,7 +410,7 @@ python3 -m pip install paddlepaddle-gpu==2.0.0rc1 -i https://mirror.baidu.com/py
#### Q3.1.24: What is the difference between the PaddleOCR develop and dygraph branches?
**A**: PaddleOCR currently has four branches:
- develop: developed on the Paddle static graph; paddle 1.8 or 2.0 is recommended. It has complete training, prediction, inference deployment, and quantization/pruning features, and is ahead of release/1.1.
- release/1.1: the first stable PaddleOCR release, developed on the static graph, with complete training, prediction, inference deployment, and quantization/pruning features.
......@@ -423,22 +423,60 @@ python3 -m pip install paddlepaddle-gpu==2.0.0rc1 -i https://mirror.baidu.com/py
#### Q3.1.25: On the dygraph branch, when training PaddleOCR in docker the data path is fine, but the error `reader rasied an exception` keeps appearing. Why?
**A**: When a docker container is created, `/dev/shm` defaults to 64M; with multi-process data loading the shared memory may be insufficient, so allocate more space for `/dev/shm`, e.g. pass `--shm-size=8g` when creating the container.
#### Q3.1.26: I cannot find Lite and PaddleServing deployment tutorials in the repo. Where are they?
**A**: The default PaddleOCR branch is currently dygraph; dynamic-graph deployment for Lite and PaddleServing is still being adapted. To deploy on Lite or with PaddleServing, use the develop branch (static graph).
#### Q3.1.27: How can I visualize acc/loss curves, the network structure, and so on?
**A**: The configuration file has a `use_visualdl` parameter; set it to True (see the fragment below). For more usage, see the [VisualDL guide](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/guides/03_VisualDL/visualdl.html).
#### Q3.1.28: When using the StyleText data synthesis tool, the error `ModuleNotFoundError: No module named 'utils.config'` appears. Why?
**A**: There are two solutions:
- set PYTHONPATH under the StyleText path: `export PYTHONPATH=./`
- pull the latest code
#### Q3.1.29: PPOCRLabel only draws squares when creating rectangular boxes. How do I annotate rectangles?
**A**: Uncheck "Edit" - "Square Annotation".
#### Q3.1.30: Can Style-Text skip the style transfer and, like an ordinary text generator, render the default font directly onto the segmented background?
**A**: The image_synth mode outputs fake_bg.jpg, which is the background image. To batch-extract backgrounds, modify the code slightly to save fake_bg. Location to modify:
https://github.com/PaddlePaddle/PaddleOCR/blob/de3e2e7cd3b8b65ee02d7a41e570fa5b511a3c1d/StyleText/engine/synthesisers.py#L68
#### Q3.1.31 How do I print the network structure and per-layer parameter information?
**A**: Use `paddle.summary`; see https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc1/api/paddle/hapi/model_summary/summary_cn.html#summary.
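A minimal example with a built-in paddle.vision model; any paddle.nn.Layer works the same way:

```python
import paddle

model = paddle.vision.models.LeNet()
# prints each layer with its output shape and parameter count
info = paddle.summary(model, (1, 1, 28, 28))
print(info)
```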
#### Q3.1.32 Can the resolution in the StyleText configuration file be changed?
**A**: StyleText's training data currently consists mainly of images of height 32, so do not change the height for now. Richer resolutions will be supported in the future.
#### Q3.1.33 Can the font file of StyleText be replaced?
**A**: The font file in the StyleText project is a standard font used as part of the model input and cannot be modified.
StyleText's main purpose is to extract style information (font, background, etc.) from a style_image and generate images in the same style from your corpus.
#### Q3.1.34 Why does StyleText batch generation produce no output?
**A**: Check that all paths in your configuration file exist, paying particular attention to the [label_file configuration](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/StyleText/README_ch.md#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B).
Even if your style_image input has no label information, you still need to provide an image file list.
#### Q3.1.35 How do I assemble OCR output into meaningful sentences?
**A**: OCR output contains both coordinates and text content. If you do not care about text order, simply concatenate by box index (a sorting sketch follows below).
To arrange the text in a certain order, define rules over the coordinates, for example connecting the results top-to-bottom and left-to-right.
For regular vertical-domain scenes, templates can be defined and matched by position and content.
For example, when recognizing ID card photos, first match keywords such as "Name" and "Gender", infer the positions of other fields from those keyword coordinates, and then match them against the recognition results.
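A sketch of the simplest top-to-bottom, left-to-right rule, assuming each result is a (box, text) pair with the box given as four (x, y) corner points:

```python
def sort_reading_order(results):
    """Sort (box, text) pairs by the top edge, then by the left edge."""
    def key(item):
        box, _ = item
        xs = [p[0] for p in box]
        ys = [p[1] for p in box]
        return (min(ys), min(xs))
    return sorted(results, key=key)

results = [
    ([(0, 40), (50, 40), (50, 60), (0, 60)], "second line"),
    ([(0, 0), (50, 0), (50, 20), (0, 20)], "first line"),
]
print("\n".join(text for _, text in sort_reading_order(results)))
```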
#### Q3.1.36 How do I recognize ancient text on bamboo slips?
**A**: If the characters are ordinary Chinese characters, annotating enough data and finetuning the model is sufficient; if data is scarce, try the StyleText tool.
If the characters are special ancient scripts, oracle bone script, pictographs, and so on, first build a dictionary of the ancient characters, then train.
<a name="数据集3"></a>
### Datasets
......@@ -500,8 +538,8 @@ python3 -m pip install paddlepaddle-gpu==2.0.0rc1 -i https://mirror.baidu.com/py
#### Q3.2.11: Which annotation tools can be used to label OCR datasets?
**A**: You can refer to: https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/data_annotation_en.md
We plan to release an efficient OCR data annotation tool; please keep following PaddleOCR's updates.
**A**: The PPOCRLabel tool is recommended.
You can also refer to: https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/data_annotation_en.md
#### Q3.2.12: Some special scenes have poor recognition quality, but there is too little data to finetune. What can be done?
......@@ -520,15 +558,15 @@ python3 -m pip install paddlepaddle-gpu==2.0.0rc1 -i https://mirror.baidu.com/py
#### Q3.2.15: What is special about the PPOCRLabel text annotation tool?
**A**: PPOCRLabel is a semi-automatic text annotation tool. It uses PPOCR-based Chinese/English OCR models to pre-predict text detection and recognition results, and the user only needs to verify and correct them, greatly improving annotation efficiency. The exported annotations directly match the data format required for PPOCR training.
#### Q3.2.16: Can the PPOCRLabel text annotation tool switch models?
**A**: PPOCRLabel runs OCR via the pip-installed whl package for quick inference; you can change the model path as described in the docs to adapt annotation to a specific task. The whl quick-inference docs are here: https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md.
#### Q3.2.17: Which environments does the PPOCRLabel text annotation tool support?
**A**: PPOCRLabel runs on Linux, Windows, macOS, and other systems. For the steps, see https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/README.md
<a name="模型训练调优3"></a>
......@@ -560,7 +598,6 @@ det_db_unclip_ratio: 文本框扩张的系数,关系到文本框的大小``
ps -axu | grep train.py | awk '{print $2}' | xargs kill -9
```
#### Q3.3.5: Can pretrain_weights be set to empty? I want to train a model from scratch.
**A**: Yes. When training the general recognition model, pretrain_weights is indeed set to empty, but more training epochs may be needed to reach the same accuracy.
......@@ -641,15 +678,15 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9
#### Q3.3.20: How do I add blur data augmentation for text detection?
**A**: Blur augmentation requires a code change. Taking DB as an example, refer to [Normalize](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/ppocr/data/imaug/operators.py#L60) and add a blur operator alongside it; a sketch follows.
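A minimal sketch of such an operator, in the style of the existing classes in ppocr/data/imaug/operators.py (the class name, probability, and kernel size are assumptions; you would still need to add it to the config's transforms list):
```
import cv2
import numpy as np

class BlurImage(object):
    """Randomly Gaussian-blurs data['image'] with probability `prob`."""

    def __init__(self, prob=0.3, kernel=5, **kwargs):
        self.prob = prob
        self.kernel = kernel

    def __call__(self, data):
        if np.random.rand() < self.prob:
            img = data['image']
            data['image'] = cv2.GaussianBlur(
                img, (self.kernel, self.kernel), 0)
        return data
```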
#### Q3.3.21: In text detection, how do I change the image rotation angle to allow arbitrary 360-degree rotation?
**A**: Change the (-10, 10) [here](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/ppocr/data/imaug/iaa_augment.py#L64) to (-180, 180).
#### Q3.3.22: The aspect ratio of the training data is too large. How do I modify the shape?
**A**: For recognition, modify [here](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yaml#L75);
for detection, modify [here](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml#L85)
#### Q3.3.23: An elementwise_add error occurs during detection model training or prediction
......@@ -658,15 +695,23 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9
#### Q3.3.24: DB detection training uses an input size of 640. Can it be increased?
**A**: Not recommended. The detection training input size is the size after the random crop in preprocessing, not a direct resize of the original image, and in most scenarios it is not small. Enlarging it may not actually help and will slow down training. Moreover, some parameters in the code may be tuned to the preset input size, so enlarging it carries hidden risks.
#### Q3.3.25: During recognition model training, the loss decreases normally but acc stays at 0
**A**: acc being 0 in the early stage of recognition training is normal; the metric rises after more training.
#### Q3.3.26: Why does PaddleOCR always use the cosine_decay learning-rate schedule during training?
**A**: With cosine_decay, the learning rate follows a cosine curve down to 0 over the course of training. Given enough training epochs, it converges better than a constant learning rate, so we use cosine_decay throughout to obtain higher-accuracy models.
#### Q3.3.27: Which data augmentation methods does PaddleOCR support for text recognition training?
**A**: Text recognition supports augmentations such as random small crops, image balancing, added white noise, color drift, image inversion, and Text Image Augmentation (TIA). See the warp function in the [code](../../ppocr/data/imaug/rec_img_aug.py).
#### Q3.3.28: On the dygraph branch, how do I enable data augmentation for text recognition training?
**A**: Referring to the [config file](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml), add a RecAug field under Train['dataset']['transforms'] to enable augmentation. You can set aug_prob, the probability with which each augmentation is applied (default 0.4). Because of its special nature, TIA augmentation is disabled by default; enable it by setting use_tia. For details, see [ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744)
<a name="预测部署3"></a>
......@@ -781,8 +826,31 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9
**A**: When running inference with EAST or SAST models, you must specify --det_algorithm="EAST" or --det_algorithm="SAST" on the command line. DB needs no flag because "DB" is that parameter's default value: https://github.com/PaddlePaddle/PaddleOCR/blob/e7a708e9fdaf413ed7a14da8e4a7b4ac0b211e42/tools/infer/utility.py#L43
#### Q3.4.25: Python and C++ inference results of a PaddleOCR model are inconsistent?
**A**: Normally, Python and C++ inference produce the same text. If the results differ significantly,
first narrow down whether the diff comes from the detection model or the recognition model, or try another model to see whether a similar problem occurs.
Next, check whether the data preprocessing differs between the Python and C++ sides; keeping the environment unchanged, update the PaddleOCR code and try again.
If updating the code does not resolve it, raise your question in the PaddleOCR WeChat group or in an issue.
#### Q3.4.26: paddle hub serving currently only supports imgpath. Where do I change it to use imgurl?
**A**: The image is read here: https://github.com/PaddlePaddle/PaddleOCR/blob/67ef25d593c4eabfaaceb22daade4577f53bed81/deploy/hubserving/ocr_system/module.py#L55,
You can convert the URL into an np array along the following lines (https://cloud.tencent.com/developer/article/1467840):
```
from urllib import request

import cv2 as cv
import numpy as np

# Fetch the image bytes over HTTP and decode them into an OpenCV image
response = request.urlopen('http://i1.whymtj.com/uploads/tu/201902/9999/52491ae4ba.jpg')
img_array = np.array(bytearray(response.read()), dtype=np.uint8)
img = cv.imdecode(img_array, -1)
```
#### Q3.4.27: Can the C++ deployment run only OCR detection?
**A**: Yes, the recognition and detection modules are decoupled. If you want to deploy detection only, modify the main function yourself and
keep just the detection-related part: https://github.com/PaddlePaddle/PaddleOCR/blob/de3e2e7cd3b8b65ee02d7a41e570fa5b511a3c1d/deploy/cpp_infer/src/main.cpp#L72
#### Q3.4.28: In the PP-OCR system, do text detection results have confidence scores?
**A**: Text detection results do have confidence scores, but since inference does not use them, they are not explicitly returned in the final output. If you need them, add the scores info at line 155 of the [DB text detection postprocessing code](../../ppocr/postprocess/db_postprocess.py); then, at line 197 of the [detection inference code](../../tools/infer/predict_det.py), you can retrieve the detection scores. A sketch of the underlying score follows.
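For intuition, DB scores each box by averaging the probability map inside it. A self-contained, simplified stand-in for that computation (the function name and the rectangle approximation are assumptions, not the repo's exact code):
```
import numpy as np

def box_score(prob_map, box):
    # Mean probability inside the box's bounding rectangle -- the same
    # idea DBPostProcess uses to assign each detected box a confidence.
    xs = [int(p[0]) for p in box]
    ys = [int(p[1]) for p in box]
    region = prob_map[min(ys):max(ys) + 1, min(xs):max(xs) + 1]
    return float(region.mean())

prob_map = np.random.rand(32, 32).astype(np.float32)
box = [(4, 4), (12, 4), (12, 10), (4, 10)]
print(box_score(prob_map, box))
```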
#### Q3.4.29: For DB text detection, where is the code that builds the feature pyramid?
**A**: The feature pyramid construction is here: [code location](../../ppocr/modeling/necks/db_fpn.py). The ppocr/modeling folder contains the network-assembly code: architectures holds the overall pipelines for text detection and recognition; backbones the backbone networks; necks the FPN-like neck functions; heads the head functions that produce detection or recognition predictions; and transforms the TPS-like feature preprocessing modules. For more, see the [code organization guide](./tree.md)
......@@ -21,13 +21,13 @@ PaddleOCR开源的文本检测算法列表:
|EAST|MobileNetV3|78.24%|79.15%|78.69%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)|
|DB|ResNet50_vd|86.41%|78.72%|82.38%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)|
|DB|MobileNetV3|77.29%|73.08%|75.12%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)|
|SAST|ResNet50_vd|91.39%|83.77%|87.42%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)|
On the Total-Text public text detection dataset, the results are as follows:
|Model|Backbone|precision|recall|Hmean|Download link|
| --- | --- | --- | --- | --- | --- |
|SAST|ResNet50_vd|89.63%|78.44%|83.66%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)|
**Note:** The SAST model was additionally tuned with public datasets such as icdar2013, icdar2017, COCO-Text, and ArT. Download the English public datasets, organized in the format used by PaddleOCR, from [Baidu Cloud](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (extraction code: 2bpi)
......
......@@ -21,9 +21,8 @@ ln -sf <path/to/dataset> <path/to/paddle_ocr>/train_data/cls/dataset
```
" 图像文件名 图像标注信息 "
train/word_001.jpg 0
train/word_002.jpg 180
```
The final training set should have the following file structure:
......@@ -55,6 +54,8 @@ train_data/cls/word_002.jpg 180
### Start Training
Write the prepared txt file and image folder paths into the configuration file's `Train/Eval.dataset.label_file_list` and `Train/Eval.dataset.data_dir` fields; the absolute path of each image is formed by joining `Train/Eval.dataset.data_dir` with the image name recorded in the txt file.
PaddleOCR provides training, evaluation, and prediction scripts.
To start training:
......
......@@ -96,5 +96,5 @@ python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" --det_mode
In addition, the tutorials also provide other inference and deployment methods for the Chinese OCR model:
- [Server-side C++ inference](../../deploy/cpp_infer/readme.md)
- [Service deployment](../../deploy/hubserving)
- [On-device deployment (currently static graph only)](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/deploy/lite)
......@@ -23,13 +23,13 @@ On the ICDAR2015 dataset, the text detection result is as follows:
|EAST|MobileNetV3|78.24%|79.15%|78.69%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)|
|DB|ResNet50_vd|86.41%|78.72%|82.38%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)|
|DB|MobileNetV3|77.29%|73.08%|75.12%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)|
|SAST|ResNet50_vd|91.39%|83.77%|87.42%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)|
On Total-Text dataset, the text detection result is as follows:
|Model|Backbone|precision|recall|Hmean|Download link|
| --- | --- | --- | --- | --- | --- |
|SAST|ResNet50_vd|89.63%|78.44%|83.66%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)|
**Note:** Additional data, like icdar2013, icdar2017, COCO-Text, ArT, was added to the model training of SAST. Download English public dataset in organized format used by PaddleOCR from [Baidu Drive](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (download code: 2bpi).
......
......@@ -23,8 +23,8 @@ First put the training images in the same folder (train_images), and use a txt f
```
" Image file name Image annotation "
train/word_001.jpg 0
train/word_002.jpg 180
```
The final training set should have the following file structure:
......@@ -57,6 +57,7 @@ containing all images (test) and a cls_gt_test.txt. The structure of the test se
```
### TRAINING
Write the prepared txt file and image folder path into the configuration file under the `Train/Eval.dataset.label_file_list` and `Train/Eval.dataset.data_dir` fields; the absolute path of each image is formed from the `Train/Eval.dataset.data_dir` field and the image name recorded in the txt file.
PaddleOCR provides training scripts, evaluation scripts, and prediction scripts.
......
......@@ -99,5 +99,5 @@ For more text detection and recognition tandem reasoning, please refer to the do
In addition, the tutorial also provides other deployment methods for the Chinese OCR model:
- [Server-side C++ inference](../../deploy/cpp_infer/readme_en.md)
- [Service deployment](../../deploy/hubserving)
- [End-to-end deployment](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/deploy/lite)
doc/joinus.PNG (binary image updated: 174.4 KB → 108.9 KB)
......@@ -66,8 +66,10 @@ def build_dataloader(config, mode, device, logger):
batch_size = loader_config['batch_size_per_card']
drop_last = loader_config['drop_last']
num_workers = loader_config['num_workers']
if 'use_shared_memory' in loader_config.keys():
    use_shared_memory = loader_config['use_shared_memory']
else:
    use_shared_memory = True
if mode == "Train":
#Distribute data to multiple cards
batch_sampler = DistributedBatchSampler(
......@@ -75,7 +77,6 @@ def build_dataloader(config, mode, device, logger):
        batch_size=batch_size,
        shuffle=False,
        drop_last=drop_last)
else:
    # Distribute data to single card
    batch_sampler = BatchSampler(
......
......@@ -26,6 +26,8 @@ class RecMetric(object):
all_num = 0
norm_edit_dis = 0.0
for (pred, pred_conf), (target, _) in zip(preds, labels):
    pred = pred.replace(" ", "")
    target = target.replace(" ", "")
    norm_edit_dis += Levenshtein.distance(pred, target) / max(
        len(pred), len(target))
    if pred == target:
......
......@@ -60,7 +60,7 @@ class BaseRecLabelDecode(object):
def add_special_char(self, dict_character):
    return dict_character

def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
    """ convert text-index into text-label. """
    result_list = []
    ignored_tokens = self.get_ignored_tokens()
......@@ -110,7 +110,7 @@ class CTCLabelDecode(BaseRecLabelDecode):
text = self.decode(preds_idx, preds_prob)
if label is None:
    return text
label = self.decode(label)
return text, label
def add_special_char(self, dict_character):
......
......@@ -57,7 +57,7 @@ def get_image_file_list(img_file):
elif os.path.isdir(img_file):
    for single_file in os.listdir(img_file):
        file_path = os.path.join(img_file, single_file)
        if os.path.isfile(file_path) and imghdr.what(file_path) in img_end:
            imgs_lists.append(file_path)
if len(imgs_lists) == 0:
    raise Exception("not found any img file in {}".format(img_file))
......