Commit adafa3e6 authored by: W WenmuZhou

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into dygraph_rc

@@ -64,6 +64,7 @@ from libs.colorDialog import ColorDialog
 from libs.toolBar import ToolBar
 from libs.ustr import ustr
 from libs.hashableQListWidgetItem import HashableQListWidgetItem
+from libs.editinlist import EditInList

 __appname__ = 'PPOCRLabel'
@@ -147,7 +148,7 @@ class MainWindow(QMainWindow, WindowMixin):
         self.prevLabelText = getStr('tempLabel')
         self.model = 'paddle'
         self.PPreader = None
-        self.autoSaveNum = 10
+        self.autoSaveNum = 5

         #################  file list  ###############
         self.fileListWidget = QListWidget()
@@ -201,12 +202,12 @@ class MainWindow(QMainWindow, WindowMixin):
         ##################  label list  ####################
         # Create and add a widget for showing current label items
-        self.labelList = QListWidget()
+        self.labelList = EditInList()
         labelListContainer = QWidget()
         labelListContainer.setLayout(listLayout)
         self.labelList.itemActivated.connect(self.labelSelectionChanged)
         self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
-        self.labelList.itemDoubleClicked.connect(self.editLabel)
+        self.labelList.clicked.connect(self.labelList.item_clicked)
         # Connect to itemChanged to detect checkbox changes.
         self.labelList.itemChanged.connect(self.labelItemChanged)
         self.labelListDock = QDockWidget(getStr('recognitionResult'), self)
@@ -316,7 +317,7 @@ class MainWindow(QMainWindow, WindowMixin):
         self.scrollArea = scroll
         self.canvas.scrollRequest.connect(self.scrollRequest)
-        self.canvas.newShape.connect(self.newShape)
+        self.canvas.newShape.connect(partial(self.newShape, False))
         self.canvas.shapeMoved.connect(self.updateBoxlist)  # self.setDirty
         self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
         self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
@@ -354,13 +355,9 @@ class MainWindow(QMainWindow, WindowMixin):
         quit = action(getStr('quit'), self.close,
                       'Ctrl+Q', 'quit', getStr('quitApp'))

-        open = action(getStr('openFile'), self.openFile,
-                      'Ctrl+O', 'open', getStr('openFileDetail'))
-
         opendir = action(getStr('openDir'), self.openDirDialog,
                          'Ctrl+u', 'open', getStr('openDir'))

         save = action(getStr('save'), self.saveFile,
                       'Ctrl+V', 'verify', getStr('saveDetail'), enabled=False)
@@ -506,7 +503,7 @@ class MainWindow(QMainWindow, WindowMixin):
         self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)

         # Store actions for further handling.
-        self.actions = struct(save=save, open=open, resetAll=resetAll, deleteImg=deleteImg,
+        self.actions = struct(save=save, resetAll=resetAll, deleteImg=deleteImg,
                               lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
                               saveRec=saveRec, singleRere=singleRere, AutoRec=AutoRec, reRec=reRec,
                               createMode=createMode, editMode=editMode,
@@ -515,7 +512,7 @@ class MainWindow(QMainWindow, WindowMixin):
                               fitWindow=fitWindow, fitWidth=fitWidth,
                               zoomActions=zoomActions, saveLabel=saveLabel,
                               fileMenuActions=(
-                                  open, opendir, saveLabel, resetAll, quit),
+                                  opendir, saveLabel, resetAll, quit),
                               beginner=(), advanced=(),
                               editMenu=(createpoly, edit, copy, delete, singleRere,
                                         None, color1, self.drawSquaresOption),
@@ -537,11 +534,6 @@ class MainWindow(QMainWindow, WindowMixin):
                               labelList=labelMenu)

-        # Sync single class mode from PR#106
-        self.singleClassMode = QAction(getStr('singleClsMode'), self)
-        self.singleClassMode.setShortcut("Ctrl+Shift+S")
-        self.singleClassMode.setCheckable(True)
-        self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
         self.lastLabel = None
         # Add option to enable/disable labels being displayed at the top of bounding boxes
         self.displayLabelOption = QAction(getStr('displayLabel'), self)
@@ -550,12 +542,18 @@ class MainWindow(QMainWindow, WindowMixin):
         self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
         self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)

+        self.labelDialogOption = QAction(getStr('labelDialogOption'), self)
+        self.labelDialogOption.setShortcut("Ctrl+Shift+L")
+        self.labelDialogOption.setCheckable(True)
+        self.labelDialogOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
+        self.labelDialogOption.triggered.connect(self.speedChoose)
+
         addActions(self.menus.file,
                    (opendir, None, saveLabel, saveRec, None, resetAll, deleteImg, quit))
         addActions(self.menus.help, (showSteps, showInfo))
         addActions(self.menus.view, (
-            self.displayLabelOption,  # labels,
+            self.displayLabelOption, self.labelDialogOption,
             None,
             hideAll, showAll, None,
             zoomIn, zoomOut, zoomOrg, None,
@@ -1062,6 +1060,7 @@ class MainWindow(QMainWindow, WindowMixin):
     def labelSelectionChanged(self):
         item = self.currentItem()
+        self.labelList.scrollToItem(item, QAbstractItemView.EnsureVisible)
         if item and self.canvas.editing():
             self._noSelectionSlot = True
             self.canvas.selectShape(self.itemsToShapes[item])
@@ -1069,6 +1068,7 @@ class MainWindow(QMainWindow, WindowMixin):
     def boxSelectionChanged(self):
         item = self.currentBox()
+        self.BoxList.scrollToItem(item, QAbstractItemView.EnsureVisible)
         if item and self.canvas.editing():
             self._noSelectionSlot = True
             self.canvas.selectShape(self.itemsToShapesbox[item])
@@ -1089,7 +1089,7 @@ class MainWindow(QMainWindow, WindowMixin):
         # self.actions.save.setEnabled(True)

     # Callback functions:
-    def newShape(self):
+    def newShape(self, value=True):
         """Pop-up and give focus to the label editor.

         position MUST be in global coordinates.
@@ -1098,12 +1098,11 @@ class MainWindow(QMainWindow, WindowMixin):
             self.labelDialog = LabelDialog(
                 parent=self, listItem=self.labelHist)

-        # Sync single class mode from PR#106
-        if self.singleClassMode.isChecked() and self.lastLabel:
-            text = self.lastLabel
-        else:
+        if value:
             text = self.labelDialog.popUp(text=self.prevLabelText)
             self.lastLabel = text
+        else:
+            text = self.prevLabelText

         if text is not None:
             self.prevLabelText = self.stringBundle.getString('tempLabel')
@@ -1364,7 +1363,6 @@ class MainWindow(QMainWindow, WindowMixin):
         else:
             settings[SETTING_LAST_OPEN_DIR] = ''

-        settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
         settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
         settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
         settings.save()
@@ -1497,35 +1495,6 @@ class MainWindow(QMainWindow, WindowMixin):
         print('file name in openNext is ', filename)
         self.loadFile(filename)

-    def openFile(self, _value=False):
-        if not self.mayContinue():
-            return
-        path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
-        formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
-        filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
-        filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
-        if filename:
-            if isinstance(filename, (tuple, list)):
-                filename = filename[0]
-            self.loadFile(filename)
-            # print('filename in openfile is ', self.filePath)
-            self.filePath = None
-            self.fileListWidget.clear()
-            self.iconlist.clear()
-            self.mImgList = [filename]
-            self.openNextImg()
-            if self.validFilestate(filename) is True:
-                item = QListWidgetItem(newIcon('done'), filename)
-                self.setClean()
-            elif self.validFilestate(filename) is None:
-                item = QListWidgetItem(newIcon('close'), filename)
-            else:
-                item = QListWidgetItem(newIcon('close'), filename)
-                self.setDirty()
-            self.fileListWidget.addItem(filename)
-            self.additems5(None)
-            print('opened image is', filename)

     def updateFileListIcon(self, filename):
         pass
@@ -1963,6 +1932,16 @@ class MainWindow(QMainWindow, WindowMixin):
         QMessageBox.information(self, "Information", "Cropped images has been saved in " + str(crop_img_dir))

+    def speedChoose(self):
+        if self.labelDialogOption.isChecked():
+            self.canvas.newShape.disconnect()
+            self.canvas.newShape.connect(partial(self.newShape, True))
+        else:
+            self.canvas.newShape.disconnect()
+            self.canvas.newShape.connect(partial(self.newShape, False))
+
     def inverted(color):
         return QColor(*[255 - v for v in color.getRgb()])
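The two branches of `speedChoose` above differ only in the boolean handed to `partial`, so the rewiring collapses to a single disconnect/reconnect of the canvas signal. A condensed equivalent, as a sketch (same attribute names as the diff; written as a standalone function for illustration only):

```python
from functools import partial

def speed_choose(window):
    # Rewire canvas.newShape so that newShape() pops the label dialog only
    # when "Pop-up Label Input Dialog" is checked in the View menu.
    window.canvas.newShape.disconnect()
    pop_dialog = window.labelDialogOption.isChecked()
    window.canvas.newShape.connect(partial(window.newShape, pop_dialog))
```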
......
@@ -2,19 +2,27 @@ English | [简体中文](README_ch.md)

 # PPOCRLabel

-PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field. It is written in python3 and pyqt5, supporting rectangular box annotation and four-point annotation modes. Annotations can be directly used for the training of PPOCR detection and recognition models.
+PPOCRLabel is a semi-automatic graphic annotation tool for the OCR field, with a built-in PPOCR model that automatically detects and re-recognizes data. It is written in Python3 and PyQt5, supporting rectangular box annotation and four-point annotation modes. Annotations can be used directly for training PPOCR detection and recognition models.

 <img src="./data/gif/steps_en.gif" width="100%"/>

 ### Recent Update

+- 2021.1.11: Optimized the labeling experience (by [edencfc](https://github.com/edencfc)):
+  - Users can choose in "View - Pop-up Label Input Dialog" whether the label input dialog pops up after a detection box is drawn.
+  - The recognition result scrolls synchronously when users click the related detection box.
+  - Click to modify the recognition result. (If you cannot change the result, please switch to the system default input method, or switch back to the original input method again.)
 - 2020.12.18: Support re-recognition of a single label box (by [ninetailskim](https://github.com/ninetailskim)), improved shortcut keys.

+### TODO:
+
+- Lock box mode: For data from the same scene, the size and position of a locked detection box can be transferred between different pictures.
+- Experience optimization: Add undo and batch operations (move, copy, delete, etc.), and optimize the annotation process.
+
 ## Installation

 ### 1. Install PaddleOCR

-Refer to [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR
+PaddleOCR models are built into PPOCRLabel, so please refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR and make sure it works.

 ### 2. Install PPOCRLabel
@@ -60,7 +68,7 @@ python3 PPOCRLabel.py

 4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangle detection box. Click and release the left mouse button to select a region to annotate the text area.

-4.2 Press 'P' to enter four-point labeling mode which enables you to create any four-point shape by clicking four points with the left mouse button in succession and DOUBLE CLICK the left mouse as the signal of labeling completion.
+4.2 Press 'Q' to enter four-point labeling mode, which enables you to create any four-point shape by clicking four points with the left mouse button in succession, then DOUBLE CLICK the left mouse button to signal labeling completion.

 5. After the marking frame is drawn, the user clicks "OK", and the detection frame will be pre-assigned a "TEMPORARY" label.

@@ -72,7 +80,7 @@ python3 PPOCRLabel.py

 9. Click "Delete Image" and the image will be deleted to the recycle bin.

-10. Labeling result: the user can save manually through the menu "File - Save Label", while the program will also save automatically after every 10 images confirmed by the user. The manually checked labels will be stored in *Label.txt* under the opened picture folder.
+10. Labeling result: the user can save manually through the menu "File - Save Label", while the program will also save automatically after every 5 images confirmed by the user. The manually checked labels will be stored in *Label.txt* under the opened picture folder.
     Click "PaddleOCR"-"Save Recognition Results" in the menu bar, and the recognition training data of such pictures will be saved in the *crop_img* folder, with the recognition labels saved in *rec_gt.txt*<sup>[4]</sup>.

 ### Note

@@ -88,7 +96,7 @@ Therefore, if the recognition result has been manually changed before, it may ch

 | File name | Description |
 | :-----------: | :----------------------------------------------------------: |
-| Label.txt | The detection label file can be directly used for PPOCR detection model training. After the user saves 10 label results, the file will be saved automatically. It will also be written when the user closes the application or changes the file folder. |
+| Label.txt | The detection label file can be directly used for PPOCR detection model training. After the user saves 5 label results, the file will be saved automatically. It will also be written when the user closes the application or changes the file folder. |
 | fileState.txt | The picture status file saves the images in the current folder that have been manually confirmed by the user. |
 | Cache.cach | Cache file that saves the results of model recognition. |
 | rec_gt.txt | The recognition label file, which can be directly used for PPOCR recognition model training, is generated after the user clicks "File"-"Save recognition result" in the menu bar. |

@@ -124,6 +132,15 @@ Therefore, if the recognition result has been manually changed before, it may ch

 - Custom model: The model trained by users can be replaced by modifying the [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) in PPOCRLabel.py, referring to [Custom Model Code](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/whl_en.md#use-custom-model)
+### Save
+
+PPOCRLabel supports three ways to save Label.txt:
+
+- Automatic save: When the program detects that the user has manually confirmed 5 pictures, it automatically writes the annotations into Label.txt. The user can change the value of ``self.autoSaveNum`` in ``PPOCRLabel.py`` to set how many confirmed images trigger an automatic save (see the sketch below).
+- Manual save: Click "File - Save Label" to save the labels manually.
+- Save on close: annotations are also written when the application is closed.
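As a rough illustration of the automatic-save rule above, the logic amounts to a counter that is reset on every write. A minimal sketch, assuming hypothetical helper names (only `autoSaveNum` mirrors the actual attribute in `PPOCRLabel.py`):

```python
# Minimal sketch of count-based autosave; class and method names are hypothetical.
class AutoSaver:
    def __init__(self, auto_save_num=5):
        self.autoSaveNum = auto_save_num  # same knob as in PPOCRLabel.py
        self.confirmed = 0                # images confirmed since the last write

    def on_image_confirmed(self, save_fn):
        """Call after the user confirms an image; save_fn writes Label.txt."""
        self.confirmed += 1
        if self.confirmed >= self.autoSaveNum:
            save_fn()
            self.confirmed = 0
```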
 ### Export partial recognition results

 For data that are difficult to recognize, **uncheck** the corresponding tags in the recognition results checkbox, and those recognition results will not be exported.
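In terms of the Qt widgets PPOCRLabel is built on, exporting only checked results boils down to filtering a `QListWidget` by check state. A hedged sketch (the function and variable names are assumptions, not the actual export code):

```python
from PyQt5.QtCore import Qt

def collect_checked_results(label_list):
    """Return the text of checked items only; unchecked boxes are skipped."""
    results = []
    for i in range(label_list.count()):
        item = label_list.item(i)
        if item.checkState() == Qt.Checked:
            results.append(item.text())
    return results
```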
......
@@ -2,18 +2,30 @@

 # PPOCRLabel

-PPOCRLabel is a semi-automatic graphic annotation tool for the OCR field, written in Python3 and PyQt5, supporting rectangular box annotation and four-point annotation modes. The exported format can be used directly for training PPOCR detection and recognition models.
+PPOCRLabel is a semi-automatic graphic annotation tool for the OCR field, with a built-in PPOCR model that automatically annotates and re-recognizes data. It is written in Python3 and PyQt5, supporting rectangular box annotation and four-point annotation modes. The exported format can be used directly for training PPOCR detection and recognition models.

 <img src="./data/gif/steps.gif" width="100%"/>

 #### Recent Updates

+- 2021.1.11: Optimized the labeling experience (by [edencfc](https://github.com/edencfc)):
+  - Users can choose in "View - Pop-up Label Input Dialog" whether the label input dialog pops up after a detection box is drawn.
+  - The recognition results scroll synchronously with the detection boxes.
+  - Recognition results can now be modified with a single click. (If you cannot modify the result, please switch to the system default input method, or switch back to the original input method again.)
 - 2020.12.18: Support re-recognition of a single label box (by [ninetailskim](https://github.com/ninetailskim)), improved shortcut keys.

+#### Coming Soon
+
+- Lock box mode: For data from the same scene, the size and position of a locked detection box can be transferred between different images.
+- Experience optimization: add undo and batch operations such as move, copy and delete, and optimize the annotation workflow.
+
+If you are interested in the above, or have different ideas for improving the tool, you are welcome to join us and develop it together.
+
 ## Installation

 ### 1. Install PaddleOCR

-Refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR.
+PPOCRLabel has the PaddleOCR model built in, so please refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR and make sure it is installed successfully.

 ### 2. Install PPOCRLabel

 #### Windows + Anaconda
@@ -49,13 +61,13 @@ python3 PPOCRLabel.py --lang ch

 1. Install and run: install and launch the program using the commands above.
 2. Open folder: click "File" - "Open Dir" in the menu bar and select the folder of images to be labeled<sup>[1]</sup>.
 3. Auto annotation: click "Auto Annotation"; the ultra-lightweight PPOCR model will automatically annotate the images whose status<sup>[2]</sup> before the file name is "X".
-4. Manual annotation: click "Create RectBox" (or simply press 'W' in English keyboard mode) to manually draw boxes for the parts the model did not detect in the current image. Press 'P' (or click "Edit" - "Four-point Annotation") to use four-point annotation mode: click four points in turn, then double-click the left mouse button to finish.
+4. Manual annotation: click "Create RectBox" (or simply press 'W' in English keyboard mode) to manually draw boxes for the parts the model did not detect in the current image. Press 'Q' (or click "Edit" - "Four-point Annotation") to use four-point annotation mode: click four points in turn, then double-click the left mouse button to finish.
 5. After a box is drawn, click "OK"; the detection box will first be pre-assigned a "TEMPORARY" label.
 6. Re-recognition: after drawing/adjusting all detection boxes in the image, click "Re-recognition"; the PPOCR model will re-recognize **all detection boxes** in the current image<sup>[3]</sup>.
 7. Content modification: double-click a recognition result to manually correct inaccurate text.
 8. Confirm the label: click "Confirm"; the image status switches to "√" and the next image is loaded (the results are not written to file at this point).
 9. Delete: click "Delete Image" and the image will be moved to the recycle bin.
-10. Save results: users can save manually via "File - Save Label" in the menu, and the program also saves automatically after every 10 images confirmed by the user. Manually confirmed labels are stored in *Label.txt* under the opened image folder. Clicking "File" - "Save Recognition Results" in the menu bar saves the recognition training data of such images in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.
+10. Save results: users can save manually via "File - Save Label" in the menu, and the program also saves automatically after every 5 images confirmed by the user. Manually confirmed labels are stored in *Label.txt* under the opened image folder. Clicking "File" - "Save Recognition Results" in the menu bar saves the recognition training data of such images in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.

 ### Note

@@ -69,7 +81,7 @@ python3 PPOCRLabel.py --lang ch

 | File name | Description |
 | :-----------: | :----------------------------------------------------------: |
-| Label.txt | Detection labels, which can be used directly for PPOCR detection model training. The program writes the file automatically after every 10 saved detection results, and also when the user closes the application or switches the file path. |
+| Label.txt | Detection labels, which can be used directly for PPOCR detection model training. The program writes the file automatically after every 5 saved detection results, and also when the user closes the application or switches the file path. |
 | fileState.txt | Image status file, recording the names of the images in the current folder that have been manually confirmed by the user. |
 | Cache.cach | Cache file that saves the results of automatic model recognition. |
 | rec_gt.txt | Recognition labels, which can be used directly for PPOCR recognition model training; generated after the user clicks "File" - "Save Recognition Results" in the menu bar. |

@@ -104,6 +116,14 @@ python3 PPOCRLabel.py --lang ch

 - Custom model: following [Use Custom Model](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md#%E8%87%AA%E5%AE%9A%E4%B9%89%E6%A8%A1%E5%9E%8B), users can replace the model with one they trained themselves by modifying the [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) in PPOCRLabel.py.
+### Saving
+
+PPOCRLabel supports three ways of saving Label.txt:
+
+- Automatic save: when the program detects that the user has manually confirmed 5 images, it automatically writes the annotation results into Label.txt. The user can set how many confirmed images trigger an automatic save by changing the value of ``self.autoSaveNum`` in ``PPOCRLabel.py``.
+- Manual save: click "File - Save Label" to save the labels manually.
+- Save on close: annotations are also written when the application is closed.
 ### Export partial recognition results

 For data that are difficult to recognize, **uncheck** the corresponding tags in the recognition results checkbox, and those recognition results will not be exported.

@@ -115,7 +135,7 @@ python3 PPOCRLabel.py --lang ch

 - PPOCRLabel **does not support automatic annotation** for images with Chinese file names.
 - For Linux users: if an error starting with **objc[XXXXX]** appears when opening the software, your opencv version is too high; installing version 4.2 is recommended:
 ```
 pip install opencv-python==4.2.0.32
 ```
@@ -129,6 +149,7 @@ python3 PPOCRLabel.py --lang ch
 ```
 pip install opencv-contrib-python-headless
 ```

 ### References

 1. [Tzutalin. LabelImg. Git code (2015)](https://github.com/tzutalin/labelImg)
from PyQt5.QtCore import QModelIndex
from PyQt5.QtWidgets import QListWidget


class EditInList(QListWidget):
    def __init__(self):
        super(EditInList, self).__init__()
        self.edited_item = None
        # a single click starts editing the clicked item
        self.clicked.connect(self.item_clicked)

    def item_clicked(self, modelindex: QModelIndex) -> None:
        # close the editor on the previously edited item, then open a
        # persistent editor on the item that was just clicked
        self.edited_item = self.currentItem()
        self.closePersistentEditor(self.edited_item)
        item = self.item(modelindex.row())
        self.edited_item = item
        self.openPersistentEditor(item)
        self.editItem(item)

    def mouseDoubleClickEvent(self, event):
        # a double click closes all open editors
        for i in range(self.count()):
            self.closePersistentEditor(self.item(i))

    def leaveEvent(self, event):
        # close all open editors when the mouse leaves the widget
        for i in range(self.count()):
            self.closePersistentEditor(self.item(i))
\ No newline at end of file
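A minimal usage sketch of the widget above, mirroring how PPOCRLabel.py wires it in this commit (a plain `QListWidget` is simply swapped for `EditInList`); demo only:

```python
# Demo: single click edits an item; double click or leaving the widget closes editors.
import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    widget = EditInList()
    widget.addItems(['recognition result 1', 'recognition result 2'])
    widget.show()
    sys.exit(app.exec_())
```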
This diff is collapsed.
@@ -42,7 +42,7 @@ zoomin=放大画面
 info=信息
 openAnnotation=开启标签
 prevImgDetail=上一个图像
-fitWidth=缩放到跟当前画面一样宽
+fitWidth=缩放到当前画面宽度
 zoomout=缩小画面
 changeSavedAnnotationDir=更改保存标签文件的预设目录
 nextImgDetail=下一个图像
@@ -96,3 +96,4 @@ hideBox=隐藏所有标注
 showBox=显示所有标注
 saveLabel=保存标记结果
 singleRe=重识别此区块
+labelDialogOption=弹出标记输入框
\ No newline at end of file
@@ -96,3 +96,4 @@ hideBox=Hide All Box
 showBox=Show All Box
 saveLabel=Save Label
 singleRe=Re-recognition RectBox
+labelDialogOption=Pop-up Label Input Dialog
\ No newline at end of file
@@ -173,7 +173,7 @@ This project is released under <a href="https://github.com/PaddlePaddle/PaddleOC

 We welcome all contributions to PaddleOCR and greatly appreciate your feedback.

 - Many thanks to [Khanh Tran](https://github.com/xxxpsyduck) and [Karl Horky](https://github.com/karlhorky) for contributing and revising the English documentation.
-- Many thanks to [zhangxin](https://github.com/ZhangXinNan) for contributing the new visualize function, adding .gitgnore, and discarding the need to set PYTHONPATH manually.
+- Many thanks to [zhangxin](https://github.com/ZhangXinNan) for contributing the new visualize function, adding .gitignore, and discarding the need to set PYTHONPATH manually.
 - Many thanks to [lyl120117](https://github.com/lyl120117) for contributing the code for printing the network structure.
 - Thanks [xiangyubo](https://github.com/xiangyubo) for contributing the handwritten Chinese OCR datasets.
 - Thanks [authorfu](https://github.com/authorfu) for contributing the Android demo and [xiadeye](https://github.com/xiadeye) for contributing the iOS demo.
......
@@ -8,7 +8,7 @@ PaddleOCR supports both dynamic graph and static graph programming paradigms
 - Static graph version: develop branch

 **Recent Updates**
-- 2020.12.28 [FAQ](./doc/doc_ch/FAQ.md) added 5 frequently asked questions, 137 in total; updated every Monday, stay tuned.
+- 2021.1.18 [FAQ](./doc/doc_ch/FAQ.md) added 5 frequently asked questions, 152 in total; updated every Monday, stay tuned.
 - 2020.12.15 Updated the data synthesis tool [Style-Text](./StyleText/README_ch.md), which can batch-synthesize large numbers of images similar to the target scene; verified in multiple scenarios with clearly improved results.
 - 2020.11.25 Updated the semi-automatic annotation tool [PPOCRLabel](./PPOCRLabel/README_ch.md), which helps developers complete annotation tasks efficiently, with an output format that connects seamlessly to PP-OCR training.
 - 2020.9.22 Updated the PP-OCR technical article, https://arxiv.org/abs/2009.09941

@@ -101,8 +101,8 @@ PaddleOCR supports both dynamic graph and static graph programming paradigms
 - [Visual results](#效果展示)
 - FAQ
   - [[Selected] 10 selected OCR questions](./doc/doc_ch/FAQ.md)
-  - [[Theory] 31 general OCR questions](./doc/doc_ch/FAQ.md)
-  - [[Practice] 96 PaddleOCR questions](./doc/doc_ch/FAQ.md)
+  - [[Theory] 32 general OCR questions](./doc/doc_ch/FAQ.md)
+  - [[Practice] 110 PaddleOCR questions](./doc/doc_ch/FAQ.md)
 - [Technical exchange group](#欢迎加入PaddleOCR技术交流群)
 - [References](./doc/doc_ch/reference.md)
 - [License](#许可证书)

@@ -149,7 +149,7 @@ PP-OCR is a practical ultra-lightweight OCR system, mainly composed of DB text detection [2], detection
 - Many thanks to [Khanh Tran](https://github.com/xxxpsyduck) and [Karl Horky](https://github.com/karlhorky) for contributing and revising the English documentation
-- Many thanks to [zhangxin](https://github.com/ZhangXinNan) ([Blog](https://blog.csdn.net/sdlypyzq)) for contributing a new visualization method, adding .gitgnore, and handling the manual setting of the PYTHONPATH environment variable
+- Many thanks to [zhangxin](https://github.com/ZhangXinNan) ([Blog](https://blog.csdn.net/sdlypyzq)) for contributing a new visualization method, adding .gitignore, and handling the manual setting of the PYTHONPATH environment variable
 - Many thanks to [lyl120117](https://github.com/lyl120117) for contributing the code for printing the network structure
 - Many thanks to [xiangyubo](https://github.com/xiangyubo) for contributing the handwritten Chinese OCR dataset
 - Many thanks to [authorfu](https://github.com/authorfu) for contributing the Android demo and [xiadeye](https://github.com/xiadeye) for contributing the iOS demo
......
@@ -52,7 +52,7 @@ If you save the model in another location, please modify the address of the model
 ```
 bg_generator:
-  pretrain: style_text_rec/bg_generator
+  pretrain: style_text_models/bg_generator
   ...
 text_generator:
   pretrain: style_text_models/text_generator
 ```
......
@@ -102,6 +102,7 @@ Train:
     drop_last: False
     batch_size_per_card: 16
     num_workers: 8
+    use_shared_memory: False

 Eval:
   dataset:
@@ -129,3 +130,4 @@ Eval:
     drop_last: False
     batch_size_per_card: 1 # must be 1
     num_workers: 8
+    use_shared_memory: False
\ No newline at end of file
@@ -76,6 +76,7 @@ Train:
     batch_size_per_card: 256
     drop_last: True
     num_workers: 8
+    use_shared_memory: False

 Eval:
   dataset:
@@ -96,3 +97,4 @@ Eval:
     drop_last: False
     batch_size_per_card: 256
     num_workers: 4
+    use_shared_memory: False
@@ -138,12 +138,22 @@ endif()

 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
+  if(WIN32)
+    set(DEPS
+        ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+  else()
     set(DEPS
         ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+  endif()
 else()
+  if(WIN32)
+    set(DEPS
+        ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+  else()
     set(DEPS
         ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
   endif()
+endif(WITH_STATIC_LIB)

 if (NOT WIN32)
   set(DEPS ${DEPS}
......
@@ -62,6 +62,10 @@ public:
     this->cls_thresh = stod(config_map_["cls_thresh"]);

     this->visualize = bool(stoi(config_map_["visualize"]));
+
+    this->use_tensorrt = bool(stoi(config_map_["use_tensorrt"]));
+
+    this->use_fp16 = bool(stod(config_map_["use_fp16"]));
   }

   bool use_gpu = false;
@@ -96,6 +100,10 @@ public:
   bool visualize = true;

+  bool use_tensorrt = false;
+
+  bool use_fp16 = false;
+
   void PrintConfigInfo();

 private:
......
@@ -39,7 +39,8 @@ public:
   explicit Classifier(const std::string &model_dir, const bool &use_gpu,
                       const int &gpu_id, const int &gpu_mem,
                       const int &cpu_math_library_num_threads,
-                      const bool &use_mkldnn, const double &cls_thresh) {
+                      const bool &use_mkldnn, const double &cls_thresh,
+                      const bool &use_tensorrt, const bool &use_fp16) {
     this->use_gpu_ = use_gpu;
     this->gpu_id_ = gpu_id;
     this->gpu_mem_ = gpu_mem;
@@ -47,6 +48,8 @@ public:
     this->use_mkldnn_ = use_mkldnn;
     this->cls_thresh = cls_thresh;
+    this->use_tensorrt_ = use_tensorrt;
+    this->use_fp16_ = use_fp16;

     LoadModel(model_dir);
   }
@@ -69,7 +72,8 @@ private:
   std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
   std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
   bool is_scale_ = true;
+  bool use_tensorrt_ = false;
+  bool use_fp16_ = false;
   // pre-process
   ClsResizeImg resize_op_;
   Normalize normalize_op_;
......
@@ -44,8 +44,8 @@ public:
                      const bool &use_mkldnn, const int &max_side_len,
                      const double &det_db_thresh,
                      const double &det_db_box_thresh,
-                     const double &det_db_unclip_ratio,
-                     const bool &visualize) {
+                     const double &det_db_unclip_ratio, const bool &visualize,
+                     const bool &use_tensorrt, const bool &use_fp16) {
     this->use_gpu_ = use_gpu;
     this->gpu_id_ = gpu_id;
     this->gpu_mem_ = gpu_mem;
@@ -59,6 +59,8 @@ public:
     this->det_db_unclip_ratio_ = det_db_unclip_ratio;
     this->visualize_ = visualize;
+    this->use_tensorrt_ = use_tensorrt;
+    this->use_fp16_ = use_fp16;

     LoadModel(model_dir);
   }
@@ -85,6 +87,8 @@ private:
   double det_db_unclip_ratio_ = 2.0;
   bool visualize_ = true;
+  bool use_tensorrt_ = false;
+  bool use_fp16_ = false;

   std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
   std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
......
@@ -41,12 +41,15 @@ public:
   explicit CRNNRecognizer(const std::string &model_dir, const bool &use_gpu,
                           const int &gpu_id, const int &gpu_mem,
                           const int &cpu_math_library_num_threads,
-                          const bool &use_mkldnn, const string &label_path) {
+                          const bool &use_mkldnn, const string &label_path,
+                          const bool &use_tensorrt, const bool &use_fp16) {
     this->use_gpu_ = use_gpu;
     this->gpu_id_ = gpu_id;
     this->gpu_mem_ = gpu_mem;
     this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
     this->use_mkldnn_ = use_mkldnn;
+    this->use_tensorrt_ = use_tensorrt;
+    this->use_fp16_ = use_fp16;

     this->label_list_ = Utility::ReadDict(label_path);
     this->label_list_.insert(this->label_list_.begin(),
@@ -76,7 +79,8 @@ private:
   std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
   std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
   bool is_scale_ = true;
+  bool use_tensorrt_ = false;
+  bool use_fp16_ = false;
   // pre-process
   CrnnResizeImg resize_op_;
   Normalize normalize_op_;
......
@@ -47,18 +47,20 @@ public:
 class ResizeImgType0 {
 public:
   virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len,
-                   float &ratio_h, float &ratio_w);
+                   float &ratio_h, float &ratio_w, bool use_tensorrt);
 };

 class CrnnResizeImg {
 public:
   virtual void Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
+                   bool use_tensorrt = false,
                    const std::vector<int> &rec_image_shape = {3, 32, 320});
 };

 class ClsResizeImg {
 public:
   virtual void Run(const cv::Mat &img, cv::Mat &resize_img,
+                   bool use_tensorrt = false,
                    const std::vector<int> &rec_image_shape = {3, 48, 192});
 };
......
@@ -54,18 +54,20 @@ int main(int argc, char **argv) {
                      config.gpu_mem, config.cpu_math_library_num_threads,
                      config.use_mkldnn, config.max_side_len, config.det_db_thresh,
                      config.det_db_box_thresh, config.det_db_unclip_ratio,
-                     config.visualize);
+                     config.visualize, config.use_tensorrt, config.use_fp16);
   Classifier *cls = nullptr;
   if (config.use_angle_cls == true) {
     cls = new Classifier(config.cls_model_dir, config.use_gpu, config.gpu_id,
                          config.gpu_mem, config.cpu_math_library_num_threads,
-                         config.use_mkldnn, config.cls_thresh);
+                         config.use_mkldnn, config.cls_thresh,
+                         config.use_tensorrt, config.use_fp16);
   }

   CRNNRecognizer rec(config.rec_model_dir, config.use_gpu, config.gpu_id,
                      config.gpu_mem, config.cpu_math_library_num_threads,
-                     config.use_mkldnn, config.char_list_file);
+                     config.use_mkldnn, config.char_list_file,
+                     config.use_tensorrt, config.use_fp16);

   auto start = std::chrono::system_clock::now();
   std::vector<std::vector<std::vector<int>>> boxes;
@@ -75,11 +77,11 @@ int main(int argc, char **argv) {
   auto end = std::chrono::system_clock::now();
   auto duration =
       std::chrono::duration_cast<std::chrono::microseconds>(end - start);
-  std::cout << "花费了"
+  std::cout << "Cost "
             << double(duration.count()) *
                    std::chrono::microseconds::period::num /
                    std::chrono::microseconds::period::den
-            << "秒" << std::endl;
+            << "s" << std::endl;

   return 0;
 }
@@ -25,7 +25,7 @@ cv::Mat Classifier::Run(cv::Mat &img) {
   int index = 0;
   float wh_ratio = float(img.cols) / float(img.rows);

-  this->resize_op_.Run(img, resize_img, cls_image_shape);
+  this->resize_op_.Run(img, resize_img, this->use_tensorrt_, cls_image_shape);
   this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                           this->is_scale_);
@@ -76,6 +76,13 @@ void Classifier::LoadModel(const std::string &model_dir) {
   if (this->use_gpu_) {
     config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
+    if (this->use_tensorrt_) {
+      config.EnableTensorRtEngine(
+          1 << 20, 10, 3,
+          this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
+                          : paddle_infer::Config::Precision::kFloat32,
+          false, false);
+    }
   } else {
     config.DisableGpu();
     if (this->use_mkldnn_) {
......
@@ -24,10 +24,13 @@ void DBDetector::LoadModel(const std::string &model_dir) {
   if (this->use_gpu_) {
     config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
-    // config.EnableTensorRtEngine(
-    //     1 << 20, 1, 3,
-    //     AnalysisConfig::Precision::kFloat32,
-    //     false, false);
+    if (this->use_tensorrt_) {
+      config.EnableTensorRtEngine(
+          1 << 20, 10, 3,
+          this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
+                          : paddle_infer::Config::Precision::kFloat32,
+          false, false);
+    }
   } else {
     config.DisableGpu();
     if (this->use_mkldnn_) {
@@ -58,7 +61,8 @@ void DBDetector::Run(cv::Mat &img,
   cv::Mat srcimg;
   cv::Mat resize_img;
   img.copyTo(srcimg);
-  this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w);
+  this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w,
+                       this->use_tensorrt_);

   this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                           this->is_scale_);
......
@@ -33,7 +33,7 @@ void CRNNRecognizer::Run(std::vector<std::vector<std::vector<int>>> boxes,
     float wh_ratio = float(crop_img.cols) / float(crop_img.rows);

-    this->resize_op_.Run(crop_img, resize_img, wh_ratio);
+    this->resize_op_.Run(crop_img, resize_img, wh_ratio, this->use_tensorrt_);
     this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                             this->is_scale_);
@@ -76,7 +76,7 @@ void CRNNRecognizer::Run(std::vector<std::vector<std::vector<int>>> boxes,
           float(*std::max_element(&predict_batch[n * predict_shape[2]],
                                   &predict_batch[(n + 1) * predict_shape[2]]));
-      if (argmax_idx > 0 && (not(i > 0 && argmax_idx == last_index))) {
+      if (argmax_idx > 0 && (!(i > 0 && argmax_idx == last_index))) {
         score += max_value;
         count += 1;
         str_res.push_back(label_list_[argmax_idx]);
@@ -99,6 +99,13 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
   if (this->use_gpu_) {
     config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
+    if (this->use_tensorrt_) {
+      config.EnableTensorRtEngine(
+          1 << 20, 10, 3,
+          this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
+                          : paddle_infer::Config::Precision::kFloat32,
+          false, false);
+    }
   } else {
     config.DisableGpu();
     if (this->use_mkldnn_) {
......
@@ -60,7 +60,8 @@ void Normalize::Run(cv::Mat *im, const std::vector<float> &mean,
 }

 void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
-                         int max_size_len, float &ratio_h, float &ratio_w) {
+                         int max_size_len, float &ratio_h, float &ratio_w,
+                         bool use_tensorrt) {
   int w = img.cols;
   int h = img.rows;
@@ -89,14 +90,19 @@ void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
     resize_w = 32;
   else
     resize_w = (resize_w / 32) * 32;
-  cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
-  ratio_h = float(resize_h) / float(h);
-  ratio_w = float(resize_w) / float(w);
+  if (!use_tensorrt) {
+    cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
+    ratio_h = float(resize_h) / float(h);
+    ratio_w = float(resize_w) / float(w);
+  } else {
+    cv::resize(img, resize_img, cv::Size(640, 640));
+    ratio_h = float(640) / float(h);
+    ratio_w = float(640) / float(w);
+  }
 }

 void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
+                        bool use_tensorrt,
                         const std::vector<int> &rec_image_shape) {
   int imgC, imgH, imgW;
   imgC = rec_image_shape[0];
@@ -111,12 +117,27 @@ void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
     resize_w = imgW;
   else
     resize_w = int(ceilf(imgH * ratio));
-  cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
-             cv::INTER_LINEAR);
+  if (!use_tensorrt) {
+    cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
+               cv::INTER_LINEAR);
+    cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0,
+                       int(imgW - resize_img.cols), cv::BORDER_CONSTANT,
+                       {127, 127, 127});
+  } else {
+    int k = int(img.cols * 32 / img.rows);
+    if (k >= 100) {
+      cv::resize(img, resize_img, cv::Size(100, 32), 0.f, 0.f,
+                 cv::INTER_LINEAR);
+    } else {
+      cv::resize(img, resize_img, cv::Size(k, 32), 0.f, 0.f, cv::INTER_LINEAR);
+      cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0, int(100 - k),
+                         cv::BORDER_CONSTANT, {127, 127, 127});
+    }
+  }
 }

 void ClsResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,
+                       bool use_tensorrt,
                        const std::vector<int> &rec_image_shape) {
   int imgC, imgH, imgW;
   imgC = rec_image_shape[0];
@@ -130,12 +151,16 @@ void ClsResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,
   else
     resize_w = int(ceilf(imgH * ratio));

-  cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
-             cv::INTER_LINEAR);
-  if (resize_w < imgW) {
-    cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0, imgW - resize_w,
-                       cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
-  }
+  if (!use_tensorrt) {
+    cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
+               cv::INTER_LINEAR);
+    if (resize_w < imgW) {
+      cv::copyMakeBorder(resize_img, resize_img, 0, 0, 0, imgW - resize_w,
+                         cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+    }
+  } else {
+    cv::resize(img, resize_img, cv::Size(100, 32), 0.f, 0.f, cv::INTER_LINEAR);
+  }
 }

 } // namespace PaddleOCR
@@ -24,3 +24,7 @@ char_list_file ../../ppocr/utils/ppocr_keys_v1.txt

 # show the detection results
 visualize 1
+
+# use_tensorrt
+use_tensorrt 0
+use_fp16 0
## Introduction

Complex models improve performance, but they also introduce a certain amount of redundancy. Model quantization reduces this redundancy by converting full precision to fixed-point numbers, lowering the computational complexity of the model and improving its inference performance.

Model quantization converts FP32 model parameters to Int8 precision with essentially no loss of accuracy, reducing the parameter size and accelerating computation; quantized models have a clear speed advantage when deployed on mobile devices.

This tutorial introduces how to use PaddleSlim, the model compression library of PaddlePaddle, to compress PaddleOCR models.
[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim) integrates widely used, industry-leading model compression features such as pruning, quantization (both quantization-aware training and post-training quantization), distillation, and neural architecture search; follow the project if you are interested.

Before starting this tutorial, it is recommended to first understand [how PaddleOCR models are trained](../../../doc/doc_ch/quickstart.md) and [PaddleSlim](https://paddleslim.readthedocs.io/zh_CN/latest/index.html).

## Quick Start

Quantization is mostly suitable for deploying lightweight models on mobile devices. After a model is trained, quantization can be used to further compress its size and accelerate prediction.

Model quantization consists of five main steps:

1. Install PaddleSlim
2. Prepare the trained model
3. Quantization-aware training
4. Export the quantized inference model
5. Deploy the quantized model

### 1. Install PaddleSlim

```bash
git clone https://github.com/PaddlePaddle/PaddleSlim.git
cd PaddleSlim
python setup.py install
```

### 2. Prepare the trained model

PaddleOCR provides a series of trained [models](../../../doc/doc_ch/models_list.md). If the model to be quantized is not in the list, you need to obtain a trained model following the [regular training](../../../doc/doc_ch/quickstart.md) method.

### 3. Quantization-aware training

Quantization training includes post-training quantization and quantization-aware training; the latter is more effective. It requires loading a pre-trained model, and the model can be quantized once the quantization strategy is defined.

The quantization training code is located in `slim/quantization/quant.py`. For example, to train a quantized detection model, the training command is as follows:

```bash
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights='your trained model' Global.save_model_dir=./output/quant_model

# e.g. download the provided trained model
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
tar -xf ch_ppocr_mobile_v2.0_det_train.tar
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_model_dir=./output/quant_model
```

To quantize a recognition model, simply change the configuration file and the loaded model parameters.

### 4. Export the model

After quantization training produces a saved model, we can export it as an inference model for predictive deployment:

```bash
python deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=output/quant_model/best_accuracy Global.save_model_dir=./output/quant_inference_model
```

### 5. Deploy the quantized model

The parameters of the model exported above are still stored as FP32, but their numerical range is int8; the exported model can be converted with PaddleLite's opt model conversion tool.

For quantized model deployment, refer to [Mobile model deployment](../../lite/readme.md).
## Introduction

Generally, a more complex model achieves better performance on a task, but it also introduces some redundancy.
Quantization is a technique that reduces this redundancy by converting full-precision data to fixed-point numbers,
so as to reduce model computational complexity and improve inference performance.

This example uses the [quantization APIs](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/) provided by PaddleSlim to compress the OCR model.

It is recommended that you understand the following pages before reading this example:

- [The training strategy of OCR model](../../../doc/doc_en/quickstart_en.md)
- [PaddleSlim Document](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/)
## Quick Start

Quantization is mostly suitable for deploying lightweight models on mobile terminals.
After training, if you want to further compress the model size and accelerate prediction, you can quantize the model by following the steps below.
1. Install PaddleSlim
2. Prepare trained model
3. Quantization-Aware Training
4. Export inference model
5. Deploy quantization inference model
### 1. Install PaddleSlim

```bash
git clone https://github.com/PaddlePaddle/PaddleSlim.git
cd PaddleSlim
python setup.py install
```
### 2. Download the Pretrained Model

PaddleOCR provides a series of trained [models](../../../doc/doc_en/models_list_en.md).
If the model to be quantized is not in the list, you need to follow the [Regular Training](../../../doc/doc_en/quickstart_en.md) method to get a trained model.
### 3. Quant-Aware Training

Quantization training includes post-training (offline) quantization and quantization-aware (online) training; quantization-aware training is more effective.
It requires loading a pre-trained model, and once the quantization strategy is defined, the model can be quantized.

The code for quantization training is located in `slim/quantization/quant.py`. For example, to train a detection model, the training instructions are as follows (a condensed sketch of the underlying QAT flow follows the commands):
```bash
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights='your trained model' Global.save_model_dir=./output/quant_model
# download provided model
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
tar -xf ch_ppocr_mobile_v2.0_det_train.tar
python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_model_dir=./output/quant_model
```
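For orientation, the QAT flow that `quant.py` drives with these commands boils down to wrapping the model before the usual training loop. A condensed sketch using the same PaddleSlim APIs as the script at the end of this commit; the model, save path, and input shape here are placeholders, not the script's exact values:

```python
import paddle
from paddleslim.dygraph.quant import QAT

# Same knobs as the quant_config dict in quant.py (see end of this commit).
quant_config = {
    'weight_quantize_type': 'channel_wise_abs_max',
    'activation_quantize_type': 'moving_average_abs_max',
}

def quantize_and_export(model, save_path, infer_shape=(3, 640, 640)):
    """model: any paddle.nn.Layer; save_path and infer_shape are placeholders."""
    quanter = QAT(config=quant_config)
    quanter.quantize(model)  # insert fake-quant ops into the model in place
    # ... run the normal training / fine-tuning loop on the wrapped model ...
    quanter.save_quantized_model(
        model, save_path,
        input_spec=[paddle.static.InputSpec(
            shape=[None, *infer_shape], dtype='float32')])
```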
### 4. Export the inference model

After quantization training and fine-tuning, we can export the model as an inference model for predictive deployment:
```bash
python deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=output/quant_model/best_accuracy Global.save_model_dir=./output/quant_inference_model
```
### 5. Deploy

The model parameters exported in the step above are still stored as FP32, but their numerical range is int8.
The exported model can be converted with PaddleLite's `opt` tool.

For quantized model deployment, please refer to [Mobile terminal model deployment](../../lite/readme_en.md)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..', '..', '..')))
sys.path.append(
os.path.abspath(os.path.join(__dir__, '..', '..', '..', 'tools')))
import argparse
import paddle
from paddle.jit import to_static
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import init_model
from ppocr.utils.logging import get_logger
from tools.program import load_config, merge_config, ArgsParser
from ppocr.metrics import build_metric
import tools.program as program
from paddleslim.dygraph.quant import QAT
from ppocr.data import build_dataloader


def main():
############################################################################################################
# 1. quantization configs
############################################################################################################
quant_config = {
# weight preprocess type, default is None and no preprocessing is performed.
'weight_preprocess_type': None,
# activation preprocess type, default is None and no preprocessing is performed.
'activation_preprocess_type': None,
# weight quantize type, default is 'channel_wise_abs_max'
'weight_quantize_type': 'channel_wise_abs_max',
# activation quantize type, default is 'moving_average_abs_max'
'activation_quantize_type': 'moving_average_abs_max',
# weight quantize bit num, default is 8
'weight_bits': 8,
# activation quantize bit num, default is 8
'activation_bits': 8,
# data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
'dtype': 'int8',
# window size for 'range_abs_max' quantization. default is 10000
'window_size': 10000,
# The decay coefficient of moving average, default is 0.9
'moving_rate': 0.9,
# for dygraph quantization, layers of type in quantizable_layer_type will be quantized
'quantizable_layer_type': ['Conv2D', 'Linear'],
}
FLAGS = ArgsParser().parse_args()
config = load_config(FLAGS.config)
merge_config(FLAGS.opt)
logger = get_logger()
# build post process
post_process_class = build_post_process(config['PostProcess'],
config['Global'])
# build model
# for rec algorithm
if hasattr(post_process_class, 'character'):
char_num = len(getattr(post_process_class, 'character'))
config['Architecture']["Head"]['out_channels'] = char_num
model = build_model(config['Architecture'])
# get QAT model
quanter = QAT(config=quant_config)
quanter.quantize(model)
init_model(config, model, logger)
model.eval()
# build metric
eval_class = build_metric(config['Metric'])
# build dataloader
valid_dataloader = build_dataloader(config, 'Eval', device, logger)
# start eval
    metric = program.eval(model, valid_dataloader, post_process_class,
                          eval_class)
    logger.info('metric eval ***************')
    for k, v in metric.items():
        logger.info('{}:{}'.format(k, v))
save_path = '{}/inference'.format(config['Global']['save_inference_dir'])
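    # recognition models are exported with 3x32x100 inputs, detection models with 3x640x640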
infer_shape = [3, 32, 100] if config['Architecture'][
'model_type'] != "det" else [3, 640, 640]
quanter.save_quantized_model(
model,
save_path,
input_spec=[
paddle.static.InputSpec(
shape=[None] + infer_shape, dtype='float32')
])
logger.info('inference QAT model is saved to {}'.format(save_path))


if __name__ == "__main__":
config, device, logger, vdl_writer = program.preprocess()
main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..', '..', '..')))
sys.path.append(
os.path.abspath(os.path.join(__dir__, '..', '..', '..', 'tools')))
import yaml
import paddle
import paddle.distributed as dist
paddle.seed(2)
from ppocr.data import build_dataloader
from ppocr.modeling.architectures import build_model
from ppocr.losses import build_loss
from ppocr.optimizer import build_optimizer
from ppocr.postprocess import build_post_process
from ppocr.metrics import build_metric
from ppocr.utils.save_load import init_model
import tools.program as program
from paddleslim.dygraph.quant import QAT
dist.get_world_size()


class PACT(paddle.nn.Layer):
    """PACT (PArameterized Clipping acTivation).

    Learns a clipping threshold `alpha` and clips activations to
    [-alpha, alpha], so the quantization range adapts during training.
    """

    def __init__(self):
        super(PACT, self).__init__()
        alpha_attr = paddle.ParamAttr(
            name=self.full_name() + ".pact",
            initializer=paddle.nn.initializer.Constant(value=20),
            learning_rate=1.0,
            regularizer=paddle.regularizer.L2Decay(2e-5))
        self.alpha = self.create_parameter(
            shape=[1], attr=alpha_attr, dtype='float32')

    def forward(self, x):
        # Equivalent to clip(x, -alpha, alpha): subtract the overshoot
        # above alpha and add back the undershoot below -alpha.
        out_left = paddle.nn.functional.relu(x - self.alpha)
        out_right = paddle.nn.functional.relu(-self.alpha - x)
        x = x - out_left + out_right
        return x


quant_config = {
# weight preprocess type, default is None and no preprocessing is performed.
'weight_preprocess_type': None,
# activation preprocess type, default is None and no preprocessing is performed.
'activation_preprocess_type': None,
# weight quantize type, default is 'channel_wise_abs_max'
'weight_quantize_type': 'channel_wise_abs_max',
# activation quantize type, default is 'moving_average_abs_max'
'activation_quantize_type': 'moving_average_abs_max',
# weight quantize bit num, default is 8
'weight_bits': 8,
# activation quantize bit num, default is 8
'activation_bits': 8,
# data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
'dtype': 'int8',
# window size for 'range_abs_max' quantization. default is 10000
'window_size': 10000,
# The decay coefficient of moving average, default is 0.9
'moving_rate': 0.9,
# for dygraph quantization, layers of type in quantizable_layer_type will be quantized
'quantizable_layer_type': ['Conv2D', 'Linear'],
}


def main(config, device, logger, vdl_writer):
# init dist environment
if config['Global']['distributed']:
dist.init_parallel_env()
global_config = config['Global']
# build dataloader
train_dataloader = build_dataloader(config, 'Train', device, logger)
if config['Eval']:
valid_dataloader = build_dataloader(config, 'Eval', device, logger)
else:
valid_dataloader = None
# build post process
post_process_class = build_post_process(config['PostProcess'],
global_config)
# build model
# for rec algorithm
if hasattr(post_process_class, 'character'):
char_num = len(getattr(post_process_class, 'character'))
config['Architecture']["Head"]['out_channels'] = char_num
model = build_model(config['Architecture'])
# prepare to quant
quanter = QAT(config=quant_config, act_preprocess=PACT)
quanter.quantize(model)
if config['Global']['distributed']:
model = paddle.DataParallel(model)
# build loss
loss_class = build_loss(config['Loss'])
# build optim
optimizer, lr_scheduler = build_optimizer(
config['Optimizer'],
epochs=config['Global']['epoch_num'],
step_each_epoch=len(train_dataloader),
parameters=model.parameters())
# build metric
eval_class = build_metric(config['Metric'])
# load pretrain model
pre_best_model_dict = init_model(config, model, logger, optimizer)
logger.info('train dataloader has {} iters, valid dataloader has {} iters'.
format(len(train_dataloader), len(valid_dataloader)))
# start train
program.train(config, train_dataloader, valid_dataloader, device, model,
loss_class, optimizer, lr_scheduler, post_process_class,
eval_class, pre_best_model_dict, logger, vdl_writer)


def test_reader(config, device, logger):
loader = build_dataloader(config, 'Train', device, logger)
import time
starttime = time.time()
count = 0
try:
for data in loader():
count += 1
if count % 1 == 0:
batch_time = time.time() - starttime
starttime = time.time()
logger.info("reader: {}, {}, {}".format(
count, len(data[0]), batch_time))
except Exception as e:
logger.info(e)
logger.info("finish reader: {}, Success!".format(count))


if __name__ == '__main__':
config, device, logger, vdl_writer = program.preprocess(is_train=True)
main(config, device, logger, vdl_writer)
# test_reader(config, device, logger)
...@@ -21,13 +21,13 @@ PaddleOCR开源的文本检测算法列表: ...@@ -21,13 +21,13 @@ PaddleOCR开源的文本检测算法列表:
|EAST|MobileNetV3|78.24%|79.15%|78.69%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)| |EAST|MobileNetV3|78.24%|79.15%|78.69%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)|
|DB|ResNet50_vd|86.41%|78.72%|82.38%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)| |DB|ResNet50_vd|86.41%|78.72%|82.38%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)|
|DB|MobileNetV3|77.29%|73.08%|75.12%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)| |DB|MobileNetV3|77.29%|73.08%|75.12%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)|
|SAST|ResNet50_vd|91.83%|81.80%|86.52%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)| |SAST|ResNet50_vd|91.39%|83.77%|87.42%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)|
在Total-text文本检测公开数据集上,算法效果如下: 在Total-text文本检测公开数据集上,算法效果如下:
|模型|骨干网络|precision|recall|Hmean|下载链接| |模型|骨干网络|precision|recall|Hmean|下载链接|
| --- | --- | --- | --- | --- | --- | | --- | --- | --- | --- | --- | --- |
|SAST|ResNet50_vd|89.05%|76.80%|82.47%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)| |SAST|ResNet50_vd|89.63%|78.44%|83.66%|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)|
**说明:** SAST模型训练额外加入了icdar2013、icdar2017、COCO-Text、ArT等公开数据集进行调优。PaddleOCR用到的经过整理格式的英文公开数据集下载:[百度云地址](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (提取码: 2bpi) **说明:** SAST模型训练额外加入了icdar2013、icdar2017、COCO-Text、ArT等公开数据集进行调优。PaddleOCR用到的经过整理格式的英文公开数据集下载:[百度云地址](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (提取码: 2bpi)
......
...@@ -21,9 +21,8 @@ ln -sf <path/to/dataset> <path/to/paddle_ocr>/train_data/cls/dataset ...@@ -21,9 +21,8 @@ ln -sf <path/to/dataset> <path/to/paddle_ocr>/train_data/cls/dataset
``` ```
" 图像文件名 图像标注信息 " " 图像文件名 图像标注信息 "
train_data/cls/word_001.jpg 0 train/word_001.jpg 0
train_data/cls/word_002.jpg 180 train/word_002.jpg 180
``` ```
最终训练集应有如下文件结构: 最终训练集应有如下文件结构:
...@@ -55,6 +54,8 @@ train_data/cls/word_002.jpg 180 ...@@ -55,6 +54,8 @@ train_data/cls/word_002.jpg 180
### 启动训练 ### 启动训练
将准备好的txt文件和图片文件夹路径分别写入配置文件的 `Train/Eval.dataset.label_file_list``Train/Eval.dataset.data_dir` 字段下,`Train/Eval.dataset.data_dir`字段下的路径和文件里记载的图片名构成了图片的绝对路径。
PaddleOCR提供了训练脚本、评估脚本和预测脚本。 PaddleOCR提供了训练脚本、评估脚本和预测脚本。
开始训练: 开始训练:
......
...@@ -96,5 +96,5 @@ python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" --det_mode ...@@ -96,5 +96,5 @@ python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" --det_mode
此外,文档教程中也提供了中文OCR模型的其他预测部署方式: 此外,文档教程中也提供了中文OCR模型的其他预测部署方式:
- [基于C++预测引擎推理](../../deploy/cpp_infer/readme.md) - [基于C++预测引擎推理](../../deploy/cpp_infer/readme.md)
- [服务部署](../../deploy/pdserving/readme.md) - [服务部署](../../deploy/hubserving)
- [端侧部署](../../deploy/lite/readme.md) - [端侧部署(目前只支持静态图)](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/deploy/lite)
...@@ -23,13 +23,13 @@ On the ICDAR2015 dataset, the text detection result is as follows: ...@@ -23,13 +23,13 @@ On the ICDAR2015 dataset, the text detection result is as follows:
|EAST|MobileNetV3|78.24%|79.15%|78.69%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)| |EAST|MobileNetV3|78.24%|79.15%|78.69%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar)|
|DB|ResNet50_vd|86.41%|78.72%|82.38%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)| |DB|ResNet50_vd|86.41%|78.72%|82.38%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)|
|DB|MobileNetV3|77.29%|73.08%|75.12%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)| |DB|MobileNetV3|77.29%|73.08%|75.12%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)|
|SAST|ResNet50_vd|91.83%|81.80%|86.52%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)| |SAST|ResNet50_vd|91.39%|83.77%|87.42%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)|
On Total-Text dataset, the text detection result is as follows: On Total-Text dataset, the text detection result is as follows:
|Model|Backbone|precision|recall|Hmean|Download link| |Model|Backbone|precision|recall|Hmean|Download link|
| --- | --- | --- | --- | --- | --- | | --- | --- | --- | --- | --- | --- |
|SAST|ResNet50_vd|89.05%|76.80%|82.47%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)| |SAST|ResNet50_vd|89.63%|78.44%|83.66%|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)|
**Note:** Additional data, like icdar2013, icdar2017, COCO-Text, ArT, was added to the model training of SAST. Download English public dataset in organized format used by PaddleOCR from [Baidu Drive](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (download code: 2bpi). **Note:** Additional data, like icdar2013, icdar2017, COCO-Text, ArT, was added to the model training of SAST. Download English public dataset in organized format used by PaddleOCR from [Baidu Drive](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (download code: 2bpi).
......
...@@ -23,8 +23,8 @@ First put the training images in the same folder (train_images), and use a txt f ...@@ -23,8 +23,8 @@ First put the training images in the same folder (train_images), and use a txt f
``` ```
" Image file name Image annotation " " Image file name Image annotation "
train_data/word_001.jpg 0 train/word_001.jpg 0
train_data/word_002.jpg 180 train/word_002.jpg 180
``` ```
The final training set should have the following file structure: The final training set should have the following file structure:
...@@ -57,6 +57,7 @@ containing all images (test) and a cls_gt_test.txt. The structure of the test se ...@@ -57,6 +57,7 @@ containing all images (test) and a cls_gt_test.txt. The structure of the test se
``` ```
### TRAINING ### TRAINING
Write the prepared txt file and image folder path into the configuration file under the `Train/Eval.dataset.label_file_list` and `Train/Eval.dataset.data_dir` fields, the absolute path of the image consists of the `Train/Eval.dataset.data_dir` field and the image name recorded in the txt file.
PaddleOCR provides training scripts, evaluation scripts, and prediction scripts. PaddleOCR provides training scripts, evaluation scripts, and prediction scripts.
......
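For reference, a minimal sketch of the corresponding fields in a PaddleOCR config file (the paths are placeholders):

```yaml
Train:
  dataset:
    data_dir: ./train_data/cls            # image names in the label file are resolved against this
    label_file_list:
      - ./train_data/cls/train.txt
Eval:
  dataset:
    data_dir: ./train_data/cls
    label_file_list:
      - ./train_data/cls/test.txt
```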
...@@ -99,5 +99,5 @@ For more text detection and recognition tandem reasoning, please refer to the do ...@@ -99,5 +99,5 @@ For more text detection and recognition tandem reasoning, please refer to the do
In addition, the tutorial also provides other deployment methods for the Chinese OCR model: In addition, the tutorial also provides other deployment methods for the Chinese OCR model:
- [Server-side C++ inference](../../deploy/cpp_infer/readme_en.md) - [Server-side C++ inference](../../deploy/cpp_infer/readme_en.md)
- [Service deployment](../../deploy/pdserving/readme_en.md) - [Service deployment](../../deploy/hubserving)
- [End-to-end deployment](../../deploy/lite/readme_en.md) - [End-to-end deployment](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/deploy/lite)
doc/joinus.PNG (image replaced: 174.4 KB → 108.9 KB)
...@@ -66,8 +66,10 @@ def build_dataloader(config, mode, device, logger): ...@@ -66,8 +66,10 @@ def build_dataloader(config, mode, device, logger):
batch_size = loader_config['batch_size_per_card'] batch_size = loader_config['batch_size_per_card']
drop_last = loader_config['drop_last'] drop_last = loader_config['drop_last']
num_workers = loader_config['num_workers'] num_workers = loader_config['num_workers']
if 'use_shared_memory' in loader_config.keys():
use_shared_memory = False use_shared_memory = loader_config['use_shared_memory']
else:
use_shared_memory = True
if mode == "Train": if mode == "Train":
#Distribute data to multiple cards #Distribute data to multiple cards
batch_sampler = DistributedBatchSampler( batch_sampler = DistributedBatchSampler(
...@@ -75,7 +77,6 @@ def build_dataloader(config, mode, device, logger): ...@@ -75,7 +77,6 @@ def build_dataloader(config, mode, device, logger):
batch_size=batch_size, batch_size=batch_size,
shuffle=False, shuffle=False,
drop_last=drop_last) drop_last=drop_last)
use_shared_memory = True
else: else:
#Distribute data to single card #Distribute data to single card
batch_sampler = BatchSampler( batch_sampler = BatchSampler(
......
...@@ -26,6 +26,8 @@ class RecMetric(object): ...@@ -26,6 +26,8 @@ class RecMetric(object):
all_num = 0 all_num = 0
norm_edit_dis = 0.0 norm_edit_dis = 0.0
for (pred, pred_conf), (target, _) in zip(preds, labels): for (pred, pred_conf), (target, _) in zip(preds, labels):
pred = pred.replace(" ", "")
target = target.replace(" ", "")
norm_edit_dis += Levenshtein.distance(pred, target) / max( norm_edit_dis += Levenshtein.distance(pred, target) / max(
len(pred), len(target)) len(pred), len(target))
if pred == target: if pred == target:
......
...@@ -60,7 +60,7 @@ class BaseRecLabelDecode(object): ...@@ -60,7 +60,7 @@ class BaseRecLabelDecode(object):
def add_special_char(self, dict_character): def add_special_char(self, dict_character):
return dict_character return dict_character
def decode(self, text_index, text_prob=None, is_remove_duplicate=True): def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """ """ convert text-index into text-label. """
result_list = [] result_list = []
ignored_tokens = self.get_ignored_tokens() ignored_tokens = self.get_ignored_tokens()
...@@ -110,7 +110,7 @@ class CTCLabelDecode(BaseRecLabelDecode): ...@@ -110,7 +110,7 @@ class CTCLabelDecode(BaseRecLabelDecode):
text = self.decode(preds_idx, preds_prob) text = self.decode(preds_idx, preds_prob)
if label is None: if label is None:
return text return text
label = self.decode(label, is_remove_duplicate=False) label = self.decode(label)
return text, label return text, label
def add_special_char(self, dict_character): def add_special_char(self, dict_character):
......
...@@ -57,7 +57,7 @@ def get_image_file_list(img_file): ...@@ -57,7 +57,7 @@ def get_image_file_list(img_file):
elif os.path.isdir(img_file): elif os.path.isdir(img_file):
for single_file in os.listdir(img_file): for single_file in os.listdir(img_file):
file_path = os.path.join(img_file, single_file) file_path = os.path.join(img_file, single_file)
if imghdr.what(file_path) in img_end: if os.path.isfile(file_path) and imghdr.what(file_path) in img_end:
imgs_lists.append(file_path) imgs_lists.append(file_path)
if len(imgs_lists) == 0: if len(imgs_lists) == 0:
raise Exception("not found any img file in {}".format(img_file)) raise Exception("not found any img file in {}".format(img_file))
......