Commit 3691291a authored by LDOUBLEV

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into fix_trt_res

@@ -27,7 +27,12 @@ import json
import cv2
__dir__ = os.path.dirname(os.path.abspath(__file__))
import numpy as np
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
sys.path.append("..")
@@ -78,7 +83,7 @@ class WindowMixin(object):
        addActions(menu, actions)
        return menu

    def toolbar(self, title, actions=None):
        toolbar = ToolBar(title)
        toolbar.setObjectName(u'%sToolBar' % title)
        # toolbar.setOrientation(Qt.Vertical)
@@ -98,7 +103,7 @@ class MainWindow(QMainWindow, WindowMixin):
        # Load setting in the main thread
        self.settings = Settings()
        self.settings.load()
        settings = self.settings
        self.lang = lang

        # Load string bundle for i18n
@@ -159,7 +164,7 @@ class MainWindow(QMainWindow, WindowMixin):
        filelistLayout = QVBoxLayout()
        filelistLayout.setContentsMargins(0, 0, 0, 0)
        filelistLayout.addWidget(self.fileListWidget)

        self.AutoRecognition = QToolButton()
        self.AutoRecognition.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.AutoRecognition.setIcon(newIcon('Auto'))
@@ -176,7 +181,7 @@ class MainWindow(QMainWindow, WindowMixin):
        self.filedock.setObjectName(getStr('files'))
        self.filedock.setWidget(fileListContainer)
        self.addDockWidget(Qt.LeftDockWidgetArea, self.filedock)

        ######## Right area ##########
        listLayout = QVBoxLayout()
        listLayout.setContentsMargins(0, 0, 0, 0)
@@ -250,7 +255,7 @@ class MainWindow(QMainWindow, WindowMixin):
        self.imgsplider.setMaximum(150)
        self.imgsplider.setSingleStep(1)
        self.imgsplider.setTickPosition(QSlider.TicksBelow)
        self.imgsplider.setTickInterval(1)

        op = QGraphicsOpacityEffect()
        op.setOpacity(0.2)
        self.imgsplider.setGraphicsEffect(op)
@@ -266,7 +271,9 @@ class MainWindow(QMainWindow, WindowMixin):
        self.zoomWidget = ZoomWidget()
        self.colorDialog = ColorDialog(parent=self)
        self.zoomWidgetValue = self.zoomWidget.value()

        self.msgBox = QMessageBox()

        ########## thumbnail #########
        hlayout = QHBoxLayout()
        m = (0, 0, 0, 0)
@@ -294,7 +301,7 @@ class MainWindow(QMainWindow, WindowMixin):
        self.nextButton.setStyleSheet('border: none;')
        self.nextButton.clicked.connect(self.openNextImg)
        self.nextButton.setShortcut('d')

        hlayout.addWidget(self.preButton)
        hlayout.addWidget(self.iconlist)
        hlayout.addWidget(self.nextButton)
@@ -303,7 +310,7 @@ class MainWindow(QMainWindow, WindowMixin):
        iconListContainer = QWidget()
        iconListContainer.setLayout(hlayout)
        iconListContainer.setFixedHeight(100)

        ########### Canvas ###########
        self.canvas = Canvas(parent=self)
        self.canvas.zoomRequest.connect(self.zoomRequest)
@@ -360,6 +367,9 @@ class MainWindow(QMainWindow, WindowMixin):
        opendir = action(getStr('openDir'), self.openDirDialog,
                         'Ctrl+u', 'open', getStr('openDir'))

        open_dataset_dir = action(getStr('openDatasetDir'), self.openDatasetDirDialog,
                                  'Ctrl+p', 'open', getStr('openDatasetDir'), enabled=False)

        save = action(getStr('save'), self.saveFile,
                      'Ctrl+V', 'verify', getStr('saveDetail'), enabled=False)
@@ -439,7 +449,7 @@ class MainWindow(QMainWindow, WindowMixin):
        AutoRec = action(getStr('autoRecognition'), self.autoRecognition,
                         '', 'Auto', getStr('autoRecognition'), enabled=False)

        reRec = action(getStr('reRecognition'), self.reRecognition,
                       'Ctrl+Shift+R', 'reRec', getStr('reRecognition'), enabled=False)

        singleRere = action(getStr('singleRe'), self.singleRerecognition,
@@ -457,6 +467,12 @@ class MainWindow(QMainWindow, WindowMixin):
        undoLastPoint = action(getStr("undoLastPoint"), self.canvas.undoLastPoint,
                               'Ctrl+Z', "undo", getStr("undoLastPoint"), enabled=False)

        rotateLeft = action(getStr("rotateLeft"), partial(self.rotateImgAction, 1),
                            'Ctrl+Alt+L', "rotateLeft", getStr("rotateLeft"), enabled=False)

        rotateRight = action(getStr("rotateRight"), partial(self.rotateImgAction, -1),
                             'Ctrl+Alt+R', "rotateRight", getStr("rotateRight"), enabled=False)

        undo = action(getStr("undo"), self.undoShapeEdit,
                      'Ctrl+Z', "undo", getStr("undo"), enabled=False)
@@ -520,13 +536,14 @@ class MainWindow(QMainWindow, WindowMixin):
                              zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
                              fitWindow=fitWindow, fitWidth=fitWidth,
                              zoomActions=zoomActions, saveLabel=saveLabel,
                              undo=undo, undoLastPoint=undoLastPoint, open_dataset_dir=open_dataset_dir,
                              rotateLeft=rotateLeft, rotateRight=rotateRight,
                              fileMenuActions=(
                                  opendir, open_dataset_dir, saveLabel, resetAll, quit),
                              beginner=(), advanced=(),
                              editMenu=(createpoly, edit, copy, delete, singleRere, None, undo, undoLastPoint,
                                        None, rotateLeft, rotateRight, None, color1, self.drawSquaresOption),
                              beginnerContext=(create, edit, copy, delete, singleRere, rotateLeft, rotateRight),
                              advancedContext=(createMode, editMode, edit, copy,
                                               delete, shapeLineColor, shapeFillColor),
                              onLoadActive=(
@@ -564,7 +581,7 @@ class MainWindow(QMainWindow, WindowMixin):
        self.autoSaveOption.triggered.connect(self.autoSaveFunc)

        addActions(self.menus.file,
                   (opendir, open_dataset_dir, None, saveLabel, saveRec, self.autoSaveOption, None, resetAll, deleteImg, quit))

        addActions(self.menus.help, (showKeys, showSteps, showInfo))
        addActions(self.menus.view, (
@@ -778,6 +795,38 @@ class MainWindow(QMainWindow, WindowMixin):
        self.actions.create.setEnabled(False)
        self.actions.undoLastPoint.setEnabled(True)

    def rotateImg(self, filename, k, _value):
        self.actions.rotateRight.setEnabled(_value)
        pix = cv2.imread(filename)
        pix = np.rot90(pix, k)
        cv2.imwrite(filename, pix)
        self.canvas.update()
        self.loadFile(filename)

    def rotateImgWarn(self):
        if self.lang == 'ch':
            self.msgBox.warning(self, "提示", "\n 该图片已经有标注框,旋转操作会打乱标注,建议清除标注框后旋转。")
        else:
            self.msgBox.warning(self, "Warn", "\n The picture already has a label box, and rotation will disrupt the label.\
            It is recommended to clear the label box and rotate it.")

    def rotateImgAction(self, k=1, _value=False):
        filename = self.mImgList[self.currIndex]
        if os.path.exists(filename):
            if self.itemsToShapesbox:
                self.rotateImgWarn()
            else:
                self.saveFile()
                self.dirty = False
                self.rotateImg(filename=filename, k=k, _value=True)
        else:
            self.rotateImgWarn()
            self.actions.rotateRight.setEnabled(False)
            self.actions.rotateLeft.setEnabled(False)
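A note on the rotation convention used by `rotateImg` above: `np.rot90(pix, k)` rotates counter-clockwise for positive `k`, which is why the "rotate left" action further up is bound to `k=1` and "rotate right" to `k=-1`. A standalone sketch (the file name is hypothetical, not from this commit):

```python
import cv2
import numpy as np

img = cv2.imread("sample.jpg")  # hypothetical input image, shape (h, w, 3)

left = np.rot90(img, 1)    # 90 degrees counter-clockwise ("rotate left")
right = np.rot90(img, -1)  # 90 degrees clockwise ("rotate right")

# A quarter turn swaps height and width.
assert left.shape[:2] == (img.shape[1], img.shape[0])

cv2.imwrite("sample_left.jpg", left)
cv2.imwrite("sample_right.jpg", right)
```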
    def toggleDrawingSensitive(self, drawing=True):
        """In the middle of drawing, toggling between modes should be disabled."""
        self.actions.editMode.setEnabled(not drawing)
@@ -885,7 +934,12 @@ class MainWindow(QMainWindow, WindowMixin):
        self.updateComboBox()

    def updateBoxlist(self):
        self.canvas.selectedShapes_hShape = []
        if self.canvas.hShape != None:
            self.canvas.selectedShapes_hShape = self.canvas.selectedShapes + [self.canvas.hShape]
        else:
            self.canvas.selectedShapes_hShape = self.canvas.selectedShapes
        for shape in self.canvas.selectedShapes_hShape:
            item = self.shapesToItemsbox[shape]  # listitem
            text = [(int(p.x()), int(p.y())) for p in shape.points]
            item.setText(str(text))
@@ -1274,7 +1328,7 @@ class MainWindow(QMainWindow, WindowMixin):
                titem = self.iconlist.item(i)
                titem.setSelected(True)
                self.iconlist.scrollToItem(titem)
                break
        else:
            self.fileListWidget.clear()
            self.mImgList.clear()
@@ -1282,7 +1336,7 @@ class MainWindow(QMainWindow, WindowMixin):
        # if unicodeFilePath and self.iconList.count() > 0:
        # if unicodeFilePath in self.mImgList:

        if unicodeFilePath and os.path.exists(unicodeFilePath):
            self.canvas.verified = False
@@ -1313,7 +1367,7 @@ class MainWindow(QMainWindow, WindowMixin):
            self.addRecentFile(self.filePath)
            self.toggleActions(True)
            self.showBoundingBoxFromPPlabel(filePath)

            self.setWindowTitle(__appname__ + ' ' + filePath)

            # Default : select last item if there is at least one item
@@ -1325,7 +1379,7 @@ class MainWindow(QMainWindow, WindowMixin):
            return True
        return False

    def showBoundingBoxFromPPlabel(self, filePath):
        imgidx = self.getImglabelidx(filePath)
        if imgidx not in self.PPlabel.keys():
@@ -1418,6 +1472,7 @@ class MainWindow(QMainWindow, WindowMixin):
    def loadRecent(self, filename):
        if self.mayContinue():
            print(filename, "======")
            self.loadFile(filename)

    def scanAllImages(self, folderPath):
@@ -1453,6 +1508,23 @@ class MainWindow(QMainWindow, WindowMixin):
            self.lastOpenDir = targetDirPath
            self.importDirImages(targetDirPath)

    def openDatasetDirDialog(self):
        if self.lastOpenDir and os.path.exists(self.lastOpenDir):
            if platform.system() == 'Windows':
                os.startfile(self.lastOpenDir)
            else:
                os.system('open ' + os.path.normpath(self.lastOpenDir))
            defaultOpenDirPath = self.lastOpenDir
        else:
            if self.lang == 'ch':
                self.msgBox.warning(self, "提示", "\n 原文件夹已不存在,请从新选择数据集路径!")
            else:
                self.msgBox.warning(self, "Warn", "\n The original folder no longer exists, please choose the data set path again!")
            self.actions.open_dataset_dir.setEnabled(False)
            defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
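One caveat with `openDatasetDirDialog` above: `open` is the macOS opener, so the non-Windows branch will not work on most Linux desktops, which use `xdg-open`. A hedged, cross-platform sketch of the same idea (an alternative, not the committed implementation):

```python
import os
import platform
import subprocess

def open_folder_in_file_manager(path):
    """Reveal a folder in the OS file manager (sketch; error handling omitted)."""
    system = platform.system()
    if system == 'Windows':
        os.startfile(path)
    elif system == 'Darwin':  # macOS
        subprocess.run(['open', path], check=False)
    else:  # most Linux desktops
        subprocess.run(['xdg-open', path], check=False)
```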
    def importDirImages(self, dirpath, isDelete=False):
        if not self.mayContinue() or not dirpath:
            return
@@ -1500,6 +1572,10 @@ class MainWindow(QMainWindow, WindowMixin):
        self.reRecogButton.setEnabled(True)
        self.actions.AutoRec.setEnabled(True)
        self.actions.reRec.setEnabled(True)
        self.actions.open_dataset_dir.setEnabled(True)
        self.actions.rotateLeft.setEnabled(True)
        self.actions.rotateRight.setEnabled(True)
    def openPrevImg(self, _value=False):
@@ -1508,7 +1584,7 @@ class MainWindow(QMainWindow, WindowMixin):
        if self.filePath is None:
            return

        currIndex = self.mImgList.index(self.filePath)
        self.mImgList5 = self.mImgList[:5]
        if currIndex - 1 >= 0:
@@ -1538,7 +1614,7 @@ class MainWindow(QMainWindow, WindowMixin):
        if filename:
            print('file name in openNext is ', filename)
            self.loadFile(filename)

    def updateFileListIcon(self, filename):
        pass
@@ -1650,7 +1726,7 @@ class MainWindow(QMainWindow, WindowMixin):
        proc.startDetached(os.path.abspath(__file__))

    def mayContinue(self):  #
        if not self.dirty:
            return True
        else:
            discardChanges = self.discardChangesDialog()
@@ -2077,7 +2153,7 @@ def main():
if __name__ == '__main__':
    resource_file = './libs/resources.py'
    if not os.path.exists(resource_file):
        output = os.system('pyrcc5 -o libs/resources.py resources.qrc')
...
@@ -8,9 +8,12 @@ PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, w

### Recent Update

- 2021.8.11:
  - New functions: open the dataset folder, image rotation (note: please delete the label boxes before rotating the image) (by [Wei-JL](https://github.com/Wei-JL))
  - Added shortcut key descriptions (Help - Shortcut Keys) and fixed arrow-key movement of boxes in batch mode (by [d2623587501](https://github.com/d2623587501))
- 2021.2.5: New batch processing and undo functions (by [Evezerest](https://github.com/Evezerest)):
  - **Batch processing function**: Press and hold the Ctrl key to select boxes, then move, copy, and delete them in batches.
  - **Undo function**: While drawing a four-point label box, or after editing a box, press Ctrl+Z to undo the previous operation.
  - Fixed image rotation and size problems and optimized the process of editing the mark frame (by [ninetailskim](https://github.com/ninetailskim), [edencfc](https://github.com/edencfc)).
- 2021.1.11: Optimize the labeling experience (by [edencfc](https://github.com/edencfc)):
  - Users can choose whether to pop up the label input dialog after drawing the detection box in "View - Pop-up Label Input Dialog".
@@ -23,15 +26,51 @@ PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, w

## Installation

### 1. Environment Preparation

#### **Install PaddlePaddle 2.0**

```bash
pip3 install --upgrade pip

# If you have cuda9 or cuda10 installed on your machine, please run the following command to install
python3 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple

# If you only have a CPU on your machine, please run the following command to install
python3 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple
```

For more software version requirements, please refer to the [installation document](https://www.paddlepaddle.org.cn/install/quick).

#### **Install PaddleOCR**

```bash
# Recommended
git clone https://github.com/PaddlePaddle/PaddleOCR

# If you cannot pull successfully due to network problems, you can also use the mirror hosted on Gitee:
git clone https://gitee.com/paddlepaddle/PaddleOCR

# Note: the mirror may not sync with this GitHub project in real time; there can be a delay of 3-5 days. Please give priority to the recommended method.
```

#### **Install Third-party Libraries**

```bash
cd PaddleOCR
pip3 install -r requirements.txt
```

If you get the error `OSError: [WinError 126] The specified module could not be found` when installing shapely on Windows, try downloading the Shapely whl file from http://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely and installing it directly.

Reference: [Solve shapely installation on windows](https://stackoverflow.com/questions/44398265/install-shapely-oserror-winerror-126-the-specified-module-could-not-be-found)
### 2. Install PPOCRLabel

#### Windows

```bash
pip install pyqt5
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
python PPOCRLabel.py
@@ -39,15 +78,15 @@ python PPOCRLabel.py

#### Ubuntu Linux

```bash
pip3 install pyqt5
pip3 install trash-cli
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
python3 PPOCRLabel.py
```

#### macOS

```bash
pip3 install pyqt5
pip3 uninstall opencv-python # Uninstall opencv manually as it conflicts with pyqt
pip3 install opencv-contrib-python-headless==4.2.0.32 # Install the headless version of opencv
@@ -77,11 +116,11 @@ python3 PPOCRLabel.py
7. Double click the result in the 'recognition result' list to manually correct inaccurate recognition results.

8. **Click "Check": the image status switches to "√" and the program automatically jumps to the next image.**

9. Click "Delete Image" and the image will be deleted to the recycle bin.

10. Labeling result: the user can export the label result manually through the menu "File - Export Label", and the program will also export automatically if "File - Auto export Label Mode" is selected. The manually checked labels are stored in *Label.txt* under the opened picture folder. Click "File" - "Export Recognition Results" in the menu bar: the recognition training data of these pictures will be saved in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.
### Note

@@ -95,10 +134,10 @@ python3 PPOCRLabel.py

| File name | Description |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | The detection label file, which can be directly used for PPOCR detection model training. The file is automatically exported after the user saves 5 label results, and is also written when the user closes the application or changes the picture folder. |
| fileState.txt | The picture status file, which saves the images in the current folder that have been manually confirmed by the user. |
| Cache.cach | Cache file that saves the results of model recognition. |
| rec_gt.txt | The recognition label file, which can be directly used for PPOCR recognition model training, is generated after the user clicks "File" - "Export recognition result" in the menu bar. |
| crop_img | The recognition data: images cropped by detection box, generated together with *rec_gt.txt*. |
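For orientation, both label files are plain text with one image per line. A hedged illustration of the two formats (assuming the standard PaddleOCR label conventions; the paths and contents below are made up):

```python
import json

# One Label.txt line: "<image path>\t<JSON list of boxes>"
label_line = 'train_data/img_01.jpg\t' + json.dumps([
    {"transcription": "Hello",
     "points": [[10, 10], [90, 10], [90, 40], [10, 40]],
     "difficult": False}
])

# One rec_gt.txt line: "<cropped image path>\t<transcription>"
rec_gt_line = 'crop_img/img_01_crop_0.jpg\tHello'

# Parsing a Label.txt line back:
path, boxes_json = label_line.split('\t', 1)
boxes = json.loads(boxes_json)
print(path, boxes[0]["transcription"], boxes[0]["points"])
```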
## Explanation
@@ -132,16 +171,16 @@ python3 PPOCRLabel.py

- Custom model: The model trained by users can be swapped in by modifying the [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) in PPOCRLabel.py, referring to the [Custom Model Code](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/whl_en.md#use-custom-model).

### Export Label Result

PPOCRLabel supports three ways to export Label.txt:

- Automatic export: After selecting "File - Auto Export Label Mode", the program writes the annotations into Label.txt every time the user confirms an image. If this option is not turned on, the file is exported automatically once the program detects that the user has manually checked 5 images.

- Manual export: Click "File - Export Marking Results" to export the labels manually.

- Export when closing the application.

### Export Partial Recognition Results

For data that are difficult to recognize, **uncheck** the corresponding tags in the recognition results checkbox and the recognition results will not be exported.
...
@@ -8,9 +8,12 @@ PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置P

#### 近期更新

- 2021.8.11:
  - 新增功能:打开数据所在文件夹、图像旋转(注意:旋转前的图片上不能存在标记框)(by [Wei-JL](https://github.com/Wei-JL))
  - 新增快捷键说明(帮助-快捷键)、修复批处理下的方向快捷键移动功能(by [d2623587501](https://github.com/d2623587501))
- 2021.2.5:新增批处理与撤销功能(by [Evezerest](https://github.com/Evezerest)):
  - **批处理功能**:按住Ctrl键选择标记框后可批量移动、复制、删除、重新识别
  - **撤销功能**:在绘制四点标注框过程中或对框进行编辑操作后,按下Ctrl+Z可撤销上一步操作。
  - 修复图像旋转和尺寸问题、优化编辑标记框过程(by [ninetailskim](https://github.com/ninetailskim)、[edencfc](https://github.com/edencfc))
- 2021.1.11:优化标注体验(by [edencfc](https://github.com/edencfc)):
  - 用户可在“视图 - 弹出标记输入框”选择在画完检测框后标记输入框是否弹出。
@@ -27,13 +30,48 @@ PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置P

## 安装

### 1. 环境搭建

#### 安装PaddlePaddle

```bash
pip3 install --upgrade pip

# 如果您的机器安装的是CUDA9或CUDA10,请运行以下命令安装
python3 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple

# 如果您的机器是CPU,请运行以下命令安装
python3 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple
```

更多的版本需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。

#### **安装PaddleOCR**

```bash
# 推荐
git clone https://github.com/PaddlePaddle/PaddleOCR

# 如果因为网络问题无法pull成功,也可选择使用码云上的托管:
git clone https://gitee.com/paddlepaddle/PaddleOCR

# 注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。
```

#### 安装第三方库

```bash
cd PaddleOCR
pip3 install -r requirements.txt
```

注意:Windows环境下,建议从[这里](https://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely)下载shapely安装包完成安装,直接通过pip安装的shapely库可能出现 `OSError: [WinError 126] 找不到指定的模块` 的问题。
### 2. 安装PPOCRLabel

#### Windows

```bash
pip install pyqt5
cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下
python PPOCRLabel.py --lang ch
@@ -41,15 +79,15 @@ python PPOCRLabel.py --lang ch

#### Ubuntu Linux

```bash
pip3 install pyqt5
pip3 install trash-cli
cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下
python3 PPOCRLabel.py --lang ch
```

#### macOS

```bash
pip3 install pyqt5
pip3 uninstall opencv-python # 由于mac版本的opencv与pyqt有冲突,需先手动卸载opencv
pip3 install opencv-contrib-python-headless==4.2.0.32 # 安装headless版本的open-cv
@@ -57,6 +95,8 @@ cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下
python3 PPOCRLabel.py --lang ch
```
## 使用

### 操作步骤

@@ -68,9 +108,9 @@ python3 PPOCRLabel.py --lang ch
5. 标记框绘制完成后,用户点击 “确认”,检测框会先被预分配一个 “待识别” 标签。
6. 重新识别:将图片中的所有检测框绘制/调整完成后,点击 “重新识别”,PPOCR模型会对当前图片中的**所有检测框**重新识别<sup>[3]</sup>。
7. 内容更改:双击识别结果,对不准确的识别结果进行手动更改。
8. **确认标记:点击 “确认”,图片状态切换为 “√”,跳转至下一张。**
9. 删除:点击 “删除图像”,图片将会被删除至回收站。
10. 导出结果:用户可以通过菜单中“文件-导出标记结果”手动导出,同时也可以点击“文件 - 自动导出标记结果”开启自动导出。手动确认过的标记将会被存放在所打开图片文件夹下的*Label.txt*中。在菜单栏点击 “文件” - “导出识别结果”后,会将此类图片的识别训练数据保存在*crop_img*文件夹下,识别标签保存在*rec_gt.txt*中<sup>[4]</sup>。
### 注意

@@ -84,10 +124,10 @@ python3 PPOCRLabel.py --lang ch

| 文件名 | 说明 |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | 检测标签,可直接用于PPOCR检测模型训练。用户每确认5张检测结果后,程序会进行自动写入。当用户关闭应用程序或切换文件路径后同样会进行写入。 |
| fileState.txt | 图片状态标记文件,保存当前文件夹下已经被用户手动确认过的图片名称。 |
| Cache.cach | 缓存文件,保存模型自动识别的结果。 |
| rec_gt.txt | 识别标签。可直接用于PPOCR识别模型训练。需用户手动点击菜单栏“文件” - “导出识别结果”后产生。 |
| crop_img | 识别数据。按照检测框切割后的图片。与rec_gt.txt同时产生。 |
## 说明
@@ -120,19 +160,19 @@ python3 PPOCRLabel.py --lang ch

- 自定义模型:用户可根据[自定义模型代码使用](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md#%E8%87%AA%E5%AE%9A%E4%B9%89%E6%A8%A1%E5%9E%8B),通过修改PPOCRLabel.py中针对[PaddleOCR类的实例化](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110)替换成自己训练的模型。

### 导出标记结果

PPOCRLabel支持三种导出方式:

- 自动导出:点击“文件 - 自动导出标记结果”后,用户每确认过一张图片,程序自动将标记结果写入Label.txt中。若未开启此选项,则检测到用户手动确认过5张图片后进行自动导出。

- 手动导出:点击“文件 - 导出标记结果”手动导出标记。

- 关闭应用程序导出

### 导出部分识别结果

针对部分难以识别的数据,通过在识别结果的复选框中**取消勾选**相应的标记,其识别结果不会被导出。

*注意:识别结果中的复选框状态仍需用户手动点击确认后才能保留*

### 错误提示

- 如果同时使用whl包安装了paddleocr,其优先级大于通过paddleocr.py调用PaddleOCR类,whl包未更新时会导致程序异常。
...
@@ -23,6 +23,7 @@ except ImportError:
from libs.shape import Shape
from libs.utils import distance
import copy

CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
@@ -81,6 +82,7 @@ class Canvas(QWidget):
        self.fourpoint = True  # ADD
        self.pointnum = 0
        self.movingShape = False
        self.selectCountShape = False

        # initialisation for panning
        self.pan_initial_pos = QPoint()
@@ -702,6 +704,10 @@ class Canvas(QWidget):
    def keyPressEvent(self, ev):
        key = ev.key()
        shapesBackup = []
        shapesBackup = copy.deepcopy(self.shapes)
        self.shapesBackups.pop()
        self.shapesBackups.append(shapesBackup)
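        # Editor's note (added for clarity; not part of the original diff): the
        # four lines above refresh the top of the undo stack on every key press.
        # The stale snapshot is popped and replaced with a fresh deep copy of the
        # current shapes, so a later Ctrl+Z restores the state exactly as it was
        # before this key event (for example, before an arrow-key move below).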
        if key == Qt.Key_Escape and self.current:
            print('ESC press')
            self.current = None
@@ -709,41 +715,48 @@ class Canvas(QWidget):
            self.update()
        elif key == Qt.Key_Return and self.canCloseShape():
            self.finalise()
        elif key == Qt.Key_Left and self.selectedShapes:
            self.moveOnePixel('Left')
        elif key == Qt.Key_Right and self.selectedShapes:
            self.moveOnePixel('Right')
        elif key == Qt.Key_Up and self.selectedShapes:
            self.moveOnePixel('Up')
        elif key == Qt.Key_Down and self.selectedShapes:
            self.moveOnePixel('Down')
    def moveOnePixel(self, direction):
        # print(self.selectedShape.points)
        self.selectCount = len(self.selectedShapes)
        self.selectCountShape = True
        for i in range(len(self.selectedShapes)):
            self.selectedShape = self.selectedShapes[i]
            if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
                # print("move Left one pixel")
                self.selectedShape.points[0] += QPointF(-1.0, 0)
                self.selectedShape.points[1] += QPointF(-1.0, 0)
                self.selectedShape.points[2] += QPointF(-1.0, 0)
                self.selectedShape.points[3] += QPointF(-1.0, 0)
            elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
                # print("move Right one pixel")
                self.selectedShape.points[0] += QPointF(1.0, 0)
                self.selectedShape.points[1] += QPointF(1.0, 0)
                self.selectedShape.points[2] += QPointF(1.0, 0)
                self.selectedShape.points[3] += QPointF(1.0, 0)
            elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
                # print("move Up one pixel")
                self.selectedShape.points[0] += QPointF(0, -1.0)
                self.selectedShape.points[1] += QPointF(0, -1.0)
                self.selectedShape.points[2] += QPointF(0, -1.0)
                self.selectedShape.points[3] += QPointF(0, -1.0)
            elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
                # print("move Down one pixel")
                self.selectedShape.points[0] += QPointF(0, 1.0)
                self.selectedShape.points[1] += QPointF(0, 1.0)
                self.selectedShape.points[2] += QPointF(0, 1.0)
                self.selectedShape.points[3] += QPointF(0, 1.0)
        shapesBackup = []
        shapesBackup = copy.deepcopy(self.shapes)
        self.shapesBackups.append(shapesBackup)
        self.shapeMoved.emit()
        self.repaint()
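The four per-corner updates in each branch above are identical except for the offset. A more compact equivalent (a refactoring sketch, not the committed code) maps each direction to a single `QPointF` and loops over the corners:

```python
from PyQt5.QtCore import QPointF

# Offsets for a one-pixel move in each direction.
OFFSETS = {
    'Left':  QPointF(-1.0, 0),
    'Right': QPointF(1.0, 0),
    'Up':    QPointF(0, -1.0),
    'Down':  QPointF(0, 1.0),
}

def move_one_pixel(shape, direction, move_out_of_bound):
    """Shift all four corners of a quadrilateral shape by one pixel."""
    offset = OFFSETS[direction]
    if not move_out_of_bound(offset):
        for idx in range(4):
            shape.points[idx] += offset
```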
@@ -840,6 +853,7 @@ class Canvas(QWidget):
    def restoreShape(self):
        if not self.isShapeRestorable:
            return

        self.shapesBackups.pop()  # latest
        shapesBackup = self.shapesBackups.pop()
        self.shapes = shapesBackup
...
(The diff of one file is omitted here: it is too large to display.)
@@ -18,6 +18,8 @@
<file alias="quit">resources/icons/quit.png</file>
<file alias="copy">resources/icons/copy.png</file>
<file alias="edit">resources/icons/edit.png</file>
<file alias="rotateLeft">resources/icons/rotateLeft.png</file>
<file alias="rotateRight">resources/icons/rotateRight.png</file>
<file alias="open">resources/icons/open.png</file>
<file alias="save">resources/icons/save.png</file>
<file alias="format_voc">resources/icons/format_voc.png</file>
...
@@ -31,6 +31,7 @@ save=确认
saveAs=另存为
fitWinDetail=缩放到当前窗口大小
openDir=打开目录
openDatasetDir=打开数据集路径
copyPrevBounding=复制当前图像中的上一个边界框
showHide=显示/隐藏标签
changeSaveFormat=更改存储格式
@@ -85,7 +86,9 @@ detectionBoxposition=检测框位置
recognitionResult=识别结果
creatPolygon=四点标注
drawSquares=正方形标注
rotateLeft=图片左旋转90度
rotateRight=图片右旋转90度
saveRec=导出识别结果
tempLabel=待识别
nullLabel=无法识别
steps=操作步骤
@@ -96,9 +99,9 @@ ok=确认
autolabeling=自动标注中
hideBox=隐藏所有标注
showBox=显示所有标注
saveLabel=导出标记结果
singleRe=重识别此区块
labelDialogOption=弹出标记输入框
undo=撤销
undoLastPoint=撤销上个点
autoSaveMode=自动导出标记结果
\ No newline at end of file
@@ -3,6 +3,7 @@ openFileDetail=Open image or label file
quit=Quit
quitApp=Quit application
openDir=Open Dir
openDatasetDir=Open DatasetDir
copyPrevBounding=Copy previous Bounding Boxes in the current image
changeSavedAnnotationDir=Change default saved Annotation dir
openAnnotation=Open Annotation
@@ -84,8 +85,10 @@ iconList=Icon List
detectionBoxposition=Detection box position
recognitionResult=Recognition result
creatPolygon=Create Quadrilateral
rotateLeft=Left turn 90 degrees
rotateRight=Right turn 90 degrees
drawSquares=Draw Squares
saveRec=Export Recognition Result
tempLabel=TEMPORARY
nullLabel=NULL
steps=Steps
@@ -96,9 +99,9 @@ ok=OK
autolabeling=Automatic Labeling
hideBox=Hide All Box
showBox=Show All Box
saveLabel=Export Label
singleRe=Re-recognition RectBox
labelDialogOption=Pop-up Label Input Dialog
undo=Undo
undoLastPoint=Undo Last Point
autoSaveMode=Auto Export Label Mode
\ No newline at end of file
project(ppocr CXX C)

option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
@@ -11,7 +11,8 @@ SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")

set(DEMO_NAME "ppocr")

macro(safe_set_static_flag)
  foreach(flag_var
...
@@ -31,6 +31,8 @@
* *
*******************************************************************************/

#pragma once

#ifndef clipper_hpp
#define clipper_hpp
...
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iomanip>
#include <iostream>
#include <map>
#include <ostream>
#include <string>
#include <vector>
#include "include/utility.h"
namespace PaddleOCR {
class OCRConfig {
public:
explicit OCRConfig(const std::string &config_file) {
config_map_ = LoadConfig(config_file);
this->use_gpu = bool(stoi(config_map_["use_gpu"]));
this->gpu_id = stoi(config_map_["gpu_id"]);
this->gpu_mem = stoi(config_map_["gpu_mem"]);
this->cpu_math_library_num_threads =
stoi(config_map_["cpu_math_library_num_threads"]);
this->use_mkldnn = bool(stoi(config_map_["use_mkldnn"]));
this->max_side_len = stoi(config_map_["max_side_len"]);
this->det_db_thresh = stod(config_map_["det_db_thresh"]);
this->det_db_box_thresh = stod(config_map_["det_db_box_thresh"]);
this->det_db_unclip_ratio = stod(config_map_["det_db_unclip_ratio"]);
this->use_polygon_score = bool(stoi(config_map_["use_polygon_score"]));
this->det_model_dir.assign(config_map_["det_model_dir"]);
this->rec_model_dir.assign(config_map_["rec_model_dir"]);
this->char_list_file.assign(config_map_["char_list_file"]);
this->use_angle_cls = bool(stoi(config_map_["use_angle_cls"]));
this->cls_model_dir.assign(config_map_["cls_model_dir"]);
this->cls_thresh = stod(config_map_["cls_thresh"]);
this->visualize = bool(stoi(config_map_["visualize"]));
this->use_tensorrt = bool(stoi(config_map_["use_tensorrt"]));
this->use_fp16 = bool(stod(config_map_["use_fp16"]));
}
bool use_gpu = false;
int gpu_id = 0;
int gpu_mem = 4000;
int cpu_math_library_num_threads = 1;
bool use_mkldnn = false;
int max_side_len = 960;
double det_db_thresh = 0.3;
double det_db_box_thresh = 0.5;
double det_db_unclip_ratio = 2.0;
bool use_polygon_score = false;
std::string det_model_dir;
std::string rec_model_dir;
bool use_angle_cls;
std::string char_list_file;
std::string cls_model_dir;
double cls_thresh;
bool visualize = true;
bool use_tensorrt = false;
bool use_fp16 = false;
void PrintConfigInfo();
private:
// Load configuration
std::map<std::string, std::string> LoadConfig(const std::string &config_file);
std::vector<std::string> split(const std::string &str,
const std::string &delim);
std::map<std::string, std::string> config_map_;
};
} // namespace PaddleOCR
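`LoadConfig` and `split` above are declared without bodies; the file they read is the plain `key value` text format used by the deploy demo (one pair per line, `#` for comment lines; see the parameter listing further below). A minimal sketch of that parsing, written in Python for brevity (the format is inferred from this header and the docs, not taken from the committed C++ implementation):

```python
def load_config(config_file):
    """Parse 'key value' lines into a dict, skipping blanks and # comments."""
    config = {}
    with open(config_file, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition(' ')
            config[key] = value.strip()
    return config

# e.g. load_config('tools/config.txt').get('det_db_thresh')  ->  '0.3'
```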
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
@@ -40,7 +42,7 @@ public:
             const int &gpu_id, const int &gpu_mem,
             const int &cpu_math_library_num_threads,
             const bool &use_mkldnn, const double &cls_thresh,
             const bool &use_tensorrt, const std::string &precision) {
    this->use_gpu_ = use_gpu;
    this->gpu_id_ = gpu_id;
    this->gpu_mem_ = gpu_mem;
@@ -49,7 +51,7 @@ public:
    this->cls_thresh = cls_thresh;
    this->use_tensorrt_ = use_tensorrt;
    this->precision_ = precision;

    LoadModel(model_dir);
  }
@@ -73,7 +75,7 @@ private:
  std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
  bool is_scale_ = true;
  bool use_tensorrt_ = false;
  std::string precision_ = "fp32";

  // pre-process
  ClsResizeImg resize_op_;
  Normalize normalize_op_;
...
@@ -46,7 +46,7 @@ public:
             const double &det_db_box_thresh,
             const double &det_db_unclip_ratio,
             const bool &use_polygon_score, const bool &visualize,
             const bool &use_tensorrt, const std::string &precision) {
    this->use_gpu_ = use_gpu;
    this->gpu_id_ = gpu_id;
    this->gpu_mem_ = gpu_mem;
@@ -62,7 +62,7 @@ public:
    this->visualize_ = visualize;
    this->use_tensorrt_ = use_tensorrt;
    this->precision_ = precision;

    LoadModel(model_dir);
  }
@@ -71,7 +71,7 @@ public:
  void LoadModel(const std::string &model_dir);

  // Run predictor
  void Run(cv::Mat &img, std::vector<std::vector<std::vector<int>>> &boxes, std::vector<double> *times);

private:
  std::shared_ptr<Predictor> predictor_;
@@ -91,7 +91,7 @@ private:
  bool visualize_ = true;
  bool use_tensorrt_ = false;
  std::string precision_ = "fp32";

  std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
  std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
...
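Across these predictor headers, the boolean `use_fp16` is generalized to a `precision` string (defaulting to "fp32", with "fp16"/"int8" as the other usual values). For illustration only, a hedged Python sketch of what such a string typically maps to when TensorRT is enabled, using the `paddle.inference` API (treat the exact call as an assumption; it is not part of this commit):

```python
from paddle.inference import Config, PrecisionType

def set_trt_precision(config: Config, precision: str = "fp32"):
    """Enable TensorRT with the precision named by a config string (sketch)."""
    mode = {
        "fp32": PrecisionType.Float32,
        "fp16": PrecisionType.Half,
        "int8": PrecisionType.Int8,
    }.get(precision, PrecisionType.Float32)
    config.enable_tensorrt_engine(
        workspace_size=1 << 20,
        max_batch_size=10,
        min_subgraph_size=3,
        precision_mode=mode,
    )
```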
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
@@ -42,14 +44,14 @@ public:
             const int &gpu_id, const int &gpu_mem,
             const int &cpu_math_library_num_threads,
             const bool &use_mkldnn, const string &label_path,
             const bool &use_tensorrt, const std::string &precision) {
    this->use_gpu_ = use_gpu;
    this->gpu_id_ = gpu_id;
    this->gpu_mem_ = gpu_mem;
    this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
    this->use_mkldnn_ = use_mkldnn;
    this->use_tensorrt_ = use_tensorrt;
    this->precision_ = precision;

    this->label_list_ = Utility::ReadDict(label_path);
    this->label_list_.insert(this->label_list_.begin(),
@@ -62,8 +64,7 @@ public:
  // Load Paddle inference model
  void LoadModel(const std::string &model_dir);

  void Run(cv::Mat &img, std::vector<double> *times);

private:
  std::shared_ptr<Predictor> predictor_;
@@ -80,7 +81,7 @@ private:
  std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
  bool is_scale_ = true;
  bool use_tensorrt_ = false;
  std::string precision_ = "fp32";

  // pre-process
  CrnnResizeImg resize_op_;
  Normalize normalize_op_;
@@ -89,9 +90,6 @@ private:
  // post-process
  PostProcessor post_processor_;

}; // class CrnnRecognizer

} // namespace PaddleOCR
@@ -47,6 +47,9 @@ public:
  static void GetAllFiles(const char *dir_name,
                          std::vector<std::string> &all_inputs);

  static cv::Mat GetRotateCropImage(const cv::Mat &srcimage,
                                    std::vector<std::vector<int>> box);
};

} // namespace PaddleOCR
\ No newline at end of file
@@ -154,82 +154,102 @@ inference/

* 编译命令如下,其中Paddle C++预测库、opencv等其他依赖库的地址需要换成自己机器上的实际地址。

```shell
sh tools/build.sh
```

* 具体地,需要修改`tools/build.sh`中的环境路径,相关内容如下:

```shell
OPENCV_DIR=your_opencv_dir
LIB_DIR=your_paddle_inference_dir
CUDA_LIB_DIR=your_cuda_lib_dir
CUDNN_LIB_DIR=/your_cudnn_lib_dir
```

其中,`OPENCV_DIR`为opencv编译安装的地址;`LIB_DIR`为下载(`paddle_inference`文件夹)或者编译生成的Paddle预测库地址(`build/paddle_inference_install_dir`文件夹);`CUDA_LIB_DIR`为cuda库文件地址,在docker中为`/usr/local/cuda/lib64`;`CUDNN_LIB_DIR`为cudnn库文件地址,在docker中为`/usr/lib/x86_64-linux-gnu/`。**注意:以上路径都写绝对路径,不要写相对路径。**

* 编译完成之后,会在`build`文件夹下生成一个名为`ppocr`的可执行文件。
### 运行demo

运行方式:

```shell
./build/ppocr <mode> [--param1] [--param2] [...]
```

其中,`mode`为必选参数,表示选择的功能,取值范围['det', 'rec', 'system'],分别表示调用检测、识别、检测识别串联(包括方向分类器)。具体命令如下:

##### 1. 只调用检测:

```shell
./build/ppocr det \
    --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
    --image_dir=../../doc/imgs/12.jpg
```

##### 2. 只调用识别:

```shell
./build/ppocr rec \
    --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
    --image_dir=../../doc/imgs_words/ch/
```

##### 3. 调用串联:

```shell
# 不使用方向分类器
./build/ppocr system \
    --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
    --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
    --image_dir=../../doc/imgs/12.jpg

# 使用方向分类器
./build/ppocr system \
    --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
    --use_angle_cls=true \
    --cls_model_dir=inference/ch_ppocr_mobile_v2.0_cls_infer \
    --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
    --image_dir=../../doc/imgs/12.jpg
```
* 若需要使用方向分类器,则需要将`tools/config.txt`中的`use_angle_cls`参数修改为1,表示开启方向分类器的预测。 更多参数如下:
* 此外,`tools/config.txt`中的参数及解释如下。
``` - 通用参数
use_gpu 0 # 是否使用GPU,1表示使用,0表示不使用
gpu_id 0 # GPU id,使用GPU时有效
gpu_mem 4000 # 申请的GPU内存
cpu_math_library_num_threads 10 # CPU预测时的线程数,在机器核数充足的情况下,该值越大,预测速度越快
use_mkldnn 1 # 是否使用mkldnn库
# det config |参数名称|类型|默认参数|意义|
max_side_len 960 # 输入图像长宽大于960时,等比例缩放图像,使得图像最长边为960 | --- | --- | --- | --- |
det_db_thresh 0.3 # 用于过滤DB预测的二值化图像,设置为0~0.3对结果影响不明显 |use_gpu|bool|false|是否使用GPU|
det_db_box_thresh 0.5 # DB后处理过滤box的阈值,如果检测存在漏框情况,可酌情减小 |gpu_id|int|0|GPU id,使用GPU时有效|
det_db_unclip_ratio 1.6 # 表示文本框的紧致程度,越小则文本框更靠近文本 |gpu_mem|int|4000|申请的GPU内存|
use_polygon_score 1 # 是否使用多边形框计算bbox score,0表示使用矩形框计算。矩形框计算速度更快,多边形框对弯曲文本区域计算更准确。 |cpu_math_library_num_threads|int|10|CPU预测时的线程数,在机器核数充足的情况下,该值越大,预测速度越快|
det_model_dir ./inference/det_db # 检测模型inference model地址 |use_mkldnn|bool|true|是否使用mkldnn库|
# cls config - 检测模型相关
use_angle_cls 0 # 是否使用方向分类器,0表示不使用,1表示使用
cls_model_dir ./inference/cls # 方向分类器inference model地址
cls_thresh 0.9 # 方向分类器的得分阈值
# rec config |参数名称|类型|默认参数|意义|
rec_model_dir ./inference/rec_crnn # 识别模型inference model地址 | --- | --- | --- | --- |
char_list_file ../../ppocr/utils/ppocr_keys_v1.txt # 字典文件 |det_model_dir|string|-|检测模型inference model地址|
|max_side_len|int|960|输入图像长宽大于960时,等比例缩放图像,使得图像最长边为960|
|det_db_thresh|float|0.3|用于过滤DB预测的二值化图像,设置为0~0.3对结果影响不明显|
|det_db_box_thresh|float|0.5|DB后处理过滤box的阈值,如果检测存在漏框情况,可酌情减小|
|det_db_unclip_ratio|float|1.6|表示文本框的紧致程度,越小则文本框更靠近文本|
|use_polygon_score|bool|false|是否使用多边形框计算bbox score,false表示使用矩形框计算。矩形框计算速度更快,多边形框对弯曲文本区域计算更准确。|
|visualize|bool|true|是否对结果进行可视化,为true时,会在当前文件夹下保存文件名为`ocr_vis.png`的预测结果。|
- 方向分类器相关
|参数名称|类型|默认参数|意义|
| --- | --- | --- | --- |
|use_angle_cls|bool|false|是否使用方向分类器|
|cls_model_dir|string|-|方向分类器inference model地址|
|cls_thresh|float|0.9|方向分类器的得分阈值|
- 识别模型相关
|参数名称|类型|默认参数|意义|
| --- | --- | --- | --- |
|rec_model_dir|string|-|识别模型inference model地址|
|char_list_file|string|../../ppocr/utils/ppocr_keys_v1.txt|字典文件|
# show the detection results
visualize 1 # 是否对结果进行可视化,为1时,会在当前文件夹下保存文件名为`ocr_vis.png`的预测结果。
```
* PaddleOCR也支持多语言的预测,更多支持的语言和模型可以参考[识别文档](../../doc/doc_ch/recognition.md)中的多语言字典与模型部分,如果希望进行多语言预测,只需修改`tools/config.txt`中的`char_list_file`(字典文件路径)以及`rec_model_dir`(inference模型路径)字段即可。 * PaddleOCR也支持多语言的预测,更多支持的语言和模型可以参考[识别文档](../../doc/doc_ch/recognition.md)中的多语言字典与模型部分,如果希望进行多语言预测,只需修改`char_list_file`(字典文件路径)以及`rec_model_dir`(inference模型路径)字段即可。
最终屏幕上会输出检测结果如下。 最终屏幕上会输出检测结果如下。
......
...@@ -162,30 +162,13 @@ inference/ ...@@ -162,30 +162,13 @@ inference/
sh tools/build.sh sh tools/build.sh
``` ```
Specifically, the content in `tools/build.sh` is as follows. Specifically, you should modify the paths in `tools/build.sh`. The related content is as follows.
```shell ```shell
OPENCV_DIR=your_opencv_dir OPENCV_DIR=your_opencv_dir
LIB_DIR=your_paddle_inference_dir LIB_DIR=your_paddle_inference_dir
CUDA_LIB_DIR=your_cuda_lib_dir CUDA_LIB_DIR=your_cuda_lib_dir
CUDNN_LIB_DIR=your_cudnn_lib_dir CUDNN_LIB_DIR=your_cudnn_lib_dir
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DDEMO_NAME=ocr_system \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DUSE_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
make -j
``` ```
`OPENCV_DIR` is the opencv installation path; `LIB_DIR` is the download (`paddle_inference` folder) `OPENCV_DIR` is the opencv installation path; `LIB_DIR` is the download (`paddle_inference` folder)
...@@ -193,48 +176,84 @@ or the generated Paddle inference library path (`build/paddle_inference_install_ ...@@ -193,48 +176,84 @@ or the generated Paddle inference library path (`build/paddle_inference_install_
`CUDA_LIB_DIR` is the cuda library file path, in docker it is `/usr/local/cuda/lib64`; `CUDNN_LIB_DIR` is the cudnn library file path, in docker it is `/usr/lib/x86_64-linux-gnu/`. `CUDA_LIB_DIR` is the cuda library file path, in docker it is `/usr/local/cuda/lib64`; `CUDNN_LIB_DIR` is the cudnn library file path, in docker it is `/usr/lib/x86_64-linux-gnu/`.
* After the compilation is completed, an executable file named `ocr_system` will be generated in the `build` folder. * After the compilation is completed, an executable file named `ppocr` will be generated in the `build` folder.
### Run the demo ### Run the demo
* Execute the following command to complete the OCR recognition and detection of an image. Execute the built executable file:
```shell
./build/ppocr <mode> [--param1] [--param2] [...]
```
Here, `mode` is a required parameter, and its value range is ['det', 'rec', 'system'], representing detection only, recognition only, and the end-to-end system (including the direction classifier), respectively. Specifically,
##### 1. run det demo:
```shell
./build/ppocr det \
--det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
--image_dir=../../doc/imgs/12.jpg
```
##### 2. run rec demo:
```shell
./build/ppocr rec \
--rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
--image_dir=../../doc/imgs_words/ch/
```
##### 3. run system demo:
```shell ```shell
sh tools/run.sh # without text direction classifier
./build/ppocr system \
--det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
--rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
--image_dir=../../doc/imgs/12.jpg
# with text direction classifier
./build/ppocr system \
--det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \
--use_angle_cls=true \
--cls_model_dir=inference/ch_ppocr_mobile_v2.0_cls_infer \
--rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \
--image_dir=../../doc/imgs/12.jpg
``` ```
* If you want the orientation classifier to correct the detected boxes, you can set `use_angle_cls` in the file `tools/config.txt` to 1 to enable it. More parameters are as follows.
* What's more, the parameters and their meanings in `tools/config.txt` are as follows.
- common parameters
``` |parameter|data type|default|meaning|
use_gpu 0 # Whether to use GPU, 0 means not to use, 1 means to use | --- | --- | --- | --- |
gpu_id 0 # GPU id when use_gpu is 1 |use_gpu|bool|false|Whether to use GPU|
gpu_mem 4000 # GPU memory requested |gpu_id|int|0|GPU id when use_gpu is true|
cpu_math_library_num_threads 10 # Number of threads when using CPU inference. When machine cores are sufficient, the larger the value, the faster the inference speed |cpu_math_library_num_threads|int|10|Number of threads when using CPU inference. When machine cores are sufficient, the larger the value, the faster the inference speed|
use_mkldnn 1 # Whether to use mkldnn library |use_mkldnn|bool|true|Whether to use mkldnn library|
|use_mkldnn|bool|true|Whether to use mkdlnn library|
max_side_len 960 # Limit the maximum image height and width to 960 - detection related parameters
det_db_thresh 0.3 # Used to filter the binarized image of DB prediction, setting it to 0~0.3 has no obvious effect on the result
det_db_box_thresh 0.5 # DB post-processing filter box threshold, if there is a missing box detected, it can be reduced as appropriate
det_db_unclip_ratio 1.6 # Indicates the compactness of the text box, the smaller the value, the closer the text box to the text
use_polygon_score 1 # Whether to use polygon box to calculate bbox score, 0 means to use rectangle box to calculate. Rectangular boxes are faster to compute; polygonal boxes are more accurate for curved text areas.
det_model_dir ./inference/det_db # Address of detection inference model
# cls config |parameter|data type|default|meaning|
use_angle_cls 0 # Whether to use the direction classifier, 0 means not to use, 1 means to use | --- | --- | --- | --- |
cls_model_dir ./inference/cls # Address of direction classifier inference model |det_model_dir|string|-|Address of detection inference model|
cls_thresh 0.9 # Score threshold of the direction classifier |max_side_len|int|960|Limit the maximum image height and width to 960|
|det_db_thresh|float|0.3|Used to filter the binarized image of DB prediction, setting it to 0~0.3 has no obvious effect on the result|
|det_db_box_thresh|float|0.5|DB post-processing filter box threshold, if there is a missing box detected, it can be reduced as appropriate|
|det_db_unclip_ratio|float|1.6|Indicates the compactness of the text box, the smaller the value, the closer the text box to the text|
|use_polygon_score|bool|false|Whether to use polygon box to calculate bbox score, false means to use rectangle box to calculate. Rectangular boxes are faster to compute; polygonal boxes are more accurate for curved text areas.|
|visualize|bool|true|Whether to visualize the results. When set to true, the prediction result will be saved in the image file `./ocr_vis.png`.|
# rec config - classifier related parameters
rec_model_dir ./inference/rec_crnn # Address of recognition inference model
char_list_file ../../ppocr/utils/ppocr_keys_v1.txt # dictionary file
# show the detection results |parameter|data type|default|meaning|
visualize 1 # Whether to visualize the results. When set to 1, the prediction result will be saved in the image file `./ocr_vis.png`. | --- | --- | --- | --- |
``` |use_angle_cls|bool|false|Whether to use the direction classifier|
|cls_model_dir|string|-|Address of direction classifier inference model|
|cls_thresh|float|0.9|Score threshold of the direction classifier|
- recognition related parameters
|parameter|data type|default|meaning|
| --- | --- | --- | --- |
|rec_model_dir|string|-|Address of recognition inference model|
|char_list_file|string|../../ppocr/utils/ppocr_keys_v1.txt|dictionary file|
* Multi-language inference is also supported in PaddleOCR. You can refer to the [recognition tutorial](../../doc/doc_en/recognition_en.md) for more supported languages and models. Specifically, if you want to infer with multi-language models, you just need to modify the values of `char_list_file` and `rec_model_dir` in the file `tools/config.txt`. * Multi-language inference is also supported in PaddleOCR. You can refer to the [recognition tutorial](../../doc/doc_en/recognition_en.md) for more supported languages and models. Specifically, if you want to infer with multi-language models, you just need to modify the values of `char_list_file` and `rec_model_dir`.
The detection results will be shown on the screen, which is as follows. The detection results will be shown on the screen, which is as follows.
......
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <include/config.h>
namespace PaddleOCR {
std::vector<std::string> OCRConfig::split(const std::string &str,
const std::string &delim) {
std::vector<std::string> res;
if ("" == str)
return res;
int strlen = str.length() + 1;
char *strs = new char[strlen];
std::strcpy(strs, str.c_str());
int delimlen = delim.length() + 1;
char *d = new char[delimlen];
std::strcpy(d, delim.c_str());
char *p = std::strtok(strs, d);
while (p) {
std::string s = p;
res.push_back(s);
p = std::strtok(NULL, d);
}
// free the temporary buffers only after strtok has finished with them
delete[] strs;
delete[] d;
return res;
}
std::map<std::string, std::string>
OCRConfig::LoadConfig(const std::string &config_path) {
auto config = Utility::ReadDict(config_path);
std::map<std::string, std::string> dict;
for (int i = 0; i < config.size(); i++) {
// pass for empty line or comment
if (config[i].size() <= 1 || config[i][0] == '#') {
continue;
}
std::vector<std::string> res = split(config[i], " ");
dict[res[0]] = res[1];
}
return dict;
}
void OCRConfig::PrintConfigInfo() {
std::cout << "=======Paddle OCR inference config======" << std::endl;
for (auto iter = config_map_.begin(); iter != config_map_.end(); iter++) {
std::cout << iter->first << " : " << iter->second << std::endl;
}
std::cout << "=======End of Paddle OCR inference config======" << std::endl;
}
} // namespace PaddleOCR
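For reference, the `key value` parsing that `split` and `LoadConfig` implement above can be written without manual `new[]`/`delete[]` bookkeeping by using `std::istringstream`; the snippet below is a minimal standalone sketch, not part of this commit:
```cpp
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// Parse "key value" lines, skipping blanks and '#' comments,
// mirroring OCRConfig::LoadConfig above.
std::map<std::string, std::string> ParseConfig(
    const std::vector<std::string> &lines) {
  std::map<std::string, std::string> dict;
  for (const auto &line : lines) {
    if (line.size() <= 1 || line[0] == '#')
      continue; // skip empty lines and comments
    std::istringstream iss(line);
    std::string key, value;
    if (iss >> key >> value)
      dict[key] = value;
  }
  return dict;
}

int main() {
  for (const auto &kv :
       ParseConfig({"# det config", "use_gpu 0", "max_side_len 960"}))
    std::cout << kv.first << " : " << kv.second << std::endl;
  return 0;
}
```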
...@@ -28,76 +28,276 @@ ...@@ -28,76 +28,276 @@
#include <numeric> #include <numeric>
#include <glog/logging.h> #include <glog/logging.h>
#include <include/config.h>
#include <include/ocr_det.h> #include <include/ocr_det.h>
#include <include/ocr_cls.h>
#include <include/ocr_rec.h> #include <include/ocr_rec.h>
#include <include/utility.h> #include <include/utility.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <gflags/gflags.h>
DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
DEFINE_int32(gpu_mem, 4000, "GPU memory requested when inferring with GPU.");
DEFINE_int32(cpu_math_library_num_threads, 10, "Num of threads with CPU.");
DEFINE_bool(use_mkldnn, false, "Whether to use mkldnn with CPU.");
DEFINE_bool(use_tensorrt, false, "Whether to use tensorrt.");
DEFINE_string(precision, "fp32", "Precision, one of fp32/fp16/int8.");
DEFINE_bool(benchmark, true, "Whether to print benchmark logs.");
DEFINE_string(save_log_path, "./log_output/", "Save benchmark log path.");
// detection related
DEFINE_string(image_dir, "", "Dir of input image.");
DEFINE_string(det_model_dir, "", "Path of det inference model.");
DEFINE_int32(max_side_len, 960, "max_side_len of input image.");
DEFINE_double(det_db_thresh, 0.3, "Threshold of det_db_thresh.");
DEFINE_double(det_db_box_thresh, 0.5, "Threshold of det_db_box_thresh.");
DEFINE_double(det_db_unclip_ratio, 1.6, "Threshold of det_db_unclip_ratio.");
DEFINE_bool(use_polygon_score, false, "Whether to use polygon score.");
DEFINE_bool(visualize, true, "Whether show the detection results.");
// classification related
DEFINE_bool(use_angle_cls, false, "Whether to use the angle classifier.");
DEFINE_string(cls_model_dir, "", "Path of cls inference model.");
DEFINE_double(cls_thresh, 0.9, "Threshold of cls_thresh.");
// recognition related
DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");
using namespace std; using namespace std;
using namespace cv; using namespace cv;
using namespace PaddleOCR; using namespace PaddleOCR;
void PrintBenchmarkLog(std::string model_name,
int batch_size,
std::string input_shape,
std::vector<double> time_info,
int img_num){
LOG(INFO) << "----------------------- Config info -----------------------";
LOG(INFO) << "runtime_device: " << (FLAGS_use_gpu ? "gpu" : "cpu");
LOG(INFO) << "ir_optim: " << "True";
LOG(INFO) << "enable_memory_optim: " << "True";
LOG(INFO) << "enable_tensorrt: " << FLAGS_use_tensorrt;
LOG(INFO) << "enable_mkldnn: " << (FLAGS_use_mkldnn ? "True" : "False");
LOG(INFO) << "cpu_math_library_num_threads: " << FLAGS_cpu_math_library_num_threads;
LOG(INFO) << "----------------------- Data info -----------------------";
LOG(INFO) << "batch_size: " << batch_size;
LOG(INFO) << "input_shape: " << input_shape;
LOG(INFO) << "data_num: " << img_num;
LOG(INFO) << "----------------------- Model info -----------------------";
LOG(INFO) << "model_name: " << model_name;
LOG(INFO) << "precision: " << FLAGS_precision;
LOG(INFO) << "----------------------- Perf info ------------------------";
LOG(INFO) << "Total time spent(ms): "
<< std::accumulate(time_info.begin(), time_info.end(), 0);
LOG(INFO) << "preprocess_time(ms): " << time_info[0] / img_num
<< ", inference_time(ms): " << time_info[1] / img_num
<< ", postprocess_time(ms): " << time_info[2] / img_num;
}
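// Note: each Run() below fills `times` with three durations in milliseconds,
// {preprocess, inference, postprocess}; main_det/main_rec accumulate them in
// `time_info`, and PrintBenchmarkLog divides each entry by `img_num` to
// report per-image averages.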
static bool PathExists(const std::string& path){
#ifdef _WIN32
struct _stat buffer;
return (_stat(path.c_str(), &buffer) == 0);
#else
struct stat buffer;
return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}
int main_det(std::vector<cv::String> cv_all_img_names) {
std::vector<double> time_info = {0, 0, 0};
DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
FLAGS_use_polygon_score, FLAGS_visualize,
FLAGS_use_tensorrt, FLAGS_precision);
for (int i = 0; i < cv_all_img_names.size(); ++i) {
LOG(INFO) << "The predict img: " << cv_all_img_names[i];
cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
if (!srcimg.data) {
std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
exit(1);
}
std::vector<std::vector<std::vector<int>>> boxes;
std::vector<double> det_times;
det.Run(srcimg, boxes, &det_times);
time_info[0] += det_times[0];
time_info[1] += det_times[1];
time_info[2] += det_times[2];
}
if (FLAGS_benchmark) {
PrintBenchmarkLog("det", 1, "dynamic", time_info, cv_all_img_names.size());
}
return 0;
}
int main_rec(std::vector<cv::String> cv_all_img_names) {
std::vector<double> time_info = {0, 0, 0};
CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
FLAGS_use_mkldnn, FLAGS_char_list_file,
FLAGS_use_tensorrt, FLAGS_precision);
for (int i = 0; i < cv_all_img_names.size(); ++i) {
LOG(INFO) << "The predict img: " << cv_all_img_names[i];
cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
if (!srcimg.data) {
std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
exit(1);
}
std::vector<double> rec_times;
rec.Run(srcimg, &rec_times);
time_info[0] += rec_times[0];
time_info[1] += rec_times[1];
time_info[2] += rec_times[2];
}
if (FLAGS_benchmark) {
PrintBenchmarkLog("rec", 1, "dynamic", time_info, cv_all_img_names.size());
}
return 0;
}
int main_system(std::vector<cv::String> cv_all_img_names) {
DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
FLAGS_use_polygon_score, FLAGS_visualize,
FLAGS_use_tensorrt, FLAGS_precision);
Classifier *cls = nullptr;
if (FLAGS_use_angle_cls) {
cls = new Classifier(FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
FLAGS_use_mkldnn, FLAGS_cls_thresh,
FLAGS_use_tensorrt, FLAGS_precision);
}
CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
FLAGS_use_mkldnn, FLAGS_char_list_file,
FLAGS_use_tensorrt, FLAGS_precision);
auto start = std::chrono::system_clock::now();
for (int i = 0; i < cv_all_img_names.size(); ++i) {
LOG(INFO) << "The predict img: " << cv_all_img_names[i];
cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
if (!srcimg.data) {
std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
exit(1);
}
std::vector<std::vector<std::vector<int>>> boxes;
std::vector<double> det_times;
std::vector<double> rec_times;
det.Run(srcimg, boxes, &det_times);
cv::Mat crop_img;
for (int j = 0; j < boxes.size(); j++) {
crop_img = Utility::GetRotateCropImage(srcimg, boxes[j]);
if (cls != nullptr) {
crop_img = cls->Run(crop_img);
}
rec.Run(crop_img, &rec_times);
}
auto end = std::chrono::system_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::microseconds>(end - start);
std::cout << "Cost "
<< double(duration.count()) *
std::chrono::microseconds::period::num /
std::chrono::microseconds::period::den
<< "s" << std::endl;
}
return 0;
}
void check_params(char* mode) {
if (strcmp(mode, "det")==0) {
if (FLAGS_det_model_dir.empty() || FLAGS_image_dir.empty()) {
std::cout << "Usage[det]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
<< "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
exit(1);
}
}
if (strcmp(mode, "rec")==0) {
if (FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) {
std::cout << "Usage[rec]: ./ppocr --rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
<< "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
exit(1);
}
}
if (strcmp(mode, "system")==0) {
if ((FLAGS_det_model_dir.empty() || FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) ||
(FLAGS_use_angle_cls && FLAGS_cls_model_dir.empty())) {
std::cout << "Usage[system without angle cls]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
<< "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
<< "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
std::cout << "Usage[system with angle cls]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
<< "--use_angle_cls=true "
<< "--cls_model_dir=/PATH/TO/CLS_INFERENCE_MODEL/ "
<< "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
<< "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
exit(1);
}
}
if (FLAGS_precision != "fp32" && FLAGS_precision != "fp16" && FLAGS_precision != "int8") {
cout << "precison should be 'fp32'(default), 'fp16' or 'int8'. " << endl;
exit(1);
}
}
int main(int argc, char **argv) { int main(int argc, char **argv) {
if (argc < 3) { if (argc<=1 || (strcmp(argv[1], "det")!=0 && strcmp(argv[1], "rec")!=0 && strcmp(argv[1], "system")!=0)) {
std::cerr << "[ERROR] usage: " << argv[0] std::cout << "Please choose one mode of [det, rec, system] !" << std::endl;
<< " configure_filepath image_path\n"; return -1;
exit(1); }
} std::cout << "mode: " << argv[1] << endl;
OCRConfig config(argv[1]); // Parsing command-line
google::ParseCommandLineFlags(&argc, &argv, true);
config.PrintConfigInfo(); check_params(argv[1]);
std::string img_path(argv[2]); if (!PathExists(FLAGS_image_dir)) {
std::vector<std::string> all_img_names; std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
Utility::GetAllFiles((char *)img_path.c_str(), all_img_names); exit(1);
DBDetector det(config.det_model_dir, config.use_gpu, config.gpu_id,
config.gpu_mem, config.cpu_math_library_num_threads,
config.use_mkldnn, config.max_side_len, config.det_db_thresh,
config.det_db_box_thresh, config.det_db_unclip_ratio,
config.use_polygon_score, config.visualize,
config.use_tensorrt, config.use_fp16);
Classifier *cls = nullptr;
if (config.use_angle_cls == true) {
cls = new Classifier(config.cls_model_dir, config.use_gpu, config.gpu_id,
config.gpu_mem, config.cpu_math_library_num_threads,
config.use_mkldnn, config.cls_thresh,
config.use_tensorrt, config.use_fp16);
}
CRNNRecognizer rec(config.rec_model_dir, config.use_gpu, config.gpu_id,
config.gpu_mem, config.cpu_math_library_num_threads,
config.use_mkldnn, config.char_list_file,
config.use_tensorrt, config.use_fp16);
auto start = std::chrono::system_clock::now();
for (auto img_dir : all_img_names) {
LOG(INFO) << "The predict img: " << img_dir;
cv::Mat srcimg = cv::imread(img_dir, cv::IMREAD_COLOR);
if (!srcimg.data) {
std::cerr << "[ERROR] image read failed! image path: " << img_path
<< "\n";
exit(1);
} }
std::vector<std::vector<std::vector<int>>> boxes;
std::vector<cv::String> cv_all_img_names;
det.Run(srcimg, boxes); cv::glob(FLAGS_image_dir, cv_all_img_names);
std::cout << "total images num: " << cv_all_img_names.size() << endl;
rec.Run(boxes, srcimg, cls);
auto end = std::chrono::system_clock::now(); if (strcmp(argv[1], "det")==0) {
auto duration = return main_det(cv_all_img_names);
std::chrono::duration_cast<std::chrono::microseconds>(end - start); }
std::cout << "Cost " if (strcmp(argv[1], "rec")==0) {
<< double(duration.count()) * return main_rec(cv_all_img_names);
std::chrono::microseconds::period::num / }
std::chrono::microseconds::period::den if (strcmp(argv[1], "system")==0) {
<< "s" << std::endl; return main_system(cv_all_img_names);
} }
return 0;
} }
...@@ -77,10 +77,16 @@ void Classifier::LoadModel(const std::string &model_dir) { ...@@ -77,10 +77,16 @@ void Classifier::LoadModel(const std::string &model_dir) {
if (this->use_gpu_) { if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_); config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) { if (this->use_tensorrt_) {
auto precision = paddle_infer::Config::Precision::kFloat32;
if (this->precision_ == "fp16") {
precision = paddle_infer::Config::Precision::kHalf;
}
if (this->precision_ == "int8") {
precision = paddle_infer::Config::Precision::kInt8;
}
config.EnableTensorRtEngine( config.EnableTensorRtEngine(
1 << 20, 10, 3, 1 << 20, 10, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf precision,
: paddle_infer::Config::Precision::kFloat32,
false, false); false, false);
} }
} else { } else {
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <include/ocr_det.h> #include <include/ocr_det.h>
namespace PaddleOCR { namespace PaddleOCR {
void DBDetector::LoadModel(const std::string &model_dir) { void DBDetector::LoadModel(const std::string &model_dir) {
...@@ -25,10 +26,16 @@ void DBDetector::LoadModel(const std::string &model_dir) { ...@@ -25,10 +26,16 @@ void DBDetector::LoadModel(const std::string &model_dir) {
if (this->use_gpu_) { if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_); config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) { if (this->use_tensorrt_) {
auto precision = paddle_infer::Config::Precision::kFloat32;
if (this->precision_ == "fp16") {
precision = paddle_infer::Config::Precision::kHalf;
}
if (this->precision_ == "int8") {
precision = paddle_infer::Config::Precision::kInt8;
}
config.EnableTensorRtEngine( config.EnableTensorRtEngine(
1 << 20, 10, 3, 1 << 20, 10, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf precision,
: paddle_infer::Config::Precision::kFloat32,
false, false); false, false);
std::map<std::string, std::vector<int>> min_input_shape = { std::map<std::string, std::vector<int>> min_input_shape = {
{"x", {1, 3, 50, 50}}, {"x", {1, 3, 50, 50}},
...@@ -90,13 +97,16 @@ void DBDetector::LoadModel(const std::string &model_dir) { ...@@ -90,13 +97,16 @@ void DBDetector::LoadModel(const std::string &model_dir) {
} }
void DBDetector::Run(cv::Mat &img, void DBDetector::Run(cv::Mat &img,
std::vector<std::vector<std::vector<int>>> &boxes) { std::vector<std::vector<std::vector<int>>> &boxes,
std::vector<double> *times) {
float ratio_h{}; float ratio_h{};
float ratio_w{}; float ratio_w{};
cv::Mat srcimg; cv::Mat srcimg;
cv::Mat resize_img; cv::Mat resize_img;
img.copyTo(srcimg); img.copyTo(srcimg);
auto preprocess_start = std::chrono::steady_clock::now();
this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w, this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w,
this->use_tensorrt_); this->use_tensorrt_);
...@@ -105,14 +115,17 @@ void DBDetector::Run(cv::Mat &img, ...@@ -105,14 +115,17 @@ void DBDetector::Run(cv::Mat &img,
std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f); std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
this->permute_op_.Run(&resize_img, input.data()); this->permute_op_.Run(&resize_img, input.data());
auto preprocess_end = std::chrono::steady_clock::now();
// Inference. // Inference.
auto input_names = this->predictor_->GetInputNames(); auto input_names = this->predictor_->GetInputNames();
auto input_t = this->predictor_->GetInputHandle(input_names[0]); auto input_t = this->predictor_->GetInputHandle(input_names[0]);
input_t->Reshape({1, 3, resize_img.rows, resize_img.cols}); input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
auto inference_start = std::chrono::steady_clock::now();
input_t->CopyFromCpu(input.data()); input_t->CopyFromCpu(input.data());
this->predictor_->Run(); this->predictor_->Run();
std::vector<float> out_data; std::vector<float> out_data;
auto output_names = this->predictor_->GetOutputNames(); auto output_names = this->predictor_->GetOutputNames();
auto output_t = this->predictor_->GetOutputHandle(output_names[0]); auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
...@@ -122,7 +135,9 @@ void DBDetector::Run(cv::Mat &img, ...@@ -122,7 +135,9 @@ void DBDetector::Run(cv::Mat &img,
out_data.resize(out_num); out_data.resize(out_num);
output_t->CopyToCpu(out_data.data()); output_t->CopyToCpu(out_data.data());
auto inference_end = std::chrono::steady_clock::now();
auto postprocess_start = std::chrono::steady_clock::now();
int n2 = output_shape[2]; int n2 = output_shape[2];
int n3 = output_shape[3]; int n3 = output_shape[3];
int n = n2 * n3; int n = n2 * n3;
...@@ -150,7 +165,16 @@ void DBDetector::Run(cv::Mat &img, ...@@ -150,7 +165,16 @@ void DBDetector::Run(cv::Mat &img,
this->det_db_unclip_ratio_, this->use_polygon_score_); this->det_db_unclip_ratio_, this->use_polygon_score_);
boxes = post_processor_.FilterTagDetRes(boxes, ratio_h, ratio_w, srcimg); boxes = post_processor_.FilterTagDetRes(boxes, ratio_h, ratio_w, srcimg);
auto postprocess_end = std::chrono::steady_clock::now();
std::cout << "Detected boxes num: " << boxes.size() << endl;
std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
times->push_back(double(preprocess_diff.count() * 1000));
std::chrono::duration<float> inference_diff = inference_end - inference_start;
times->push_back(double(inference_diff.count() * 1000));
std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
times->push_back(double(postprocess_diff.count() * 1000));
//// visualization //// visualization
if (this->visualize_) { if (this->visualize_) {
Utility::VisualizeBboxes(srcimg, boxes); Utility::VisualizeBboxes(srcimg, boxes);
......
...@@ -16,80 +16,80 @@ ...@@ -16,80 +16,80 @@
namespace PaddleOCR { namespace PaddleOCR {
void CRNNRecognizer::Run(std::vector<std::vector<std::vector<int>>> boxes, void CRNNRecognizer::Run(cv::Mat &img, std::vector<double> *times) {
cv::Mat &img, Classifier *cls) {
cv::Mat srcimg; cv::Mat srcimg;
img.copyTo(srcimg); img.copyTo(srcimg);
cv::Mat crop_img;
cv::Mat resize_img; cv::Mat resize_img;
std::cout << "The predicted text is :" << std::endl; float wh_ratio = float(srcimg.cols) / float(srcimg.rows);
int index = 0; auto preprocess_start = std::chrono::steady_clock::now();
for (int i = 0; i < boxes.size(); i++) { this->resize_op_.Run(srcimg, resize_img, wh_ratio, this->use_tensorrt_);
crop_img = GetRotateCropImage(srcimg, boxes[i]);
this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
if (cls != nullptr) { this->is_scale_);
crop_img = cls->Run(crop_img);
} std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
float wh_ratio = float(crop_img.cols) / float(crop_img.rows); this->permute_op_.Run(&resize_img, input.data());
auto preprocess_end = std::chrono::steady_clock::now();
this->resize_op_.Run(crop_img, resize_img, wh_ratio, this->use_tensorrt_);
// Inference.
this->normalize_op_.Run(&resize_img, this->mean_, this->scale_, auto input_names = this->predictor_->GetInputNames();
this->is_scale_); auto input_t = this->predictor_->GetInputHandle(input_names[0]);
input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f); auto inference_start = std::chrono::steady_clock::now();
input_t->CopyFromCpu(input.data());
this->permute_op_.Run(&resize_img, input.data()); this->predictor_->Run();
// Inference. std::vector<float> predict_batch;
auto input_names = this->predictor_->GetInputNames(); auto output_names = this->predictor_->GetOutputNames();
auto input_t = this->predictor_->GetInputHandle(input_names[0]); auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
input_t->Reshape({1, 3, resize_img.rows, resize_img.cols}); auto predict_shape = output_t->shape();
input_t->CopyFromCpu(input.data());
this->predictor_->Run(); int out_num = std::accumulate(predict_shape.begin(), predict_shape.end(), 1,
std::multiplies<int>());
std::vector<float> predict_batch; predict_batch.resize(out_num);
auto output_names = this->predictor_->GetOutputNames();
auto output_t = this->predictor_->GetOutputHandle(output_names[0]); output_t->CopyToCpu(predict_batch.data());
auto predict_shape = output_t->shape(); auto inference_end = std::chrono::steady_clock::now();
int out_num = std::accumulate(predict_shape.begin(), predict_shape.end(), 1, // ctc decode
std::multiplies<int>()); auto postprocess_start = std::chrono::steady_clock::now();
predict_batch.resize(out_num); std::vector<std::string> str_res;
int argmax_idx;
output_t->CopyToCpu(predict_batch.data()); int last_index = 0;
float score = 0.f;
// ctc decode int count = 0;
std::vector<std::string> str_res; float max_value = 0.0f;
int argmax_idx;
int last_index = 0; for (int n = 0; n < predict_shape[1]; n++) {
float score = 0.f; argmax_idx =
int count = 0; int(Utility::argmax(&predict_batch[n * predict_shape[2]],
float max_value = 0.0f; &predict_batch[(n + 1) * predict_shape[2]]));
max_value =
for (int n = 0; n < predict_shape[1]; n++) { float(*std::max_element(&predict_batch[n * predict_shape[2]],
argmax_idx = &predict_batch[(n + 1) * predict_shape[2]]));
int(Utility::argmax(&predict_batch[n * predict_shape[2]],
&predict_batch[(n + 1) * predict_shape[2]])); if (argmax_idx > 0 && (!(n > 0 && argmax_idx == last_index))) {
max_value = score += max_value;
float(*std::max_element(&predict_batch[n * predict_shape[2]], count += 1;
&predict_batch[(n + 1) * predict_shape[2]])); str_res.push_back(label_list_[argmax_idx]);
if (argmax_idx > 0 && (!(n > 0 && argmax_idx == last_index))) {
score += max_value;
count += 1;
str_res.push_back(label_list_[argmax_idx]);
}
last_index = argmax_idx;
} }
score /= count; last_index = argmax_idx;
for (int i = 0; i < str_res.size(); i++) { }
std::cout << str_res[i]; auto postprocess_end = std::chrono::steady_clock::now();
} score /= count;
std::cout << "\tscore: " << score << std::endl; for (int i = 0; i < str_res.size(); i++) {
std::cout << str_res[i];
} }
std::cout << "\tscore: " << score << std::endl;
std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
times->push_back(double(preprocess_diff.count() * 1000));
std::chrono::duration<float> inference_diff = inference_end - inference_start;
times->push_back(double(inference_diff.count() * 1000));
std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
times->push_back(double(postprocess_diff.count() * 1000));
} }
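// Illustrative sketch (not part of this commit): the greedy CTC decode in
// Run() above takes the argmax class at each of the T time steps, skips the
// blank class (index 0), collapses consecutive repeats, and averages the
// kept maximum probabilities into a confidence score.
static std::string CtcGreedyDecodeSketch(const std::vector<float> &probs,
                                         int T, int C,
                                         const std::vector<std::string> &label_list,
                                         float &score) {
  std::string text;
  int last_index = 0;
  float score_sum = 0.f;
  int count = 0;
  for (int t = 0; t < T; t++) {
    const float *row = probs.data() + t * C; // T x C probability map
    int argmax_idx = int(std::distance(row, std::max_element(row, row + C)));
    float max_value = row[argmax_idx];
    // keep the label if it is not blank and not a repeat of the previous step
    if (argmax_idx > 0 && !(t > 0 && argmax_idx == last_index)) {
      score_sum += max_value;
      count += 1;
      text += label_list[argmax_idx];
    }
    last_index = argmax_idx;
  }
  score = (count > 0) ? (score_sum / count) : 0.f;
  return text;
}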
void CRNNRecognizer::LoadModel(const std::string &model_dir) { void CRNNRecognizer::LoadModel(const std::string &model_dir) {
...@@ -101,10 +101,16 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) { ...@@ -101,10 +101,16 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
if (this->use_gpu_) { if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_); config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) { if (this->use_tensorrt_) {
auto precision = paddle_infer::Config::Precision::kFloat32;
if (this->precision_ == "fp16") {
precision = paddle_infer::Config::Precision::kHalf;
}
if (this->precision_ == "int8") {
precision = paddle_infer::Config::Precision::kInt8;
}
config.EnableTensorRtEngine( config.EnableTensorRtEngine(
1 << 20, 10, 3, 1 << 20, 10, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf precision,
: paddle_infer::Config::Precision::kFloat32,
false, false); false, false);
std::map<std::string, std::vector<int>> min_input_shape = { std::map<std::string, std::vector<int>> min_input_shape = {
{"x", {1, 3, 32, 10}}}; {"x", {1, 3, 32, 10}}};
...@@ -138,59 +144,4 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) { ...@@ -138,59 +144,4 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
this->predictor_ = CreatePredictor(config); this->predictor_ = CreatePredictor(config);
} }
cv::Mat CRNNRecognizer::GetRotateCropImage(const cv::Mat &srcimage,
std::vector<std::vector<int>> box) {
cv::Mat image;
srcimage.copyTo(image);
std::vector<std::vector<int>> points = box;
int x_collect[4] = {box[0][0], box[1][0], box[2][0], box[3][0]};
int y_collect[4] = {box[0][1], box[1][1], box[2][1], box[3][1]};
int left = int(*std::min_element(x_collect, x_collect + 4));
int right = int(*std::max_element(x_collect, x_collect + 4));
int top = int(*std::min_element(y_collect, y_collect + 4));
int bottom = int(*std::max_element(y_collect, y_collect + 4));
cv::Mat img_crop;
image(cv::Rect(left, top, right - left, bottom - top)).copyTo(img_crop);
for (int i = 0; i < points.size(); i++) {
points[i][0] -= left;
points[i][1] -= top;
}
int img_crop_width = int(sqrt(pow(points[0][0] - points[1][0], 2) +
pow(points[0][1] - points[1][1], 2)));
int img_crop_height = int(sqrt(pow(points[0][0] - points[3][0], 2) +
pow(points[0][1] - points[3][1], 2)));
cv::Point2f pts_std[4];
pts_std[0] = cv::Point2f(0., 0.);
pts_std[1] = cv::Point2f(img_crop_width, 0.);
pts_std[2] = cv::Point2f(img_crop_width, img_crop_height);
pts_std[3] = cv::Point2f(0.f, img_crop_height);
cv::Point2f pointsf[4];
pointsf[0] = cv::Point2f(points[0][0], points[0][1]);
pointsf[1] = cv::Point2f(points[1][0], points[1][1]);
pointsf[2] = cv::Point2f(points[2][0], points[2][1]);
pointsf[3] = cv::Point2f(points[3][0], points[3][1]);
cv::Mat M = cv::getPerspectiveTransform(pointsf, pts_std);
cv::Mat dst_img;
cv::warpPerspective(img_crop, dst_img, M,
cv::Size(img_crop_width, img_crop_height),
cv::BORDER_REPLICATE);
if (float(dst_img.rows) >= float(dst_img.cols) * 1.5) {
cv::Mat srcCopy = cv::Mat(dst_img.rows, dst_img.cols, dst_img.depth());
cv::transpose(dst_img, srcCopy);
cv::flip(srcCopy, srcCopy, 0);
return srcCopy;
} else {
return dst_img;
}
}
} // namespace PaddleOCR } // namespace PaddleOCR
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include <include/postprocess_op.h> #include <include/postprocess_op.h>
#include <include/clipper.cpp>
namespace PaddleOCR { namespace PaddleOCR {
......
...@@ -92,4 +92,59 @@ void Utility::GetAllFiles(const char *dir_name, ...@@ -92,4 +92,59 @@ void Utility::GetAllFiles(const char *dir_name,
} }
} }
cv::Mat Utility::GetRotateCropImage(const cv::Mat &srcimage,
std::vector<std::vector<int>> box) {
cv::Mat image;
srcimage.copyTo(image);
std::vector<std::vector<int>> points = box;
int x_collect[4] = {box[0][0], box[1][0], box[2][0], box[3][0]};
int y_collect[4] = {box[0][1], box[1][1], box[2][1], box[3][1]};
int left = int(*std::min_element(x_collect, x_collect + 4));
int right = int(*std::max_element(x_collect, x_collect + 4));
int top = int(*std::min_element(y_collect, y_collect + 4));
int bottom = int(*std::max_element(y_collect, y_collect + 4));
cv::Mat img_crop;
image(cv::Rect(left, top, right - left, bottom - top)).copyTo(img_crop);
for (int i = 0; i < points.size(); i++) {
points[i][0] -= left;
points[i][1] -= top;
}
int img_crop_width = int(sqrt(pow(points[0][0] - points[1][0], 2) +
pow(points[0][1] - points[1][1], 2)));
int img_crop_height = int(sqrt(pow(points[0][0] - points[3][0], 2) +
pow(points[0][1] - points[3][1], 2)));
cv::Point2f pts_std[4];
pts_std[0] = cv::Point2f(0., 0.);
pts_std[1] = cv::Point2f(img_crop_width, 0.);
pts_std[2] = cv::Point2f(img_crop_width, img_crop_height);
pts_std[3] = cv::Point2f(0.f, img_crop_height);
cv::Point2f pointsf[4];
pointsf[0] = cv::Point2f(points[0][0], points[0][1]);
pointsf[1] = cv::Point2f(points[1][0], points[1][1]);
pointsf[2] = cv::Point2f(points[2][0], points[2][1]);
pointsf[3] = cv::Point2f(points[3][0], points[3][1]);
cv::Mat M = cv::getPerspectiveTransform(pointsf, pts_std);
cv::Mat dst_img;
cv::warpPerspective(img_crop, dst_img, M,
cv::Size(img_crop_width, img_crop_height),
cv::BORDER_REPLICATE);
if (float(dst_img.rows) >= float(dst_img.cols) * 1.5) {
cv::Mat srcCopy = cv::Mat(dst_img.rows, dst_img.cols, dst_img.depth());
cv::transpose(dst_img, srcCopy);
cv::flip(srcCopy, srcCopy, 0);
return srcCopy;
} else {
return dst_img;
}
}
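// Usage sketch (illustrative values): crop one detected quadrilateral and
// rectify it for recognition. `box` holds the four corners of a text region,
// ordered clockwise from the top-left as produced by DBDetector:
//
//   cv::Mat img = cv::imread("12.jpg", cv::IMREAD_COLOR);
//   std::vector<std::vector<int>> box = {{60, 40}, {220, 40}, {220, 90}, {60, 90}};
//   cv::Mat crop = Utility::GetRotateCropImage(img, box);
//
// The returned patch is perspective-rectified; when it comes out much taller
// than wide (rows >= 1.5 * cols), it is rotated upright before returning.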
} // namespace PaddleOCR } // namespace PaddleOCR
\ No newline at end of file
# model load config
use_gpu 0
gpu_id 0
gpu_mem 4000
cpu_math_library_num_threads 10
use_mkldnn 0
# det config
max_side_len 960
det_db_thresh 0.3
det_db_box_thresh 0.5
det_db_unclip_ratio 1.6
use_polygon_score 1
det_model_dir ./inference/ch_ppocr_mobile_v2.0_det_infer/
# cls config
use_angle_cls 0
cls_model_dir ./inference/ch_ppocr_mobile_v2.0_cls_infer/
cls_thresh 0.9
# rec config
rec_model_dir ./inference/ch_ppocr_mobile_v2.0_rec_infer/
char_list_file ../../ppocr/utils/ppocr_keys_v1.txt
# show the detection results
visualize 0
# use_tensorrt
use_tensorrt 0
use_fp16 0
./build/ocr_system ./tools/config.txt ../../doc/imgs/12.jpg
doc/joinus.PNG (203.3 KB → 190.7 KB)
...@@ -25,6 +25,6 @@ class ClsLoss(nn.Layer): ...@@ -25,6 +25,6 @@ class ClsLoss(nn.Layer):
self.loss_func = nn.CrossEntropyLoss(reduction='mean') self.loss_func = nn.CrossEntropyLoss(reduction='mean')
def forward(self, predicts, batch): def forward(self, predicts, batch):
label = batch[1] label = batch[1].astype("int64")
loss = self.loss_func(input=predicts, label=label) loss = self.loss_func(input=predicts, label=label)
return {'loss': loss} return {'loss': loss}
...@@ -62,7 +62,7 @@ else ...@@ -62,7 +62,7 @@ else
if [ ${model_name} = "ocr_det" ]; then if [ ${model_name} = "ocr_det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_infer" eval_model_name="ch_ppocr_mobile_v2.0_det_infer"
rm -rf ./train_data/icdar2015 rm -rf ./train_data/icdar2015
wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
else else
......
# 介绍
test.sh和params.txt文件配合使用,完成OCR轻量检测和识别模型从训练到预测的流程测试。
# 安装依赖
- 安装PaddlePaddle >= 2.0
- 安装PaddleOCR依赖
```
pip3 install -r ../requirements.txt
```
- 安装autolog
```
git clone https://github.com/LDOUBLEV/AutoLog
cd AutoLog
pip3 install -r requirements.txt
python3 setup.py bdist_wheel
pip3 install ./dist/auto_log-1.0.0-py3-none-any.whl
cd ../
```
# 目录介绍
```bash
tests/
├── ocr_det_params.txt # 测试OCR检测模型的参数配置文件
├── ocr_rec_params.txt # 测试OCR识别模型的参数配置文件
├── prepare.sh # 完成test.sh运行所需要的数据和模型下载
└── test.sh # 测试主程序,根据params.txt中的配置完成训练到预测的流程测试
```
# 使用方法
test.sh包含四种运行模式,每种模式的运行数据不同,分别用于测试速度和精度,具体如下:
- 模式1 lite_train_infer,使用少量数据训练,用于快速验证训练到预测的走通流程,不验证精度和速度;
```
bash tests/prepare.sh ./tests/ocr_det_params.txt 'lite_train_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'lite_train_infer'
```
- 模式2 whole_infer,使用少量数据训练,一定量数据预测,用于验证训练后的模型执行预测,预测速度是否合理;
```
bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'whole_infer'
```
- 模式3 infer,不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度;
```
bash tests/prepare.sh ./tests/ocr_det_params.txt 'infer'
用法1:
bash tests/test.sh ./tests/ocr_det_params.txt 'infer'
用法2: 指定GPU卡预测,第三个传入参数为GPU卡号
bash tests/test.sh ./tests/ocr_det_params.txt 'infer' '1'
```
- 模式4 whole_train_infer,CE:全量数据训练,全量数据预测,验证模型训练精度、预测精度与预测速度;
```
bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_train_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'whole_train_infer'
```
...@@ -101,6 +101,7 @@ class TextDetector(object): ...@@ -101,6 +101,7 @@ class TextDetector(object):
if args.benchmark: if args.benchmark:
import auto_log import auto_log
pid = os.getpid() pid = os.getpid()
gpu_id = utility.get_infer_gpuid()
self.autolog = auto_log.AutoLogger( self.autolog = auto_log.AutoLogger(
model_name="det", model_name="det",
model_precision=args.precision, model_precision=args.precision,
...@@ -110,7 +111,7 @@ class TextDetector(object): ...@@ -110,7 +111,7 @@ class TextDetector(object):
inference_config=self.config, inference_config=self.config,
pids=pid, pids=pid,
process_name=None, process_name=None,
gpu_ids=0, gpu_ids=gpu_id if args.use_gpu else None,
time_keys=[ time_keys=[
'preprocess_time', 'inference_time', 'postprocess_time' 'preprocess_time', 'inference_time', 'postprocess_time'
], ],
......
...@@ -68,6 +68,7 @@ class TextRecognizer(object): ...@@ -68,6 +68,7 @@ class TextRecognizer(object):
if args.benchmark: if args.benchmark:
import auto_log import auto_log
pid = os.getpid() pid = os.getpid()
gpu_id = utility.get_infer_gpuid()
self.autolog = auto_log.AutoLogger( self.autolog = auto_log.AutoLogger(
model_name="rec", model_name="rec",
model_precision=args.precision, model_precision=args.precision,
...@@ -77,7 +78,7 @@ class TextRecognizer(object): ...@@ -77,7 +78,7 @@ class TextRecognizer(object):
inference_config=self.config, inference_config=self.config,
pids=pid, pids=pid,
process_name=None, process_name=None,
gpu_ids=0 if args.use_gpu else None, gpu_ids=gpu_id if args.use_gpu else None,
time_keys=[ time_keys=[
'preprocess_time', 'inference_time', 'postprocess_time' 'preprocess_time', 'inference_time', 'postprocess_time'
], ],
......
...@@ -159,6 +159,11 @@ def create_predictor(args, mode, logger): ...@@ -159,6 +159,11 @@ def create_predictor(args, mode, logger):
precision = inference.PrecisionType.Float32 precision = inference.PrecisionType.Float32
if args.use_gpu: if args.use_gpu:
gpu_id = get_infer_gpuid()
if gpu_id is None:
raise ValueError(
"Not found GPU in current device. Please check your device or set args.use_gpu as False"
)
config.enable_use_gpu(args.gpu_mem, 0) config.enable_use_gpu(args.gpu_mem, 0)
if args.use_tensorrt: if args.use_tensorrt:
config.enable_tensorrt_engine( config.enable_tensorrt_engine(
...@@ -280,6 +285,20 @@ def create_predictor(args, mode, logger): ...@@ -280,6 +285,20 @@ def create_predictor(args, mode, logger):
return predictor, input_tensor, output_tensors, config return predictor, input_tensor, output_tensors, config
def get_infer_gpuid():
    # nvidia-smi produces no output when there is no usable GPU
    cmd = "nvidia-smi"
    res = os.popen(cmd).readlines()
    if len(res) == 0:
        return None
    # respect CUDA_VISIBLE_DEVICES when it is set, defaulting to device 0;
    # only the first (single-digit) id in the list is used
    cmd = "env | grep CUDA_VISIBLE_DEVICES"
    env_cuda = os.popen(cmd).readlines()
    if len(env_cuda) == 0:
        return 0
    else:
        gpu_id = env_cuda[0].strip().split("=")[1]
        return int(gpu_id[0])
def draw_e2e_res(dt_boxes, strs, img_path): def draw_e2e_res(dt_boxes, strs, img_path):
src_im = cv2.imread(img_path) src_im = cv2.imread(img_path)
for box, str in zip(dt_boxes, strs): for box, str in zip(dt_boxes, strs):
......