Commit 0045e089 authored by: L LDOUBLEV

print to logger.info

# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pyrcc5 -o libs/resources.py resources.qrc
import argparse
import ast
import codecs
import os.path
import platform
import subprocess
import sys
from functools import partial
from collections import defaultdict
import json
import cv2  # used by reRecognition below
import numpy as np
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
sys.path.append("..")
from paddleocr import PaddleOCR
try:
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from combobox import ComboBox
from libs.constants import *
from libs.utils import *
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.stringBundle import StringBundle
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.autoDialog import AutoDialog
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError
from libs.toolBar import ToolBar
from libs.ustr import ustr
from libs.hashableQListWidgetItem import HashableQListWidgetItem
__appname__ = 'PPOCRLabel'
class WindowMixin(object):
def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName(u'%sToolBar' % title)
# toolbar.setOrientation(Qt.Vertical)
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
addActions(toolbar, actions)
self.addToolBar(Qt.LeftToolBarArea, toolbar)
return toolbar
class MainWindow(QMainWindow, WindowMixin):
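# Zoom modes: scale the image to fit the whole window, fit its width, or zoom manually.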
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, lang="ch", defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
# Load setting in the main thread
self.settings = Settings()
self.settings.load()
settings = self.settings
self.lang = lang
# Load string bundle for i18n
if lang not in ['ch', 'en']:
lang = 'en'
self.stringBundle = StringBundle.getBundle(localeStr='zh-CN' if lang == 'ch' else 'en')
getStr = lambda strId: self.stringBundle.getString(strId)
self.defaultSaveDir = defaultSaveDir
self.ocr = PaddleOCR(use_pdserving=False, use_angle_cls=True, det=True, cls=True, use_gpu=False, lang=lang)
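# Run one OCR pass on the bundled sample image, if present, presumably to warm up the model so the first real recognition is faster.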
if os.path.exists('./data/paddle.png'):
result = self.ocr.ocr('./data/paddle.png', cls=True, det=True)
# For loading all images under a directory
self.mImgList = []
self.mImgList5 = []
self.dirname = None
self.labelHist = []
self.lastOpenDir = None
self.result_dic = []
self.changeFileFolder = False
self.haveAutoReced = False
self.labelFile = None
self.currIndex = 0
# Whether we need to save or not.
self.dirty = False
self._noSelectionSlot = False
self._beginner = True
self.screencastViewer = self.getAvailableScreencastViewer()
self.screencast = "https://github.com/PaddlePaddle/PaddleOCR"
# Load predefined classes to the list
self.loadPredefinedClasses(defaultPrefdefClassFile)
# Main widgets and related state.
self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
self.autoDialog = AutoDialog(parent=self)
self.itemsToShapes = {}
self.shapesToItems = {}
self.itemsToShapesbox = {}
self.shapesToItemsbox = {}
self.prevLabelText = getStr('tempLabel')
self.model = 'paddle'
self.PPreader = None
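# Auto-save the label and file-state records after every autoSaveNum confirmed images (see _saveFile).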
self.autoSaveNum = 10
################# file list ###############
self.fileListWidget = QListWidget()
self.fileListWidget.itemClicked.connect(self.fileitemDoubleClicked)
self.fileListWidget.setIconSize(QSize(25, 25))
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
filelistLayout.addWidget(self.fileListWidget)
self.AutoRecognition = QToolButton()
self.AutoRecognition.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.AutoRecognition.setIcon(newIcon('Auto'))
# self.AutoRecognition.setIconSize(QSize(100,20))
# self.AutoRecognition.setFixedSize(QSize(80,30))
# self.AutoRecognition.setStyleSheet('text-align:center;')#border:none;font-size : 12pt;
autoRecLayout = QHBoxLayout()
autoRecLayout.setContentsMargins(0, 0, 0, 0)
autoRecLayout.addWidget(self.AutoRecognition)
autoRecContainer = QWidget()
autoRecContainer.setLayout(autoRecLayout)
filelistLayout.addWidget(autoRecContainer)
fileListContainer = QWidget()
fileListContainer.setLayout(filelistLayout)
self.filedock = QDockWidget(getStr('fileList'), self)
self.filedock.setObjectName(getStr('files'))
self.filedock.setWidget(fileListContainer)
self.addDockWidget(Qt.LeftDockWidgetArea, self.filedock)
######## Right area ##########
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
# Create a widget for edit and diffc button
self.diffcButton = QCheckBox(getStr('useDifficult'))
self.diffcButton.setChecked(False)
self.diffcButton.stateChanged.connect(self.btnstate)
self.editButton = QToolButton()
self.reRecogButton = QToolButton()
self.reRecogButton.setIcon(newIcon('reRec', 30))
# self.reRecogButton.setFixedSize(QSize(80,30))
self.reRecogButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.newButton = QToolButton()
self.newButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
# self.newButton.setFixedSize(QSize(80, 30))
self.SaveButton = QToolButton()
self.SaveButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
# self.SaveButton.setFixedSize(QSize(60, 30))
self.DelButton = QToolButton()
self.DelButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
# self.DelButton.setFixedSize(QSize(80, 30))
lefttoptoolbox = QHBoxLayout()
lefttoptoolbox.addWidget(self.newButton)
lefttoptoolbox.addWidget(self.reRecogButton)
lefttoptoolboxcontainer = QWidget()
lefttoptoolboxcontainer.setLayout(lefttoptoolbox)
listLayout.addWidget(lefttoptoolboxcontainer)
################## label list ####################
# Create and add a widget for showing current label items
self.labelList = QListWidget()
labelListContainer = QWidget()
labelListContainer.setLayout(listLayout)
self.labelList.itemActivated.connect(self.labelSelectionChanged)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
# Connect to itemChanged to detect checkbox changes.
self.labelList.itemChanged.connect(self.labelItemChanged)
self.labelListDock = QDockWidget(getStr('recognitionResult'),self)
self.labelListDock.setWidget(self.labelList)
self.labelListDock.setFeatures(QDockWidget.NoDockWidgetFeatures)
listLayout.addWidget(self.labelListDock)
################## detection box ####################
self.BoxList = QListWidget()
self.BoxList.itemActivated.connect(self.boxSelectionChanged)
self.BoxList.itemSelectionChanged.connect(self.boxSelectionChanged)
self.BoxList.itemDoubleClicked.connect(self.editBox)
# Connect to itemChanged to detect checkbox changes.
self.BoxList.itemChanged.connect(self.boxItemChanged)
self.BoxListDock = QDockWidget(getStr('detectionBoxposition'), self)
self.BoxListDock.setWidget(self.BoxList)
self.BoxListDock.setFeatures(QDockWidget.NoDockWidgetFeatures)
listLayout.addWidget(self.BoxListDock)
############ lower right area ############
leftbtmtoolbox = QHBoxLayout()
leftbtmtoolbox.addWidget(self.SaveButton)
leftbtmtoolbox.addWidget(self.DelButton)
leftbtmtoolboxcontainer = QWidget()
leftbtmtoolboxcontainer.setLayout(leftbtmtoolbox)
listLayout.addWidget(leftbtmtoolboxcontainer)
self.dock = QDockWidget(getStr('boxLabelText'), self)
self.dock.setObjectName(getStr('labels'))
self.dock.setWidget(labelListContainer)
########## zoom bar #########
self.imgsplider = QSlider(Qt.Horizontal)
self.imgsplider.valueChanged.connect(self.CanvasSizeChange)
self.imgsplider.setMinimum(-150)
self.imgsplider.setMaximum(150)
self.imgsplider.setSingleStep(1)
self.imgsplider.setTickPosition(QSlider.TicksBelow)
self.imgsplider.setTickInterval(1)
op = QGraphicsOpacityEffect()
op.setOpacity(0.2)
self.imgsplider.setGraphicsEffect(op)
# self.imgsplider.setAttribute(Qt.WA_TranslucentBackground)
self.imgsplider.setStyleSheet("background-color:transparent")
self.imgsliderDock = QDockWidget(getStr('ImageResize'), self)
self.imgsliderDock.setObjectName(getStr('IR'))
self.imgsliderDock.setWidget(self.imgsplider)
self.imgsliderDock.setFeatures(QDockWidget.DockWidgetFloatable)
# op = QGraphicsOpacityEffect()
# op.setOpacity(0.2)
# self.imgsliderDock.setGraphicsEffect(op)
self.imgsliderDock.setAttribute(Qt.WA_TranslucentBackground)
self.addDockWidget(Qt.RightDockWidgetArea, self.imgsliderDock)
self.zoomWidget = ZoomWidget()
self.colorDialog = ColorDialog(parent=self)
self.zoomWidgetValue = self.zoomWidget.value()
########## thumbnail #########
hlayout = QHBoxLayout()
m = (0, 0, 0, 0)
hlayout.setSpacing(0)
hlayout.setContentsMargins(*m)
self.preButton = QToolButton()
# self.preButton.setFixedHeight(100)
# self.preButton.setText(getStr("prevImg"))
self.preButton.setIcon(newIcon("prev",40))
self.preButton.setIconSize(QSize(40, 100))
self.preButton.clicked.connect(self.openPrevImg)
# self.preButton.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
self.preButton.setStyleSheet('border: none;')
self.iconlist = QListWidget()
self.iconlist.setViewMode(QListView.IconMode)
self.iconlist.setFlow(QListView.TopToBottom)
self.iconlist.setSpacing(10)
self.iconlist.setIconSize(QSize(50, 50))
self.iconlist.setMovement(False)
self.iconlist.setResizeMode(QListView.Adjust)
# self.iconlist.itemDoubleClicked.connect(self.iconitemDoubleClicked)
self.iconlist.itemClicked.connect(self.iconitemDoubleClicked)
self.iconlist.setStyleSheet("background-color:transparent; border: none;")
self.iconlist.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# self.iconlist.setStyleSheet('border: none;')
self.nextButton = QToolButton()
# self.nextButton.setFixedHeight(100)
# self.nextButton.setText(getStr("nextImg"))
self.nextButton.setIcon(newIcon("next", 40))
self.nextButton.setIconSize(QSize(40, 100))
self.nextButton.setStyleSheet('border: none;')
self.nextButton.clicked.connect(self.openNextImg)
# self.nextButton.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
hlayout.addWidget(self.preButton)
hlayout.addWidget(self.iconlist)
hlayout.addWidget(self.nextButton)
# self.setLayout(hlayout)
iconListContainer = QWidget()
iconListContainer.setLayout(hlayout)
iconListContainer.setFixedHeight(100)
# iconListContainer.setFixedWidth(530)
# op = QGraphicsOpacityEffect()
# op.setOpacity(0.5)
# iconListContainer.setGraphicsEffect(op)
########### Canvas ###########
self.canvas = Canvas(parent=self)
self.canvas.zoomRequest.connect(self.zoomRequest)
self.canvas.setDrawingShapeToSquare(settings.get(SETTING_DRAW_SQUARE, False))
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.updateBoxlist) # self.setDirty
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
centerLayout = QVBoxLayout()
centerLayout.setContentsMargins(0, 0, 0, 0)
centerLayout.addWidget(scroll)
#centerLayout.addWidget(self.icondock)
centerLayout.addWidget(iconListContainer, 0, Qt.AlignCenter)
centercontainer = QWidget()
centercontainer.setLayout(centerLayout)
# self.scrolldock = QDockWidget('WorkSpace',self)
# self.scrolldock.setObjectName('WorkSpace')
# self.scrolldock.setWidget(centercontainer)
# self.scrolldock.setFeatures(QDockWidget.NoDockWidgetFeatures)
# orititle = self.scrolldock.titleBarWidget()
# tmpwidget = QWidget()
# self.scrolldock.setTitleBarWidget(tmpwidget)
# del orititle
self.setCentralWidget(centercontainer) #self.scrolldock
self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
# self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
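# Toggle off the float/close abilities of the docks so the layout stays fixed.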
self.filedock.setFeatures(self.filedock.features() ^ QDockWidget.DockWidgetFloatable)
self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
self.filedock.setFeatures(QDockWidget.NoDockWidgetFeatures)
###### Actions #######
action = partial(newAction, self)
quit = action(getStr('quit'), self.close,
'Ctrl+Q', 'quit', getStr('quitApp'))
open = action(getStr('openFile'), self.openFile,
'Ctrl+O', 'open', getStr('openFileDetail'))
opendir = action(getStr('openDir'), self.openDirDialog,
'Ctrl+u', 'open', getStr('openDir'))
openNextImg = action(getStr('nextImg'), self.openNextImg,
'd', 'next', getStr('nextImgDetail'))
openPrevImg = action(getStr('prevImg'), self.openPrevImg,
'a', 'prev', getStr('prevImgDetail'))
verify = action(getStr('verifyImg'), self.verifyImg,
'space', 'verify', getStr('verifyImgDetail'))
save = action(getStr('save'), self.saveFile,
'Ctrl+S', 'save', getStr('saveDetail'), enabled=False)
alcm = action(getStr('choosemodel'), self.autolcm,
'Ctrl+M', 'next', getStr('tipchoosemodel'))
deleteImg = action(getStr('deleteImg'), self.deleteImg, 'Ctrl+D', 'close', getStr('deleteImgDetail'),
enabled=True)
resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail'))
color1 = action(getStr('boxLineColor'), self.chooseColor1,
'Ctrl+L', 'color_line', getStr('boxLineColorDetail'))
createMode = action(getStr('crtBox'), self.setCreateMode,
'w', 'new', getStr('crtBoxDetail'), enabled=False)
editMode = action('&Edit\nRectBox', self.setEditMode,
'Ctrl+J', 'edit', u'Move and edit boxes', enabled=False)
create = action(getStr('crtBox'), self.createShape,
'w', 'new', getStr('crtBoxDetail'), enabled=False)
delete = action(getStr('delBox'), self.deleteSelectedShape,
'Delete', 'delete', getStr('delBoxDetail'), enabled=False)
copy = action(getStr('dupBox'), self.copySelectedShape,
'Ctrl+D', 'copy', getStr('dupBoxDetail'),
enabled=False)
hideAll = action(getStr('hideBox'), partial(self.togglePolygons, False),
'Ctrl+H', 'hide', getStr('hideAllBoxDetail'),
enabled=False)
showAll = action(getStr('showBox'), partial(self.togglePolygons, True),
'Ctrl+A', 'hide', getStr('showAllBoxDetail'),
enabled=False)
help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
showSteps = action(getStr('steps'), self.showStepsDialog, None, 'help', getStr('steps'))
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
u"Zoom in or out of the image. Also accessible with"
" %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
fmtShortcut("Ctrl+Wheel")))
self.zoomWidget.setEnabled(False)
zoomIn = action(getStr('zoomin'), partial(self.addZoom, 10),
'Ctrl++', 'zoom-in', getStr('zoominDetail'), enabled=False)
zoomOut = action(getStr('zoomout'), partial(self.addZoom, -10),
'Ctrl+-', 'zoom-out', getStr('zoomoutDetail'), enabled=False)
zoomOrg = action(getStr('originalsize'), partial(self.setZoom, 100),
'Ctrl+=', 'zoom', getStr('originalsizeDetail'), enabled=False)
fitWindow = action(getStr('fitWin'), self.setFitWindow,
'Ctrl+F', 'fit-window', getStr('fitWinDetail'),
checkable=True, enabled=False)
fitWidth = action(getStr('fitWidth'), self.setFitWidth,
'Ctrl+Shift+F', 'fit-width', getStr('fitWidthDetail'),
checkable=True, enabled=False)
# Group zoom controls into a list for easier toggling.
zoomActions = (self.zoomWidget, zoomIn, zoomOut,
zoomOrg, fitWindow, fitWidth)
self.zoomMode = self.MANUAL_ZOOM
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
edit = action(getStr('editLabel'), self.editLabel,
'Ctrl+E', 'edit', getStr('editLabelDetail'),
enabled=False)
######## New actions #######
AutoRec = action(getStr('autoRecognition'), self.autoRecognition,
'Ctrl+Shift+A', 'Auto', getStr('autoRecognition'), enabled=False)
reRec = action(getStr('reRecognition'), self.reRecognition,
'Ctrl+Shift+R', 'reRec', getStr('reRecognition'), enabled=False)
createpoly = action(getStr('creatPolygon'), self.createPolygon,
'p', 'new', 'Create Polygon', enabled=True)
saveRec = action(getStr('saveRec'), self.saveRecResult,
'', 'saveRec', getStr('saveRec'), enabled=False)
self.editButton.setDefaultAction(edit)
self.newButton.setDefaultAction(create)
self.DelButton.setDefaultAction(deleteImg)
self.SaveButton.setDefaultAction(save)
self.AutoRecognition.setDefaultAction(AutoRec)
self.reRecogButton.setDefaultAction(reRec)
# self.preButton.setDefaultAction(openPrevImg)
# self.nextButton.setDefaultAction(openNextImg)
############# Zoom layout ##############
zoomLayout = QHBoxLayout()
zoomLayout.addStretch()
self.zoominButton = QToolButton()
self.zoominButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.zoominButton.setDefaultAction(zoomIn)
self.zoomoutButton = QToolButton()
self.zoomoutButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.zoomoutButton.setDefaultAction(zoomOut)
self.zoomorgButton = QToolButton()
self.zoomorgButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.zoomorgButton.setDefaultAction(zoomOrg)
zoomLayout.addWidget(self.zoominButton)
zoomLayout.addWidget(self.zoomorgButton)
zoomLayout.addWidget(self.zoomoutButton)
zoomContainer = QWidget()
zoomContainer.setLayout(zoomLayout)
zoomContainer.setGeometry(0, 0, 30, 150)
shapeLineColor = action(getStr('shapeLineColor'), self.chshapeLineColor,
icon='color_line', tip=getStr('shapeLineColorDetail'),
enabled=False)
shapeFillColor = action(getStr('shapeFillColor'), self.chshapeFillColor,
icon='color', tip=getStr('shapeFillColorDetail'),
enabled=False)
# labels = self.dock.toggleViewAction()
# labels.setText(getStr('showHide'))
# labels.setShortcut('Ctrl+Shift+L')
# Label list context menu.
labelMenu = QMenu()
addActions(labelMenu, (edit, delete))
self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
self.labelList.customContextMenuRequested.connect(
self.popLabelListMenu)
# Draw squares/rectangles
self.drawSquaresOption = QAction(getStr('drawSquares'), self)
self.drawSquaresOption.setShortcut('Ctrl+Shift+R')
self.drawSquaresOption.setCheckable(True)
self.drawSquaresOption.setChecked(settings.get(SETTING_DRAW_SQUARE, False))
self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
# Store actions for further handling.
self.actions = struct(save=save, open=open, resetAll=resetAll, deleteImg=deleteImg,
lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
saveRec=saveRec,
createMode=createMode, editMode=editMode,
shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions,
fileMenuActions=(
open, opendir, save, resetAll, quit),
beginner=(), advanced=(),
editMenu=(createpoly, edit, copy, delete,
None, color1, self.drawSquaresOption),
beginnerContext=(create, edit, copy, delete),
advancedContext=(createMode, editMode, edit, copy,
delete, shapeLineColor, shapeFillColor),
onLoadActive=(
create, createMode, editMode),
onShapesPresent=(hideAll, showAll))
# menus
self.menus = struct(
file=self.menu('&'+getStr('mfile')),
edit=self.menu('&'+getStr('medit')),
view=self.menu('&'+getStr('mview')),
autolabel=self.menu('&PaddleOCR'),
help=self.menu('&'+getStr('mhelp')),
recentFiles=QMenu('Open &Recent'),
labelList=labelMenu)
# Sync single class mode from PR#106
self.singleClassMode = QAction(getStr('singleClsMode'), self)
self.singleClassMode.setShortcut("Ctrl+Shift+S")
self.singleClassMode.setCheckable(True)
self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
self.lastLabel = None
# Add option to enable/disable labels being displayed at the top of bounding boxes
self.displayLabelOption = QAction(getStr('displayLabel'), self)
self.displayLabelOption.setShortcut("Ctrl+Shift+P")
self.displayLabelOption.setCheckable(True)
self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
addActions(self.menus.file,
(opendir, None, save, resetAll, deleteImg, quit))
addActions(self.menus.help, (showSteps, showInfo))
addActions(self.menus.view, (
self.displayLabelOption, # labels,
None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
fitWindow, fitWidth))
addActions(self.menus.autolabel, (alcm, saveRec, None, help)) #
self.menus.file.aboutToShow.connect(self.updateFileMenu)
# Custom context menu for the canvas widget:
addActions(self.canvas.menus[0], self.actions.beginnerContext)
addActions(self.canvas.menus[1], (
action('&Copy here', self.copyShape),
action('&Move here', self.moveShape)))
# self.tools = self.toolbar('Tools')
self.actions.beginner = (
open, opendir, openNextImg, openPrevImg, verify, save, None, create, copy, delete, None,
zoomIn, zoom, zoomOut, fitWindow, fitWidth)
self.actions.advanced = (
open, opendir, openNextImg, openPrevImg, save, None,
createMode, editMode, None,
hideAll, showAll)
self.statusBar().showMessage('%s started.' % __appname__)
self.statusBar().show()
# Application state.
self.image = QImage()
self.filePath = ustr(defaultFilename)
self.lastOpenDir = None
self.recentFiles = []
self.maxRecent = 7
self.lineColor = None
self.fillColor = None
self.zoom_level = 100
self.fit_window = False
# Add Chris
self.difficult = False
## Fix the compatibility issue between qt4 and qt5: convert the QStringList to a python list
if settings.get(SETTING_RECENT_FILES):
if have_qstring():
recentFileQStringList = settings.get(SETTING_RECENT_FILES)
self.recentFiles = [ustr(i) for i in recentFileQStringList]
else:
self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
size = settings.get(SETTING_WIN_SIZE, QSize(1200, 800))
position = QPoint(0, 0)
saved_position = settings.get(SETTING_WIN_POSE, position)
# Fix the multiple monitors issue
for i in range(QApplication.desktop().screenCount()):
if QApplication.desktop().availableGeometry(i).contains(saved_position):
position = saved_position
break
self.resize(size)
self.move(position)
saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
self.canvas.setDrawingColor(self.lineColor)
# Add chris
Shape.difficult = self.difficult
# ADD:
# Populate the File menu dynamically.
self.updateFileMenu()
# Since loading the file may take some time, make sure it runs in the background.
if self.filePath and os.path.isdir(self.filePath):
self.queueEvent(partial(self.importDirImages, self.filePath or ""))
elif self.filePath:
self.queueEvent(partial(self.loadFile, self.filePath or ""))
# Callbacks:
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
# Display cursor coordinates at the right of status bar
self.labelCoordinates = QLabel('')
self.statusBar().addPermanentWidget(self.labelCoordinates)
# Open the directory if the default file path is a directory
if self.filePath and os.path.isdir(self.filePath):
self.openDirDialog(dirpath=self.filePath, silent=True)
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Control:
self.canvas.setDrawingShapeToSquare(False)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Control:
# Constrain the drawn shape to a square while Ctrl is held
self.canvas.setDrawingShapeToSquare(True)
def noShapes(self):
return not self.itemsToShapes
def populateModeActions(self):
self.canvas.menus[0].clear()
addActions(self.canvas.menus[0], self.actions.beginnerContext)
self.menus.edit.clear()
actions = (self.actions.create,) # if self.beginner() else (self.actions.createMode, self.actions.editMode)
addActions(self.menus.edit, actions + self.actions.editMenu)
def setDirty(self):
self.dirty = True
self.actions.save.setEnabled(True)
def setClean(self):
self.dirty = False
self.actions.save.setEnabled(False)
self.actions.create.setEnabled(True)
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
def queueEvent(self, function):
QTimer.singleShot(0, function)
def status(self, message, delay=5000):
self.statusBar().showMessage(message, delay)
def resetState(self):
self.itemsToShapes.clear()
self.shapesToItems.clear()
self.itemsToShapesbox.clear() # ADD
self.shapesToItemsbox.clear()
self.labelList.clear()
self.BoxList.clear()
self.filePath = None
self.imageData = None
self.labelFile = None
self.canvas.resetState()
self.labelCoordinates.clear()
# self.comboBox.cb.clear()
self.result_dic = []
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def currentBox(self):
items = self.BoxList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
def beginner(self):
return self._beginner
def advanced(self):
return not self.beginner()
def getAvailableScreencastViewer(self):
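"""Return the command used to open the tutorial URL on the current platform."""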
osName = platform.system()
if osName == 'Windows':
return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
elif osName == 'Linux':
return ['xdg-open']
elif osName == 'Darwin':
return ['open']
## Callbacks ##
def showTutorialDialog(self):
subprocess.Popen(self.screencastViewer + [self.screencast])
def showInfoDialog(self):
from libs.__init__ import __version__
msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
QMessageBox.information(self, u'Information', msg)
def showStepsDialog(self):
msg = stepsInfo(self.lang)
QMessageBox.information(self, u'Information', msg)
def createShape(self):
assert self.beginner()
self.canvas.setEditing(False)
self.actions.create.setEnabled(False)
self.canvas.fourpoint = False
def createPolygon(self):
assert self.beginner()
self.canvas.setEditing(False)
self.canvas.fourpoint = True
self.actions.create.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled."""
self.actions.editMode.setEnabled(not drawing)
if not drawing and self.beginner():
# Cancel creation.
print('Cancel creation.')
self.canvas.setEditing(True)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
def toggleDrawMode(self, edit=True):
self.canvas.setEditing(edit)
self.actions.createMode.setEnabled(edit)
self.actions.editMode.setEnabled(not edit)
def setCreateMode(self):
assert self.advanced()
self.toggleDrawMode(False)
def setEditMode(self):
assert self.advanced()
self.toggleDrawMode(True)
self.labelSelectionChanged()
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('labels')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
def popLabelListMenu(self, point):
self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def editLabel(self):
if not self.canvas.editing():
return
item = self.currentItem()
if not item:
return
text = self.labelDialog.popUp(item.text())
if text is not None:
item.setText(text)
# item.setBackground(generateColorByText(text))
self.setDirty()
self.updateComboBox()
######## detection box related functions #######
def boxItemChanged(self, item):
shape = self.itemsToShapesbox[item]
box = ast.literal_eval(item.text())
# print('shape in labelItemChanged is',shape.points)
if box != [(p.x(), p.y()) for p in shape.points]:
# shape.points = box
shape.points = [QPointF(p[0], p[1]) for p in box]
# QPointF(x,y)
# shape.line_color = generateColorByText(shape.label)
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
def editBox(self): # ADD
if not self.canvas.editing():
return
item = self.currentBox()
if not item:
return
text = self.labelDialog.popUp(item.text())
imageSize = str(self.image.size())
width, height = self.image.width(), self.image.height()
if text:
try:
text_list = ast.literal_eval(text)
except (ValueError, SyntaxError):
msg_box = QMessageBox(QMessageBox.Warning, 'Warning', 'Please enter the correct format')
msg_box.exec_()
return
if len(text_list) < 4:
msg_box = QMessageBox(QMessageBox.Warning, 'Warning', 'Please enter the coordinates of 4 points')
msg_box.exec_()
return
for box in text_list:
if box[0] > width or box[0] < 0 or box[1] > height or box[1] < 0:
msg_box = QMessageBox(QMessageBox.Warning, 'Warning', 'Out of picture size')
msg_box.exec_()
return
item.setText(text)
# item.setBackground(generateColorByText(text))
self.setDirty()
self.updateComboBox()
def updateBoxlist(self):
shape = self.canvas.selectedShape
item = self.shapesToItemsbox[shape] # listitem
text = [(int(p.x()), int(p.y())) for p in shape.points]
item.setText(str(text))
self.setDirty()
def indexTo5Files(self, currIndex):
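"""Return a window of up to five image paths from mImgList, centered on currIndex and clamped at both ends of the list."""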
if currIndex < 2:
return self.mImgList[:5]
elif currIndex > len(self.mImgList)-3:
return self.mImgList[-5:]
else:
return self.mImgList[currIndex - 2 : currIndex + 3]
# Tzutalin 20160906 : Add file list and dock to move faster
def fileitemDoubleClicked(self, item=None):
self.currIndex = self.mImgList.index(ustr(os.path.join(os.path.abspath(self.dirname), item.text())))
filename = self.mImgList[self.currIndex]
if filename:
self.mImgList5 = self.indexTo5Files(self.currIndex)
# self.additems5(None)
self.loadFile(filename)
def iconitemDoubleClicked(self, item=None):
self.currIndex = self.mImgList.index(ustr(os.path.join(item.toolTip())))
filename = self.mImgList[self.currIndex]
if filename:
self.mImgList5 = self.indexTo5Files(self.currIndex)
# self.additems5(None)
self.loadFile(filename)
def CanvasSizeChange(self):
if len(self.mImgList) > 0:
self.zoomWidget.setValue(self.zoomWidgetValue + self.imgsplider.value())
# Add chris
def btnstate(self, item=None):
"""Handle the 'difficult' checkbox: update the flag on the selected shape."""
if not self.canvas.editing():
return
item = self.currentItem()
if not item: # If no item is selected, take the last one
item = self.labelList.item(self.labelList.count() - 1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except KeyError:
return
# Checked and update
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
if self._noSelectionSlot:
self._noSelectionSlot = False
else:
shape = self.canvas.selectedShape
if shape:
self.shapesToItems[shape].setSelected(True)
self.shapesToItemsbox[shape].setSelected(True) # ADD
else:
self.labelList.clearSelection()
self.actions.delete.setEnabled(selected)
self.actions.copy.setEnabled(selected)
self.actions.edit.setEnabled(selected)
self.actions.shapeLineColor.setEnabled(selected)
self.actions.shapeFillColor.setEnabled(selected)
def addLabel(self, shape):
shape.paintLabel = self.displayLabelOption.isChecked()
item = HashableQListWidgetItem(shape.label)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
# item.setBackground(generateColorByText(shape.label))
self.itemsToShapes[item] = shape
self.shapesToItems[shape] = item
self.labelList.addItem(item)
# print('item in add label is ',[(p.x(), p.y()) for p in shape.points], shape.label)
# ADD for box
item = HashableQListWidgetItem(str([(int(p.x()), int(p.y())) for p in shape.points]))
# item = QListWidgetItem(str([(p.x(), p.y()) for p in shape.points]))
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
# item.setBackground(generateColorByText(shape.label))
self.itemsToShapesbox[item] = shape
self.shapesToItemsbox[shape] = item
self.BoxList.addItem(item)
for action in self.actions.onShapesPresent:
action.setEnabled(True)
self.updateComboBox()
def remLabel(self, shape):
if shape is None:
# print('rm empty label')
return
item = self.shapesToItems[shape]
self.labelList.takeItem(self.labelList.row(item))
del self.shapesToItems[shape]
del self.itemsToShapes[item]
self.updateComboBox()
# ADD:
item = self.shapesToItemsbox[shape]
self.BoxList.takeItem(self.BoxList.row(item))
del self.shapesToItemsbox[shape]
del self.itemsToShapesbox[item]
self.updateComboBox()
def loadLabels(self, shapes):
s = []
for label, points, line_color, fill_color, difficult in shapes:
shape = Shape(label=label)
for x, y in points:
# Ensure the labels are within the bounds of the image. If not, fix them.
x, y, snapped = self.canvas.snapPointToCanvas(x, y)
if snapped:
self.setDirty()
shape.addPoint(QPointF(x, y))
shape.difficult = difficult
shape.close()
s.append(shape)
# if line_color:
# shape.line_color = QColor(*line_color)
# else:
# shape.line_color = generateColorByText(label)
#
# if fill_color:
# shape.fill_color = QColor(*fill_color)
# else:
# shape.fill_color = generateColorByText(label)
self.addLabel(shape)
self.updateComboBox()
self.canvas.loadShapes(s)
def updateComboBox(self):
# Get the unique labels and add them to the Combobox.
itemsTextList = [str(self.labelList.item(i).text()) for i in range(self.labelList.count())]
uniqueTextList = list(set(itemsTextList))
# Add a null row for showing all the labels
uniqueTextList.append("")
uniqueTextList.sort()
# self.comboBox.update_items(uniqueTextList)
def saveLabels(self, annotationFilePath, mode='Auto'):
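"""Collect the current shapes (plus any auto-recognition results) and store them under annotationFilePath in the in-memory PPlabel dict (and Cachelabel in 'Auto' mode)."""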
annotationFilePath = ustr(annotationFilePath)
if self.labelFile is None:
self.labelFile = LabelFile()
self.labelFile.verified = self.canvas.verified
def format_shape(s):
# print('s in saveLabels is ',s)
return dict(label=s.label, # str
line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points], # QPointF
# add chris
difficult=s.difficult) # bool
shapes = [] if mode == 'Auto' else \
[format_shape(shape) for shape in self.canvas.shapes]
# Can add different annotation formats here
if self.model == 'paddle':
for box in self.result_dic:
trans_dic = {"label": box[1][0], "points": box[0], 'difficult': False}
if trans_dic["label"] is "" and mode == 'Auto':
continue
shapes.append(trans_dic)
try:
trans_dic = []
for box in shapes:
trans_dic.append({"transcription": box['label'], "points": box['points'], 'difficult': False})
self.PPlabel[annotationFilePath] = trans_dic
if mode == 'Auto':
self.Cachelabel[annotationFilePath] = trans_dic
# else:
# self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
# self.lineColor.getRgb(), self.fillColor.getRgb())
# print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
return True
except LabelFileError as e:
self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
return False
def copySelectedShape(self):
self.addLabel(self.canvas.copySelectedShape())
# fix copy and delete
self.shapeSelectionChanged(True)
def labelSelectionChanged(self):
item = self.currentItem()
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapes[item])
shape = self.itemsToShapes[item]
# Add Chris
self.diffcButton.setChecked(shape.difficult)
def boxSelectionChanged(self):
item = self.currentBox()
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapesbox[item])
shape = self.itemsToShapesbox[item]
# Add Chris
self.diffcButton.setChecked(shape.difficult)
def labelItemChanged(self, item):
shape = self.itemsToShapes[item]
label = item.text()
if label != shape.label:
shape.label = item.text()
# shape.line_color = generateColorByText(shape.label)
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
def newShape(self):
"""Pop-up and give focus to the label editor.
position MUST be in global coordinates.
"""
if len(self.labelHist) > 0:
self.labelDialog = LabelDialog(
parent=self, listItem=self.labelHist)
# Sync single class mode from PR#106
if self.singleClassMode.isChecked() and self.lastLabel:
text = self.lastLabel
else:
text = self.labelDialog.popUp(text=self.prevLabelText)
self.lastLabel = text
# Add Chris
self.diffcButton.setChecked(False)
if text is not None:
self.prevLabelText = self.stringBundle.getString('tempLabel')
# generate_color = generateColorByText(text)
shape = self.canvas.setLastLabel(text, None, None)#generate_color, generate_color
self.addLabel(shape)
if self.beginner(): # Switch to edit mode.
self.canvas.setEditing(True)
self.actions.create.setEnabled(True)
else:
self.actions.editMode.setEnabled(True)
self.setDirty()
else:
# self.canvas.undoLastLine()
self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
units = - delta / (8 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
def setZoom(self, value):
self.actions.fitWidth.setChecked(False)
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.MANUAL_ZOOM
self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(int(new_h_bar_value))
v_bar.setValue(int(new_v_bar_value))
def setFitWindow(self, value=True):
if value:
self.actions.fitWidth.setChecked(False)
self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
self.adjustScale()
def setFitWidth(self, value=True):
if value:
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
self.resetState()
self.canvas.setEnabled(False)
if filePath is None:
filePath = self.settings.get(SETTING_FILENAME)
# Make sure that filePath is a regular python string, rather than QString
filePath = ustr(filePath)
# Fix bug: an index error occurred after selecting a directory and then opening a new file.
unicodeFilePath = ustr(filePath)
# unicodeFilePath = os.path.abspath(unicodeFilePath)
# Tzutalin 20160906 : Add file list and dock to move faster
# Highlight the file item
if unicodeFilePath and self.fileListWidget.count() > 0:
if unicodeFilePath in self.mImgList:
index = self.mImgList.index(unicodeFilePath)
fileWidgetItem = self.fileListWidget.item(index)
print('unicodeFilePath is', unicodeFilePath)
fileWidgetItem.setSelected(True)
###
self.iconlist.clear()
self.additems5(None)
for i in range(self.iconlist.count()): # there may be fewer than five thumbnails
item_tooltip = self.iconlist.item(i).toolTip()
# print(i,"---",item_tooltip)
if item_tooltip == ustr(filePath):
titem = self.iconlist.item(i)
titem.setSelected(True)
self.iconlist.scrollToItem(titem)
break
else:
self.fileListWidget.clear()
self.mImgList.clear()
self.iconlist.clear()
# if unicodeFilePath and self.iconList.count() > 0:
# if unicodeFilePath in self.mImgList:
if unicodeFilePath and os.path.exists(unicodeFilePath):
if LabelFile.isLabelFile(unicodeFilePath):
try:
self.labelFile = LabelFile(unicodeFilePath)
except LabelFileError as e:
self.errorMessage(u'Error opening file',
(u"<p><b>%s</b></p>"
u"<p>Make sure <i>%s</i> is a valid label file.")
% (e, unicodeFilePath))
self.status("Error reading %s" % unicodeFilePath)
return False
self.imageData = self.labelFile.imageData
self.lineColor = QColor(*self.labelFile.lineColor)
self.fillColor = QColor(*self.labelFile.fillColor)
self.canvas.verified = self.labelFile.verified
else:
# Load image:
# read data first and store for saving into label file.
self.imageData = read(unicodeFilePath, None)
self.labelFile = None
self.canvas.verified = False
image = QImage.fromData(self.imageData)
if image.isNull():
self.errorMessage(u'Error opening file',
u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
self.status("Error reading %s" % unicodeFilePath)
return False
self.status("Loaded %s" % os.path.basename(unicodeFilePath))
self.image = image
self.filePath = unicodeFilePath
self.canvas.loadPixmap(QPixmap.fromImage(image))
if self.labelFile:
self.loadLabels(self.labelFile.shapes)
if self.validFilestate(filePath) is True:
self.setClean()
else:
self.dirty = False
self.actions.save.setEnabled(True)
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
self.addRecentFile(self.filePath)
self.toggleActions(True)
self.showBoundingBoxFromPPlabel(filePath)
self.setWindowTitle(__appname__ + ' ' + filePath)
# Default : select last item if there is at least one item
if self.labelList.count():
self.labelList.setCurrentItem(self.labelList.item(self.labelList.count() - 1))
self.labelList.item(self.labelList.count() - 1).setSelected(True)
self.canvas.setFocus(True)
return True
return False
def showBoundingBoxFromPPlabel(self, filePath):
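"""Load the boxes saved for filePath from the PPlabel dict and draw them on the canvas."""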
imgidx = self.getImglabelidx(filePath)
if imgidx not in self.PPlabel.keys():
return
shapes = []
for box in self.PPlabel[imgidx]:
shapes.append((box['transcription'], box['points'], None, None, box['difficult']))
print(shapes)
self.loadLabels(shapes)
self.canvas.verified = False
def validFilestate(self, filePath):
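"""Return True if the file is confirmed (state 1), False if it is recorded but unconfirmed, and None if it has no record."""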
if filePath not in self.fileStatedict.keys():
return None
elif self.fileStatedict[filePath] == 1:
return True
else:
return False
def resizeEvent(self, event):
if self.canvas and not self.image.isNull() \
and self.zoomMode != self.MANUAL_ZOOM:
self.adjustScale()
super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
assert not self.image.isNull(), "cannot paint null image"
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.adjustSize()
self.canvas.update()
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e - 110
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
else:
settings = self.settings
# If images were loaded from a directory, don't reload the file at the beginning
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
settings[SETTING_ADVANCE_MODE] = not self._beginner
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ''
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ''
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
settings.save()
try:
self.saveFilestate()
self.savePPlabel()
except:
pass
def loadRecent(self, filename):
if self.mayContinue():
self.loadFile(filename)
def scanAllImages(self, folderPath):
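"""Return the absolute paths of all images under folderPath that Qt can read, naturally sorted."""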
extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
images = []
for file in os.listdir(folderPath):
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(folderPath, file)
path = ustr(os.path.abspath(relativePath))
images.append(path)
natural_sort(images, key=lambda x: x.lower())
return images
def openDirDialog(self, _value=False, dirpath=None, silent=False):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
if not silent:
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__,
defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
else:
targetDirPath = ustr(defaultOpenDirPath)
self.lastOpenDir = targetDirPath
self.importDirImages(targetDirPath)
def importDirImages(self, dirpath, isDelete = False):
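"""Open a directory: load its label and file-state records, rebuild the file list and thumbnail bar, and display the next image."""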
if not self.mayContinue() or not dirpath:
return
if self.defaultSaveDir and self.defaultSaveDir != dirpath:
self.saveFilestate()
self.savePPlabel()
if not isDelete:
self.loadFilestate(dirpath)
self.PPlabelpath = dirpath+ '/Label.txt'
self.PPlabel = self.loadLabelFile(self.PPlabelpath)
self.Cachelabelpath = dirpath + '/Cache.cach'
self.Cachelabel = self.loadLabelFile(self.Cachelabelpath)
if self.Cachelabel:
self.PPlabel = dict(self.Cachelabel, **self.PPlabel)
self.lastOpenDir = dirpath
self.dirname = dirpath
self.defaultSaveDir = dirpath
self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
(__appname__, self.defaultSaveDir))
self.statusBar().show()
self.filePath = None
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
self.mImgList5 = self.mImgList[:5]
self.openNextImg()
doneicon = newIcon('done')
closeicon = newIcon('close')
for imgPath in self.mImgList:
filename = os.path.basename(imgPath)
if self.validFilestate(imgPath) is True:
item = QListWidgetItem(doneicon, filename)
else:
item = QListWidgetItem(closeicon, filename)
self.fileListWidget.addItem(item)
print('dirPath in importDirImages is', dirpath)
self.iconlist.clear()
self.additems5(dirpath)
self.changeFileFolder = True
self.haveAutoReced = False
self.AutoRecognition.setEnabled(True)
self.reRecogButton.setEnabled(True)
def verifyImg(self, _value=False):
# Proceed to the next image without a dialog if it has any labels
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
# If the label file does not exist yet, create it and
# re-save it with the verified attribute.
self.saveFile()
if self.labelFile is not None:
self.labelFile.toggleVerify()
else:
return
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
def openPrevImg(self, _value=False):
if len(self.mImgList) <= 0:
return
if self.filePath is None:
return
currIndex = self.mImgList.index(self.filePath)
self.mImgList5 = self.mImgList[:5]
if currIndex - 1 >= 0:
filename = self.mImgList[currIndex - 1]
self.mImgList5 = self.indexTo5Files(currIndex - 1)
if filename:
self.loadFile(filename)
def openNextImg(self, _value=False):
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
filename = None
if self.filePath is None:
filename = self.mImgList[0]
self.mImgList5 = self.mImgList[:5]
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
self.mImgList5 = self.indexTo5Files(currIndex + 1)
else:
self.mImgList5 = self.indexTo5Files(currIndex)
if filename:
print('file name in openNextImg is', filename)
self.loadFile(filename)
def openFile(self, _value=False):
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
# print('filename in openfile is ', self.filePath)
self.filePath = None
self.fileListWidget.clear()
self.iconlist.clear()
self.mImgList = [filename]
self.openNextImg()
if self.validFilestate(filename) is True:
item = QListWidgetItem(newIcon('done'), filename)
self.setClean()
elif self.validFilestate(filename) is None:
item = QListWidgetItem(newIcon('close'), filename)
else:
item = QListWidgetItem(newIcon('close'), filename)
self.setDirty()
self.fileListWidget.addItem(item) # use the icon-bearing item built above
self.additems5(None)
print('opened image is', filename)
def updateFileListIcon(self, filename):
pass
def saveFile(self, _value=False, mode='Manual'):
if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
if self.filePath:
imgidx = self.getImglabelidx(self.filePath)
self._saveFile(imgidx, mode=mode)
else:
imgFileDir = os.path.dirname(self.filePath)
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(imgFileDir, savedFileName)
self._saveFile(savedPath if self.labelFile
else self.saveFileDialog(removeExt=False), mode=mode)
def saveFileAs(self, _value=False):
assert not self.image.isNull(), "cannot save empty image"
self._saveFile(self.saveFileDialog())
def saveFileDialog(self, removeExt=True):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % LabelFile.suffix
openDialogPath = self.currentPath()
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix(LabelFile.suffix[1:])
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
fullFilePath = ustr(dlg.selectedFiles()[0])
if removeExt:
return os.path.splitext(fullFilePath)[0] # Return file path without the extension.
else:
return fullFilePath
return ''
def _saveFile(self, annotationFilePath, mode='Manual'):
if mode == 'Manual':
if annotationFilePath and self.saveLabels(annotationFilePath, mode=mode):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
currIndex = self.mImgList.index(self.filePath)
item = self.fileListWidget.item(currIndex)
item.setIcon(newIcon('done'))
self.fileStatedict[self.filePath] = 1
if len(self.fileStatedict) % self.autoSaveNum == 0:
self.saveFilestate()
self.savePPlabel(mode='Auto')
self.fileListWidget.insertItem(int(currIndex), item)
self.openNextImg()
self.actions.saveRec.setEnabled(True)
elif mode == 'Auto':
if annotationFilePath and self.saveLabels(annotationFilePath, mode=mode):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
def closeFile(self, _value=False):
if not self.mayContinue():
return
self.resetState()
self.setClean()
self.toggleActions(False)
self.canvas.setEnabled(False)
self.actions.saveAs.setEnabled(False)
def deleteImg(self):
deletePath = self.filePath
if deletePath is not None:
deleteInfo = self.deleteImgDialog()
if deleteInfo == QMessageBox.Yes:
if platform.system() == 'Windows':
from win32com.shell import shell, shellcon
shell.SHFileOperation((0, shellcon.FO_DELETE, deletePath, None,
shellcon.FOF_SILENT | shellcon.FOF_ALLOWUNDO | shellcon.FOF_NOCONFIRMATION,
None, None))
# linux
elif platform.system() == 'Linux':
subprocess.call(['trash', deletePath]) # avoids shell quoting issues with spaces in the path
# macOS
elif platform.system() == 'Darwin':
import subprocess
absPath = os.path.abspath(deletePath).replace('\\', '\\\\').replace('"', '\\"')
cmd = ['osascript', '-e',
'tell app "Finder" to move {the POSIX file "' + absPath + '"} to trash']
print(cmd)
subprocess.call(cmd, stdout=open(os.devnull, 'w'))
if self.filePath in self.fileStatedict.keys():
self.fileStatedict.pop(self.filePath)
imgidx = self.getImglabelidx(self.filePath)
if imgidx in self.PPlabel.keys():
self.PPlabel.pop(imgidx)
self.openNextImg()
self.importDirImages(self.lastOpenDir, isDelete=True)
def deleteImgDialog(self):
yes, cancel = QMessageBox.Yes, QMessageBox.Cancel
msg = u'The image will be moved to the recycle bin'
return QMessageBox.warning(self, u'Attention', msg, yes | cancel)
def resetAll(self):
self.settings.reset()
self.close()
proc = QProcess()
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
if not self.dirty:
return True
else:
discardChanges = self.discardChangesDialog()
if discardChanges == QMessageBox.No:
return True
elif discardChanges == QMessageBox.Yes:
self.saveFile()
return True
else:
return False
def discardChangesDialog(self):
yes, no, cancel = QMessageBox.Yes, QMessageBox.No, QMessageBox.Cancel
msg = u'You have unsaved changes, would you like to save them and proceed?\nClick "No" to undo all changes.'
return QMessageBox.warning(self, u'Attention', msg, yes | no | cancel)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def chooseColor1(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.lineColor = color
Shape.line_color = color
self.canvas.setDrawingColor(color)
self.canvas.update()
self.setDirty()
def deleteSelectedShape(self):
self.remLabel(self.canvas.deleteSelected())
self.setDirty()
if self.noShapes():
for action in self.actions.onShapesPresent:
action.setEnabled(False)
def chshapeLineColor(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.canvas.selectedShape.line_color = color
self.canvas.update()
self.setDirty()
def chshapeFillColor(self):
color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
default=DEFAULT_FILL_COLOR)
if color:
self.canvas.selectedShape.fill_color = color
self.canvas.update()
self.setDirty()
def copyShape(self):
self.canvas.endMove(copy=True)
self.addLabel(self.canvas.selectedShape)
self.setDirty()
def moveShape(self):
self.canvas.endMove(copy=False)
self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
if os.path.exists(predefClassesFile):
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
def togglePaintLabelsOption(self):
for shape in self.canvas.shapes:
shape.paintLabel = self.displayLabelOption.isChecked()
def toogleDrawSquare(self):
self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
def additems(self, dirpath):
for file in self.mImgList:
pix = QPixmap(file)
_, filename = os.path.split(file)
filename, _ = os.path.splitext(filename)
item = QListWidgetItem(QIcon(pix.scaled(100, 100, Qt.IgnoreAspectRatio, Qt.FastTransformation)),
filename[:10])
item.setToolTip(file)
self.iconlist.addItem(item)
def additems5(self, dirpath):
for file in self.mImgList5:
pix = QPixmap(file)
_, filename = os.path.split(file)
filename, _ = os.path.splitext(filename)
pfilename = filename[:10]
if len(pfilename) < 10:
    # pad short names so entries line up in the icon list
    lentoken = 12 - len(pfilename)
    prelen = lentoken // 2
    pfilename = prelen * " " + pfilename + (lentoken - prelen) * " "
# item = QListWidgetItem(QIcon(pix.scaled(100, 100, Qt.KeepAspectRatio, Qt.SmoothTransformation)), filename[:10])
item = QListWidgetItem(QIcon(pix.scaled(100, 100, Qt.IgnoreAspectRatio, Qt.FastTransformation)), pfilename)
# item.setForeground(QBrush(Qt.white))
item.setToolTip(file)
self.iconlist.addItem(item)
owidth = 0
for index in range(len(self.mImgList5)):
item = self.iconlist.item(index)
itemwidget = self.iconlist.visualItemRect(item)
owidth += itemwidget.width()
self.iconlist.setMinimumWidth(owidth + 50)
def getImglabelidx(self, filePath):
    if platform.system() == 'Windows':
        splitter = '\\'
    else:
        splitter = '/'
    filepathsplit = filePath.split(splitter)[-2:]
    return filepathsplit[0] + '/' + filepathsplit[1]
def autoRecognition(self):
assert self.mImgList is not None
print('Using model from ', self.model)
uncheckedList = [i for i in self.mImgList if i not in self.fileStatedict.keys()]
self.autoDialog = AutoDialog(parent=self, ocr=self.ocr, mImgList=uncheckedList, lenbar=len(uncheckedList))
self.autoDialog.popUp()
self.currIndex = len(self.mImgList)
self.loadFile(self.filePath) # ADD
self.haveAutoReced = True
self.AutoRecognition.setEnabled(False)
self.setDirty()
self.saveCacheLabel()
def reRecognition(self):
img = cv2.imread(self.filePath)
# org_box = [dic['points'] for dic in self.PPlabel[self.getImglabelidx(self.filePath)]]
if self.canvas.shapes:
self.result_dic = []
rec_flag = 0
for shape in self.canvas.shapes:
box = [[int(p.x()), int(p.y())] for p in shape.points]
assert len(box) == 4
img_crop = get_rotate_crop_image(img, np.array(box, np.float32))
if img_crop is None:
msg = 'Cannot recognise the detection box in ' + self.filePath + '. Please adjust it manually.'
QMessageBox.information(self, "Information", msg)
return
result = self.ocr.ocr(img_crop, cls=True, det=False)
if result[0][0] != '':
result.insert(0, box)
print('result in reRec is ', result)
self.result_dic.append(result)
if result[1][0] == shape.label:
print('label no change')
else:
rec_flag += 1
if len(self.result_dic) > 0 and rec_flag > 0:
self.saveFile(mode='Auto')
self.loadFile(self.filePath)
self.setDirty()
elif len(self.result_dic) == len(self.canvas.shapes) and rec_flag == 0:
QMessageBox.information(self, "Information", "The recognition result remains unchanged!")
else:
print('Cannot recognise in ', self.filePath)
else:
QMessageBox.information(self, "Information", "Draw a box!")
def autolcm(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
self.panel = QLabel()
self.panel.setText(self.stringBundle.getString('choseModelLg'))
self.panel.setAlignment(Qt.AlignLeft)
self.comboBox = QComboBox()
self.comboBox.setObjectName("comboBox")
self.comboBox.addItems(['Chinese & English', 'English', 'French', 'German', 'Korean', 'Japanese'])
# self.comboBox_lg = QComboBox()
# self.comboBox_lg.setObjectName("comboBox_language")
vbox.addWidget(self.panel)
vbox.addWidget(self.comboBox)
self.dialog = QDialog()
self.dialog.resize(300, 100)
self.okBtn = QPushButton(self.stringBundle.getString('ok'))
self.cancelBtn = QPushButton(self.stringBundle.getString('cancel'))
self.okBtn.clicked.connect(self.modelChoose)
self.cancelBtn.clicked.connect(self.cancel)
self.dialog.setWindowTitle(self.stringBundle.getString('choseModelLg'))
hbox.addWidget(self.okBtn)
hbox.addWidget(self.cancelBtn)
vbox.addWidget(self.panel)
vbox.addLayout(hbox)
self.dialog.setLayout(vbox)
self.dialog.setWindowModality(Qt.ApplicationModal)
self.dialog.exec_()
if self.filePath:
self.AutoRecognition.setEnabled(True)
def modelChoose(self):
print(self.comboBox.currentText())
lg_idx = {'Chinese & English': 'ch', 'English': 'en', 'French': 'french', 'German': 'german',
'Korean': 'korean', 'Japanese': 'japan'}
del self.ocr
self.ocr = PaddleOCR(use_pdserving=False, use_angle_cls=True, det=True, cls=True, use_gpu=False,
lang=lg_idx[self.comboBox.currentText()])
self.dialog.close()
def cancel(self):
self.dialog.close()
def loadFilestate(self, saveDir):
self.fileStatepath = saveDir + '/fileState.txt'
self.fileStatedict = {}
if not os.path.exists(self.fileStatepath):
    # create an empty state file on first run; close it so the handle is not leaked
    open(self.fileStatepath, 'w', encoding='utf-8').close()
else:
with open(self.fileStatepath, 'r', encoding='utf-8') as f:
states = f.readlines()
for each in states:
file, state = each.split('\t')
self.fileStatedict[file] = 1
def saveFilestate(self):
with open(self.fileStatepath, 'w', encoding='utf-8') as f:
for key in self.fileStatedict:
f.write(key + '\t')
f.write(str(self.fileStatedict[key]) + '\n')
def loadLabelFile(self, labelpath):
labeldict = {}
if not os.path.exists(labelpath):
    # create an empty label file on first run; close it so the handle is not leaked
    open(labelpath, 'w', encoding='utf-8').close()
else:
with open(labelpath, 'r', encoding='utf-8') as f:
data = f.readlines()
for each in data:
file, label = each.split('\t')
if label:
label = label.replace('false', 'False')
labeldict[file] = eval(label)
else:
labeldict[file] = []
return labeldict
def savePPlabel(self, mode='Manual'):
savedfile = [self.getImglabelidx(i) for i in self.fileStatedict.keys()]
with open(self.PPlabelpath, 'w', encoding='utf-8') as f:
for key in self.PPlabel:
if key in savedfile:
f.write(key + '\t')
f.write(json.dumps(self.PPlabel[key], ensure_ascii=False) + '\n')
if mode == 'Manual':
    msg = 'Images that have been checked are saved in ' + self.PPlabelpath
QMessageBox.information(self, "Information", msg)
def saveCacheLabel(self):
with open(self.Cachelabelpath, 'w', encoding='utf-8') as f:
for key in self.Cachelabel:
f.write(key + '\t')
f.write(json.dumps(self.Cachelabel[key], ensure_ascii=False) + '\n')
def saveRecResult(self):
if None in [self.PPlabelpath, self.PPlabel, self.fileStatedict]:
QMessageBox.information(self, "Information", "Save file first")
return
rec_gt_dir = os.path.dirname(self.PPlabelpath) + '/rec_gt.txt'
crop_img_dir = os.path.dirname(self.PPlabelpath) + '/crop_img/'
if not os.path.exists(crop_img_dir):
os.mkdir(crop_img_dir)
with open(rec_gt_dir, 'w', encoding='utf-8') as f:
for key in self.fileStatedict:
    idx = self.getImglabelidx(key)
    img = cv2.imread(key)  # read each source image once instead of once per box
    for i, label in enumerate(self.PPlabel[idx]):
        img_crop = get_rotate_crop_image(img, np.array(label['points'], np.float32))
        img_name = os.path.splitext(os.path.basename(idx))[0] + '_crop_' + str(i) + '.jpg'
        cv2.imwrite(crop_img_dir + img_name, img_crop)
        f.write('crop_img/' + img_name + '\t')
        f.write(label['transcription'] + '\n')
QMessageBox.information(self, "Information", "Cropped images has been saved in "+str(crop_img_dir))
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
try:
with open(filename, 'rb') as f:
return f.read()
except Exception:
return default
def get_main_app(argv=[]):
"""
Standard boilerplate Qt application code.
Do everything but app.exec_() -- so that we can test the application in one thread
"""
app = QApplication(argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("app"))
# Tzutalin 201705+: Accept extra arguments to change predefined class file
argparser = argparse.ArgumentParser()
argparser.add_argument("--lang", default='ch', nargs="?")
argparser.add_argument("--predefined_classes_file",
default=os.path.join(os.path.dirname(__file__), "data", "predefined_classes.txt"),
nargs="?")
args = argparser.parse_args(argv[1:])
# Usage : labelImg.py image predefClassFile saveDir
win = MainWindow(lang=args.lang,
defaultPrefdefClassFile=args.predefined_classes_file,
)
win.show()
return app, win
def main():
'''construct main app and run it'''
app, _win = get_main_app(sys.argv)
return app.exec_()
if __name__ == '__main__':
resource_file = './libs/resources.py'
if not os.path.exists(resource_file):
output = os.system('pyrcc5 -o libs/resources.py resources.qrc')
assert output == 0, "Failed to generate resources: run 'pyrcc5 -o libs/resources.py resources.qrc' manually " \
                    "and check that resources.py exists in the libs directory"
import libs.resources
sys.exit(main())
# PPOCRLabel
PPOCRLabel is a semi-automatic graphical annotation tool for the OCR field. It is written in Python 3 and PyQt5, supports rectangle and four-point annotation modes, and exports labels in a format that can be used directly to train PPOCR detection and recognition models.
<img src="./data/gif/steps.gif" width="100%"/>
## Installation
### 1. Install PaddleOCR
Refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR
### 2. Install PPOCRLabel
#### Windows + Anaconda
Download and install [Anaconda](https://www.anaconda.com/download/#download) (Python 3+)
```
conda install pyqt=5
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
pyrcc5 -o libs/resources.py resources.qrc
python PPOCRLabel.py
```
#### Ubuntu Linux
```
sudo apt-get install pyqt5-dev-tools
sudo apt-get install trash-cli
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
sudo pip3 install -r requirements/requirements-linux-python3.txt
make qt5py3
python3 PPOCRLabel.py
```
#### macOS
```
pip3 install pyqt5
pip3 uninstall opencv-python # Uninstall opencv manually first; the macOS build of opencv conflicts with pyqt
pip3 install opencv-contrib-python-headless # Install the headless version of opencv
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
make qt5py3
python3 PPOCRLabel.py
```
## Usage
### Steps
1. Install and launch: install and run the program with the commands above.
2. Open folder: click "File" - "Open Dir" in the menu bar and select the folder of images to be labeled<sup>[1]</sup>.
3. Auto annotation: click "Auto Recognition"; the ultra-lightweight PPOCR model automatically annotates every image whose status<sup>[2]</sup> before the file name is "X".
4. Manual annotation: click "Create RectBox" (or simply press "W" in English keyboard mode) to draw boxes for any regions the model missed in the current image. Press "P" (or click "Edit" - "Four-point annotation") to use four-point mode: click four points in turn, then double-click the left mouse button to finish.
5. After a box is drawn, click "OK"; the detection box is first pre-assigned a "to be recognised" label.
6. Re-recognition: after all detection boxes in the image have been drawn or adjusted, click "Re-recognition"; the PPOCR model re-recognises **all detection boxes** in the current image<sup>[3]</sup>.
7. Content change: double-click a recognition result to correct inaccurate text manually.
8. Save: click "Save"; the image status switches to "√" and the program jumps to the next image.
9. Delete: click "Delete Image"; the image is moved to the recycle bin.
10. Labeling result: after the application is closed or the file path is switched, manually saved labels are stored in *Label.txt* under the opened image folder. Clicking "PaddleOCR" - "Save Recognition Results" in the menu bar saves the recognition training data of these images in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.
### Note
[1] PPOCRLabel uses the folder as the basic annotation unit. After a folder of images to be labeled is opened, the images are not shown in a window list; they are imported into the program directly once "Open Dir" is clicked.
[2] The image status indicates whether the user has manually saved the image: "X" if not, "√" if saved. After the "Auto Recognition" button is clicked, PPOCRLabel does not relabel images whose status is "√".
[3] Clicking "Re-recognition" overwrites the recognition results in the image, so results that were manually corrected beforehand may change after re-recognition.
[4] The files produced by PPOCRLabel are listed below. Do not change their contents manually, otherwise the program may behave abnormally. A sample *Label.txt* line is shown after the table.
| File name | Description |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | Detection labels, directly usable for PPOCR detection model training. Written automatically every time the user has saved 10 images, and also when the application is closed or the file path is switched. |
| fileState.txt | Image status file; records the names of the images in the current folder that have been manually confirmed by the user. |
| Cache.cach | Cache file that stores the results of automatic model recognition. |
| rec_gt.txt | Recognition labels, directly usable for PPOCR recognition model training. Generated only after the user clicks "PaddleOCR" - "Save Recognition Results" in the menu bar. |
| crop_img | Recognition data: images cropped according to the detection boxes. Generated together with rec_gt.txt. |
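For reference, each line of *Label.txt* pairs an image key of the form `folder/filename` with a tab and a JSON list of that image's boxes, matching what `savePPlabel` writes. The path, text, and coordinates below are illustrative values only, and keys other than `transcription` and `points` may also appear:
```
train_imgs/img_01.jpg	[{"transcription": "HELLO", "points": [[310, 104], [416, 104], [416, 134], [310, 134]]}]
```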
### References
1.[Tzutalin. LabelImg. Git code (2015)](https://github.com/tzutalin/labelImg)
# PPOCRLabel
PPOCRLabel is a semi-automatic graphic annotation tool suitable for the OCR field. It is written in Python 3 and PyQt5, supports rectangle labeling and four-point labeling modes, and its annotations can be used directly for training PPOCR detection and recognition models.
<img src="./data/gif/steps.gif" width="100%"/>
## Installation
### 1. Install PaddleOCR
Refer to [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR
### 2. Install PPOCRLabel
#### Windows + Anaconda
Download and install [Anaconda](https://www.anaconda.com/download/#download) (Python 3+)
```
conda install pyqt=5
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
pyrcc5 -o libs/resources.py resources.qrc
python PPOCRLabel.py --lang en
```
#### Ubuntu Linux
```
sudo apt-get install pyqt5-dev-tools
sudo apt-get install trash-cli
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
sudo pip3 install -r requirements/requirements-linux-python3.txt
make qt5py3
python3 PPOCRLabel.py --lang en
```
#### macOS
```
pip3 install pyqt5
pip3 uninstall opencv-python # Uninstall opencv manually as it conflicts with pyqt
pip3 install opencv-contrib-python-headless # Install the headless version of opencv
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
make qt5py3
python3 PPOCRLabel.py --lang en
```
## Usage
### Steps
1. Build and launch using the instructions above.
2. Click 'Open Dir' in Menu/File to select the folder containing the images.<sup>[1]</sup>
3. Click 'Auto recognition' to let the PPOCR model automatically annotate the images marked with 'X'<sup>[2]</sup> before the file name.
4. Create Box:
4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangle detection box. Click and release the left mouse button to select the text region to annotate.
4.2 Press 'P' to enter four-point labeling mode, which lets you create an arbitrary four-point shape: click four points with the left mouse button in succession, then DOUBLE CLICK the left button to signal that labeling is complete.
5. After the box is drawn, click "OK"; the detection box is pre-assigned a "TEMPORARY" label.
6. Click 're-Recognition'; the model will rewrite the recognition results of ALL detection boxes in the image<sup>[3]</sup>.
7. Double click a result in the 'recognition result' list to manually correct inaccurate recognition results.
8. Click "Save"; the image status switches to "√" and the program automatically jumps to the next image.
9. Click "Delete Image" and the image will be moved to the recycle bin.
10. Labeling result: after closing the application or switching the file path, the manually saved labels are stored in *Label.txt* under the opened picture folder.
Click "PaddleOCR" - "Save Recognition Results" in the menu bar to save the recognition training data of such pictures in the *crop_img* folder and the recognition labels in *rec_gt.txt*<sup>[4]</sup>.
### Note
[1] PPOCRLabel uses the opened folder as the project. After opening the image folder, the pictures are not displayed in a dialog; instead, they are imported into the program directly once "Open Dir" is clicked.
[2] The image status indicates whether the user has saved the image manually: "X" if not, "√" otherwise. PPOCRLabel will not relabel pictures with a status of "√".
[3] After clicking "Re-recognize", the model will overwrite ALL recognition results in the picture.
Therefore, if a recognition result was manually changed beforehand, it may change again after re-recognition.
[4] The files produced by PPOCRLabel are listed below. Please do not change their contents manually, otherwise the program may behave abnormally. A sample *rec_gt.txt* line is shown after the table.
| File name | Description |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | The detection label file, directly usable for PPOCR detection model training. The file is written automatically after every 10 saved label results, and also when the user closes the application or changes the folder. |
| fileState.txt | The picture status file; it records which images in the current folder have been manually confirmed by the user. |
| Cache.cach | Cache file that stores the results of model recognition. |
| rec_gt.txt | The recognition label file, directly usable for PPOCR recognition model training; generated after the user clicks "PaddleOCR"-"Save recognition result" in the menu bar. |
| crop_img | The recognition data: images cropped according to the detection boxes, generated together with *rec_gt.txt*. |
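For reference, each line of *rec_gt.txt* pairs a cropped image path with a tab and its transcription, matching what `saveRecResult` writes; the file name and text below are illustrative values only:
```
crop_img/img_01_crop_0.jpg	HELLO
```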
## Related
1.[Tzutalin. LabelImg. Git code (2015)](https://github.com/tzutalin/labelImg)
*.spec
build
dist
pyinstaller
python-2.*
pywin32*
virtual-wine
venv_wine
PyQt4-*
lxml-*
windows_v*
linux_v*
### Deploy to PyPI
```
cd [ROOT]
sh build-tools/build-for-pypi.sh
```
### Build for Ubuntu
```
cd build-tools
sh run-in-container.sh
sh envsetup.sh
sh build-ubuntu-binary.sh
```
### Build for Windows
```
cd build-tools
sh run-in-container.sh
sh envsetup.sh
sh build-windows-binary.sh
```
### Build for macOS High Sierra
```
cd build-tools
./build-for-macos.sh
```
Note: If there are some problems, try to
```
sudo rm -rf virtual-wine venv_wine
```
#!/bin/sh
brew install python@2
pip install --upgrade virtualenv
# clone labelimg source
rm -rf /tmp/labelImgSetup
mkdir /tmp/labelImgSetup
cd /tmp/labelImgSetup
curl https://codeload.github.com/tzutalin/labelImg/zip/master --output labelImg.zip
unzip labelImg.zip
rm labelImg.zip
# setup python3 space
virtualenv --system-site-packages -p python3 /tmp/labelImgSetup/labelImg-py3
source /tmp/labelImgSetup/labelImg-py3/bin/activate
cd labelImg-master
# build labelImg app
pip install py2app
pip install PyQt5 lxml
make qt5py3
rm -rf build dist
python setup.py py2app -A
mv "/tmp/labelImgSetup/labelImg-master/dist/labelImg.app" /Applications
# deactivate python3
deactivate
cd ../
rm -rf /tmp/labelImgSetup
echo 'DONE'
#!/bin/sh
# Packaging and Release
docker run --workdir=$(pwd)/ --volume="/home/$USER:/home/$USER" tzutalin/py2qt4 /bin/sh -c 'make qt4py2; make test;sudo python setup.py sdist;sudo python setup.py install'
while true; do
read -p "Do you wish to deploy this to PyPI(twine upload dist/* or pip install dist/*)?" yn
case $yn in
[Yy]* ) docker run -it --rm --workdir=$(pwd)/ --volume="/home/$USER:/home/$USER" tzutalin/py2qt4; break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
# python setup.py register
# python setup.py sdist upload
# Net pypi: twine upload dist/*
# Test before uploading: pip install dist/labelImg.tar.gz
#!/bin/bash
### Ubuntu uses pyinstaller v3.0
THIS_SCRIPT_PATH=`readlink -f $0`
THIS_SCRIPT_DIR=`dirname ${THIS_SCRIPT_PATH}`
cd pyinstaller
git checkout v3.2
cd ${THIS_SCRIPT_DIR}
rm -r build
rm -r dist
rm labelImg.spec
python pyinstaller/pyinstaller.py --hidden-import=xml \
--hidden-import=xml.etree \
--hidden-import=xml.etree.ElementTree \
--hidden-import=lxml.etree \
-D -F -n labelImg -c "../labelImg.py" -p ../libs -p ../
FOLDER=$(git describe --abbrev=0 --tags)
FOLDER="linux_"$FOLDER
rm -rf "$FOLDER"
mkdir "$FOLDER"
cp dist/labelImg $FOLDER
cp -rf ../data $FOLDER/data
zip "$FOLDER.zip" -r $FOLDER
#!/bin/bash
### Windows requires pyinstaller v2.1
wine msiexec -i python-2.7.8.msi
wine pywin32-218.win32-py2.7.exe
wine PyQt4-4.11.4-gpl-Py2.7-Qt4.8.7-x32.exe
wine lxml-3.7.3.win32-py2.7.exe
THIS_SCRIPT_PATH=`readlink -f $0`
THIS_SCRIPT_DIR=`dirname ${THIS_SCRIPT_PATH}`
cd pyinstaller
git checkout v2.1
cd ${THIS_SCRIPT_DIR}
echo ${THIS_SCRIPT_DIR}
#. venv_wine/bin/activate
rm -r build
rm -r dist
rm labelImg.spec
wine c:/Python27/python.exe pyinstaller/pyinstaller.py --hidden-import=xml \
--hidden-import=xml.etree \
--hidden-import=xml.etree.ElementTree \
--hidden-import=lxml.etree \
-D -F -n labelImg -c "../labelImg.py" -p ../libs -p ../
FOLDER=$(git describe --abbrev=0 --tags)
FOLDER="windows_"$FOLDER
rm -rf "$FOLDER"
mkdir "$FOLDER"
cp dist/labelImg.exe $FOLDER
cp -rf ../data $FOLDER/data
zip "$FOLDER.zip" -r $FOLDER
#!/bin/sh
THIS_SCRIPT_PATH=`readlink -f $0`
THIS_SCRIPT_DIR=`dirname ${THIS_SCRIPT_PATH}`
#OS Ubuntu 14.04
### Common packages for linux/windows
if [ ! -e "pyinstaller" ]; then
git clone https://github.com/pyinstaller/pyinstaller
cd pyinstaller
git checkout v2.1 -b v2.1
cd ${THIS_SCRIPT_DIR}
fi
echo "Going to clone and download packages for building windows"
#Pacakges
#> pyinstaller (2.1)
#> wine (1.6.2)
#> virtual-wine (0.1)
#> python-2.7.8.msi
#> pywin32-218.win32-py2.7.exe
## tool to install on Ubuntu
#$ sudo apt-get install wine
### Clone a repo to create virtual wine env
if [ ! -e "virtual-wine" ]; then
git clone https://github.com/htgoebel/virtual-wine.git
fi
apt-get install scons
### Create virtual env
rm -rf venv_wine
./virtual-wine/vwine-setup venv_wine
#### Activate virtual env
. venv_wine/bin/activate
### Use wine to install packages to virtual env
if [ ! -e "python-2.7.8.msi" ]; then
wget "https://www.python.org/ftp/python/2.7.8/python-2.7.8.msi"
fi
if [ ! -e "pywin32-218.win32-py2.7.exe" ]; then
wget "http://nchc.dl.sourceforge.net/project/pywin32/pywin32/Build%20218/pywin32-218.win32-py2.7.exe"
fi
if [ ! -e "PyQt4-4.11.4-gpl-Py2.7-Qt4.8.7-x32.exe" ]; then
wget "http://nchc.dl.sourceforge.net/project/pyqt/PyQt4/PyQt-4.11.4/PyQt4-4.11.4-gpl-Py2.7-Qt4.8.7-x32.exe"
fi
if [ ! -e "lxml-3.7.3.win32-py2.7.exe" ]; then
wget "https://pypi.python.org/packages/a3/f6/a28c5cf63873f6c55a3eb7857b736379229b85ba918261d2e88cf886905e/lxml-3.7.3.win32-py2.7.exe#md5=a0f746355876aca4ca5371cb0f1d13ce"
fi
#!/bin/sh
docker run -it \
--user $(id -u) \
-e DISPLAY=unix$DISPLAY \
--workdir=$(pwd) \
--volume="/home/$USER:/home/$USER" \
--volume="/etc/group:/etc/group:ro" \
--volume="/etc/passwd:/etc/passwd:ro" \
--volume="/etc/shadow:/etc/shadow:ro" \
--volume="/etc/sudoers.d:/etc/sudoers.d:ro" \
-v /tmp/.X11-unix:/tmp/.X11-unix \
tzutalin/py2qt4
# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
try:
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QComboBox
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import QWidget, QHBoxLayout, QComboBox
class ComboBox(QWidget):
def __init__(self, parent=None, items=[]):
super(ComboBox, self).__init__(parent)
layout = QHBoxLayout()
self.cb = QComboBox()
self.items = items
self.cb.addItems(self.items)
self.cb.currentIndexChanged.connect(parent.comboSelectionChanged)
layout.addWidget(self.cb)
self.setLayout(layout)
def update_items(self, items):
self.items = items
self.cb.clear()
self.cb.addItems(self.items)
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import json
from libs.utils import newIcon
BB = QDialogButtonBox
class Worker(QThread):
progressBarValue = pyqtSignal(int)
listValue = pyqtSignal(str)
endsignal = pyqtSignal(int, str)
handle = 0
def __init__(self, ocr, mImgList, mainThread, model):
super(Worker, self).__init__()
self.ocr = ocr
self.mImgList = mImgList
self.mainThread = mainThread
self.model = model
def run(self):
try:
findex = 0
for Imgpath in self.mImgList:
if self.handle == 0:
self.listValue.emit(Imgpath)
if self.model == 'paddle':
self.result_dic = self.ocr.ocr(Imgpath, cls=True, det=True)
# save the recognition results
if self.result_dic is None or len(self.result_dic) == 0:
    print('Cannot recognise file:', Imgpath)
else:
for res in self.result_dic:
chars = res[1][0]
cond = res[1][1]
posi = res[0]
self.listValue.emit("文字:" + chars + " 置信度:" + str(cond) + " 坐标:" + json.dumps(posi))
self.mainThread.result_dic = self.result_dic
self.mainThread.filePath = Imgpath
# save the annotation file
self.mainThread.saveFile(mode='Auto')
findex += 1
self.progressBarValue.emit(findex)
else:
break
self.endsignal.emit(0, "readAll")
self.exec()
except Exception as e:
print(e)
raise
class AutoDialog(QDialog):
def __init__(self, text="Enter object label", parent=None, ocr=None, mImgList=None, lenbar=0):
super(AutoDialog, self).__init__(parent)
self.setFixedWidth(1000)
self.parent = parent
self.ocr = ocr
self.mImgList = mImgList
self.pb = QProgressBar()
self.pb.setRange(0, lenbar)
self.pb.setValue(0)
layout = QVBoxLayout()
layout.addWidget(self.pb)
self.model = 'paddle'
self.listWidget = QListWidget(self)
layout.addWidget(self.listWidget)
self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
bb.rejected.connect(self.reject)
layout.addWidget(bb)
bb.button(BB.Ok).setEnabled(False)
self.setLayout(layout)
self.setWindowTitle("自动标注中")
self.setWindowModality(Qt.ApplicationModal)
# self.setWindowFlags(Qt.WindowCloseButtonHint)
self.thread_1 = Worker(self.ocr, self.mImgList, self.parent, 'paddle')
self.thread_1.progressBarValue.connect(self.handleProgressBarSignal)
self.thread_1.listValue.connect(self.handleListWidgetSignal)
self.thread_1.endsignal.connect(self.handleEndSignal)
def handleProgressBarSignal(self, i):
    self.pb.setValue(i)
def handleListWidgetSignal(self, i):
    self.listWidget.addItem(i)
    titem = self.listWidget.item(self.listWidget.count() - 1)
    self.listWidget.scrollToItem(titem)
def handleEndSignal(self, i, msg):
    # msg carries the end-of-run marker; avoid shadowing the built-in str
    if i == 0 and msg == "readAll":
        self.buttonBox.button(BB.Ok).setEnabled(True)
        self.buttonBox.button(BB.Cancel).setEnabled(False)
def reject(self):
print("reject")
self.thread_1.handle = -1
self.thread_1.quit()
# del self.thread_1
# if self.thread_1.isRunning():
# self.thread_1.terminate()
# self.thread_1.quit()
# super(AutoDialog,self).reject()
self.thread_1.wait()  # block until the worker thread has finished, instead of busy-waiting
self.accept()
def validate(self):
self.accept()
def postProcess(self):
try:
self.edit.setText(self.edit.text().trimmed())
# print(self.edit.text())
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
self.edit.setText(self.edit.text())
print(self.edit.text())
def popUp(self):
self.thread_1.start()
return 1 if self.exec_() else None
def closeEvent(self, event):
print("???")
# if self.thread_1.isRunning():
# self.thread_1.quit()
#
# # self._thread.terminate()
# # del self.thread_1
# super(AutoDialog, self).closeEvent(event)
self.reject()
# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#from PyQt4.QtOpenGL import *
from libs.shape import Shape
from libs.utils import distance
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_MOVE = Qt.ClosedHandCursor
CURSOR_GRAB = Qt.OpenHandCursor
# class Canvas(QGLWidget):
class Canvas(QWidget):
zoomRequest = pyqtSignal(int)
scrollRequest = pyqtSignal(int, int)
newShape = pyqtSignal()
selectionChanged = pyqtSignal(bool)
shapeMoved = pyqtSignal()
drawingPolygon = pyqtSignal(bool)
CREATE, EDIT = list(range(2))
_fill_drawing = False # draw shadows
epsilon = 11.0
def __init__(self, *args, **kwargs):
super(Canvas, self).__init__(*args, **kwargs)
# Initialise local state.
self.mode = self.EDIT
self.shapes = []
self.current = None
self.selectedShape = None # save the selected shape here
self.selectedShapeCopy = None
self.drawingLineColor = QColor(0, 0, 255)
self.drawingRectColor = QColor(0, 0, 255)
self.line = Shape(line_color=self.drawingLineColor)
self.prevPoint = QPointF()
self.offsets = QPointF(), QPointF()
self.scale = 1.0
self.pixmap = QPixmap()
self.visible = {}
self._hideBackround = False
self.hideBackround = False
self.hShape = None
self.hVertex = None
self._painter = QPainter()
self._cursor = CURSOR_DEFAULT
# Menus:
self.menus = (QMenu(), QMenu())
# Set widget options.
self.setMouseTracking(True)
self.setFocusPolicy(Qt.WheelFocus)
self.verified = False
self.drawSquare = False
self.fourpoint = True # ADD
self.pointnum = 0
#initialisation for panning
self.pan_initial_pos = QPoint()
def setDrawingColor(self, qColor):
self.drawingLineColor = qColor
self.drawingRectColor = qColor
def enterEvent(self, ev):
self.overrideCursor(self._cursor)
def leaveEvent(self, ev):
self.restoreCursor()
def focusOutEvent(self, ev):
self.restoreCursor()
def isVisible(self, shape):
return self.visible.get(shape, True)
def drawing(self):
return self.mode == self.CREATE
def editing(self):
return self.mode == self.EDIT
def setEditing(self, value=True):
self.mode = self.EDIT if value else self.CREATE
if not value: # Create
self.unHighlight()
self.deSelectShape()
self.prevPoint = QPointF()
self.repaint()
def unHighlight(self):
if self.hShape:
self.hShape.highlightClear()
self.hVertex = self.hShape = None
def selectedVertex(self):
return self.hVertex is not None
def mouseMoveEvent(self, ev):
"""Update line with last point and current coordinates."""
pos = self.transformPos(ev.pos())
# Update coordinates in status bar if image is opened
window = self.parent().window()
if window.filePath is not None:
self.parent().window().labelCoordinates.setText(
'X: %d; Y: %d' % (pos.x(), pos.y()))
# Polygon drawing.
if self.drawing():
self.overrideCursor(CURSOR_DRAW) # ?
if self.current:
# Display annotation width and height while drawing
currentWidth = abs(self.current[0].x() - pos.x())
currentHeight = abs(self.current[0].y() - pos.y())
self.parent().window().labelCoordinates.setText(
'Width: %d, Height: %d / X: %d; Y: %d' % (currentWidth, currentHeight, pos.x(), pos.y()))
color = self.drawingLineColor
if self.outOfPixmap(pos):
# Don't allow the user to draw outside the pixmap.
# Clip the coordinates to 0 or max,
# if they are outside the range [0, max]
size = self.pixmap.size()
clipped_x = min(max(0, pos.x()), size.width())
clipped_y = min(max(0, pos.y()), size.height())
pos = QPointF(clipped_x, clipped_y)
elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]) and not self.fourpoint:
# Attract line to starting point and colorise to alert the
# user:
pos = self.current[0]
color = self.current.line_color
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
elif ( # ADD
len(self.current) > 1
and self.fourpoint
and self.closeEnough(pos, self.current[0])
):
# Attract line to starting point and
# colorise to alert the user.
pos = self.current[0]
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
if self.drawSquare:
initPos = self.current[0]
minX = initPos.x()
minY = initPos.y()
min_size = min(abs(pos.x() - minX), abs(pos.y() - minY))
directionX = -1 if pos.x() - minX < 0 else 1
directionY = -1 if pos.y() - minY < 0 else 1
self.line[1] = QPointF(minX + directionX * min_size, minY + directionY * min_size)
elif self.fourpoint:
# self.line[self.pointnum] = pos # OLD
self.line[0] = self.current[-1]
self.line[1] = pos
else:
self.line[1] = pos # pos is the mouse's current position
self.line.line_color = color
self.prevPoint = QPointF() # ?
self.current.highlightClear()
else:
self.prevPoint = pos
self.repaint()
return
# Polygon copy moving.
if Qt.RightButton & ev.buttons():
if self.selectedShapeCopy and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShapeCopy, pos)
self.repaint()
elif self.selectedShape:
self.selectedShapeCopy = self.selectedShape.copy()
self.repaint()
return
# Polygon/Vertex moving.
if Qt.LeftButton & ev.buttons():
if self.selectedVertex():
self.boundedMoveVertex(pos)
self.shapeMoved.emit()
self.repaint()
elif self.selectedShape and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShape, pos)
self.shapeMoved.emit()
self.repaint()
else:
#pan
delta_x = pos.x() - self.pan_initial_pos.x()
delta_y = pos.y() - self.pan_initial_pos.y()
self.scrollRequest.emit(delta_x, Qt.Horizontal)
self.scrollRequest.emit(delta_y, Qt.Vertical)
self.update()
return
# Just hovering over the canvas, 2 possibilities:
# - Highlight shapes
# - Highlight vertex
# Update shape/vertex fill and tooltip value accordingly.
self.setToolTip("Image")
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
# Look for a nearby vertex to highlight. If that fails,
# check if we happen to be inside a shape.
index = shape.nearestVertex(pos, self.epsilon)
if index is not None:
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = index, shape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.overrideCursor(CURSOR_POINT)
self.setToolTip("Click & drag to move point")
self.setStatusTip(self.toolTip())
self.update()
break
elif shape.containsPoint(pos):
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = None, shape
self.setToolTip(
"Click & drag to move shape '%s'" % shape.label)
self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.update()
break
else: # Nothing found, clear highlights, reset state.
if self.hShape:
self.hShape.highlightClear()
self.update()
self.hVertex, self.hShape = None, None
self.overrideCursor(CURSOR_DEFAULT)
def mousePressEvent(self, ev):
pos = self.transformPos(ev.pos())
if ev.button() == Qt.LeftButton:
if self.drawing():
# self.handleDrawing(pos) # OLD
if self.current and self.fourpoint: # ADD IF
# Add point to existing shape.
print('Adding points in mousePressEvent is ', self.line[1])
self.current.addPoint(self.line[1])
self.line[0] = self.current[-1]
if self.current.isClosed():
# print('1111')
self.finalise()
elif not self.outOfPixmap(pos):
# Create new shape.
self.current = Shape()# self.current = Shape(shape_type=self.createMode)
self.current.addPoint(pos)
# if self.createMode == "point":
# self.finalise()
# else:
# if self.createMode == "circle":
# self.current.shape_type = "circle"
self.line.points = [pos, pos]
self.setHiding()
self.drawingPolygon.emit(True)
self.update()
else:
selection = self.selectShapePoint(pos)
self.prevPoint = pos
if selection is None:
#pan
QApplication.setOverrideCursor(QCursor(Qt.OpenHandCursor))
self.pan_initial_pos = pos
elif ev.button() == Qt.RightButton and self.editing():
self.selectShapePoint(pos)
self.prevPoint = pos
self.update()
def mouseReleaseEvent(self, ev):
if ev.button() == Qt.RightButton:
menu = self.menus[bool(self.selectedShapeCopy)]
self.restoreCursor()
if not menu.exec_(self.mapToGlobal(ev.pos()))\
and self.selectedShapeCopy:
# Cancel the move by deleting the shadow copy.
self.selectedShapeCopy = None
self.repaint()
elif ev.button() == Qt.LeftButton and self.selectedShape: # OLD
if self.selectedVertex():
self.overrideCursor(CURSOR_POINT)
else:
self.overrideCursor(CURSOR_GRAB)
elif ev.button() == Qt.LeftButton and not self.fourpoint:
pos = self.transformPos(ev.pos())
if self.drawing():
self.handleDrawing(pos)
else:
#pan
QApplication.restoreOverrideCursor() # ?
def endMove(self, copy=False):
assert self.selectedShape and self.selectedShapeCopy
shape = self.selectedShapeCopy
#del shape.fill_color
#del shape.line_color
if copy:
self.shapes.append(shape)
self.selectedShape.selected = False
self.selectedShape = shape
self.repaint()
else:
self.selectedShape.points = [p for p in shape.points]
self.selectedShapeCopy = None
def hideBackroundShapes(self, value):
self.hideBackround = value
if self.selectedShape:
# Only hide other shapes if there is a current selection.
# Otherwise the user will not be able to select a shape.
self.setHiding(True)
self.repaint()
def handleDrawing(self, pos):
if self.current and self.current.reachMaxPoints() is False:
if self.fourpoint:
targetPos = self.line[self.pointnum]
self.current.addPoint(targetPos)
print('current points in handleDrawing is ', self.line[self.pointnum])
self.update()
if self.pointnum == 3:
self.finalise()
else:  # rectangle mode: reached after the mouse press is released
initPos = self.current[0]
print('initPos', self.current[0])
minX = initPos.x()
minY = initPos.y()
targetPos = self.line[1]
maxX = targetPos.x()
maxY = targetPos.y()
self.current.addPoint(QPointF(maxX, minY))
self.current.addPoint(targetPos)
self.current.addPoint(QPointF(minX, maxY))
self.finalise()
elif not self.outOfPixmap(pos):
print('release')
self.current = Shape()
self.current.addPoint(pos)
self.line.points = [pos, pos]
self.setHiding()
self.drawingPolygon.emit(True)
self.update()
def setHiding(self, enable=True):
self._hideBackround = self.hideBackround if enable else False
def canCloseShape(self):
return self.drawing() and self.current and len(self.current) > 2
def mouseDoubleClickEvent(self, ev):
# We need at least 4 points here, since the mousePress handler
# adds an extra one before this handler is called.
if self.canCloseShape() and len(self.current) > 3:
if not self.fourpoint:
self.current.popPoint()
self.finalise()
def selectShape(self, shape):
self.deSelectShape()
shape.selected = True
self.selectedShape = shape
self.setHiding()
self.selectionChanged.emit(True)
self.update()
def selectShapePoint(self, point):
"""Select the first shape created which contains this point."""
self.deSelectShape()
if self.selectedVertex(): # A vertex is marked for selection.
index, shape = self.hVertex, self.hShape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.selectShape(shape)
return self.hVertex
for shape in reversed(self.shapes):
if self.isVisible(shape) and shape.containsPoint(point):
self.selectShape(shape)
self.calculateOffsets(shape, point)
return self.selectedShape
return None
def calculateOffsets(self, shape, point):
rect = shape.boundingRect()
x1 = rect.x() - point.x()
y1 = rect.y() - point.y()
x2 = (rect.x() + rect.width()) - point.x()
y2 = (rect.y() + rect.height()) - point.y()
self.offsets = QPointF(x1, y1), QPointF(x2, y2)
def snapPointToCanvas(self, x, y):
"""
Moves a point x,y to within the boundaries of the canvas.
:return: (x,y,snapped) where snapped is True if x or y were changed, False if not.
"""
if x < 0 or x > self.pixmap.width() or y < 0 or y > self.pixmap.height():
x = max(x, 0)
y = max(y, 0)
x = min(x, self.pixmap.width())
y = min(y, self.pixmap.height())
return x, y, True
return x, y, False
def boundedMoveVertex(self, pos):
index, shape = self.hVertex, self.hShape
point = shape[index]
if self.outOfPixmap(pos):
size = self.pixmap.size()
clipped_x = min(max(0, pos.x()), size.width())
clipped_y = min(max(0, pos.y()), size.height())
pos = QPointF(clipped_x, clipped_y)
if self.drawSquare:
opposite_point_index = (index + 2) % 4
opposite_point = shape[opposite_point_index]
min_size = min(abs(pos.x() - opposite_point.x()), abs(pos.y() - opposite_point.y()))
directionX = -1 if pos.x() - opposite_point.x() < 0 else 1
directionY = -1 if pos.y() - opposite_point.y() < 0 else 1
shiftPos = QPointF(opposite_point.x() + directionX * min_size - point.x(),
opposite_point.y() + directionY * min_size - point.y())
else:
shiftPos = pos - point
shape.moveVertexBy(index, shiftPos)
lindex = (index + 1) % 4
rindex = (index + 3) % 4
lshift = None
rshift = None
if index % 2 == 0:
rshift = QPointF(shiftPos.x(), 0)
lshift = QPointF(0, shiftPos.y())
else:
lshift = QPointF(shiftPos.x(), 0)
rshift = QPointF(0, shiftPos.y())
shape.moveVertexBy(rindex, rshift)
shape.moveVertexBy(lindex, lshift)
def boundedMoveShape(self, shape, pos):
if self.outOfPixmap(pos):
return False # No need to move
o1 = pos + self.offsets[0]
if self.outOfPixmap(o1):
pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
o2 = pos + self.offsets[1]
if self.outOfPixmap(o2):
pos += QPointF(min(0, self.pixmap.width() - o2.x()),
min(0, self.pixmap.height() - o2.y()))
# The next line tracks the new position of the cursor
# relative to the shape, but also results in making it
# a bit "shaky" when nearing the border and allows it to
# go outside of the shape's area for some reason. XXX
#self.calculateOffsets(self.selectedShape, pos)
dp = pos - self.prevPoint
if dp:
shape.moveBy(dp)
self.prevPoint = pos
return True
return False
def deSelectShape(self):
if self.selectedShape:
self.selectedShape.selected = False
self.selectedShape = None
self.setHiding(False)
self.selectionChanged.emit(False)
self.update()
def deleteSelected(self):
if self.selectedShape:
shape = self.selectedShape
self.shapes.remove(self.selectedShape)
self.selectedShape = None
self.update()
return shape
def copySelectedShape(self):
if self.selectedShape:
shape = self.selectedShape.copy()
self.deSelectShape()
self.shapes.append(shape)
shape.selected = True
self.selectedShape = shape
self.boundedShiftShape(shape)
return shape
def boundedShiftShape(self, shape):
# Try to move in one direction, and if it fails in another.
# Give up if both fail.
point = shape[0]
offset = QPointF(2.0, 2.0)
self.calculateOffsets(shape, point)
self.prevPoint = point
if not self.boundedMoveShape(shape, point - offset):
self.boundedMoveShape(shape, point + offset)
def paintEvent(self, event):
if not self.pixmap:
return super(Canvas, self).paintEvent(event)
p = self._painter
p.begin(self)
p.setRenderHint(QPainter.Antialiasing)
p.setRenderHint(QPainter.HighQualityAntialiasing)
p.setRenderHint(QPainter.SmoothPixmapTransform)
p.scale(self.scale, self.scale)
p.translate(self.offsetToCenter())
p.drawPixmap(0, 0, self.pixmap)
Shape.scale = self.scale
for shape in self.shapes:
if (shape.selected or not self._hideBackround) and self.isVisible(shape):
shape.fill = shape.selected or shape == self.hShape
shape.paint(p)
if self.current:
self.current.paint(p)
self.line.paint(p)
if self.selectedShapeCopy:
self.selectedShapeCopy.paint(p)
# Paint rect
if self.current is not None and len(self.line) == 2 and not self.fourpoint:
# print('Drawing rect')
leftTop = self.line[0]
rightBottom = self.line[1]
rectWidth = rightBottom.x() - leftTop.x()
rectHeight = rightBottom.y() - leftTop.y()
p.setPen(self.drawingRectColor)
brush = QBrush(Qt.BDiagPattern)
p.setBrush(brush)
p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)
# ADD:
if (
self.fillDrawing()
and self.fourpoint
and self.current is not None
and len(self.current.points) >= 2
):
print('paint event')
drawing_shape = self.current.copy()
drawing_shape.addPoint(self.line[1])
drawing_shape.fill = True
drawing_shape.paint(p)
if self.drawing() and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
p.setPen(QColor(0, 0, 0))
p.drawLine(self.prevPoint.x(), 0, self.prevPoint.x(), self.pixmap.height())
p.drawLine(0, self.prevPoint.y(), self.pixmap.width(), self.prevPoint.y())
self.setAutoFillBackground(True)
if self.verified:
pal = self.palette()
pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
self.setPalette(pal)
else:
pal = self.palette()
pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
self.setPalette(pal)
p.end()
def fillDrawing(self):
return self._fill_drawing
def transformPos(self, point):
"""Convert from widget-logical coordinates to painter-logical coordinates."""
return point / self.scale - self.offsetToCenter()
def offsetToCenter(self):
s = self.scale
area = super(Canvas, self).size()
w, h = self.pixmap.width() * s, self.pixmap.height() * s
aw, ah = area.width(), area.height()
x = (aw - w) / (2 * s) if aw > w else 0
y = (ah - h) / (2 * s) if ah > h else 0
return QPointF(x, y)
def outOfPixmap(self, p):
w, h = self.pixmap.width(), self.pixmap.height()
return not (0 <= p.x() <= w and 0 <= p.y() <= h)
def finalise(self):
assert self.current
if self.current.points[0] == self.current.points[-1]:
# print('finalse')
self.current = None
self.drawingPolygon.emit(False)
self.update()
return
self.current.close()
self.shapes.append(self.current)
self.current = None
self.setHiding(False)
self.newShape.emit()
self.update()
def closeEnough(self, p1, p2):
#d = distance(p1 - p2)
#m = (p1-p2).manhattanLength()
# print "d %.2f, m %d, %.2f" % (d, m, d - m)
return distance(p1 - p2) < self.epsilon
# These two, along with a call to adjustSize are required for the
# scroll area.
def sizeHint(self):
return self.minimumSizeHint()
def minimumSizeHint(self):
if self.pixmap:
return self.scale * self.pixmap.size()
return super(Canvas, self).minimumSizeHint()
def wheelEvent(self, ev):
qt_version = 4 if hasattr(ev, "delta") else 5
if qt_version == 4:
if ev.orientation() == Qt.Vertical:
v_delta = ev.delta()
h_delta = 0
else:
h_delta = ev.delta()
v_delta = 0
else:
delta = ev.angleDelta()
h_delta = delta.x()
v_delta = delta.y()
mods = ev.modifiers()
if Qt.ControlModifier == int(mods) and v_delta:
self.zoomRequest.emit(v_delta)
else:
v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
ev.accept()
def keyPressEvent(self, ev):
key = ev.key()
if key == Qt.Key_Escape and self.current:
print('ESC press')
self.current = None
self.drawingPolygon.emit(False)
self.update()
elif key == Qt.Key_Return and self.canCloseShape():
self.finalise()
elif key == Qt.Key_Left and self.selectedShape:
self.moveOnePixel('Left')
elif key == Qt.Key_Right and self.selectedShape:
self.moveOnePixel('Right')
elif key == Qt.Key_Up and self.selectedShape:
self.moveOnePixel('Up')
elif key == Qt.Key_Down and self.selectedShape:
self.moveOnePixel('Down')
def moveOnePixel(self, direction):
    # map each arrow-key direction to a one-pixel offset
    offsets = {'Left': QPointF(-1.0, 0), 'Right': QPointF(1.0, 0),
               'Up': QPointF(0, -1.0), 'Down': QPointF(0, 1.0)}
    offset = offsets.get(direction)
    if offset is not None and not self.moveOutOfBound(offset):
        # shift all four corners of the selected shape by the offset
        for i in range(4):
            self.selectedShape.points[i] += offset
    self.shapeMoved.emit()
    self.repaint()
def moveOutOfBound(self, step):
    points = [p + step for p in self.selectedShape.points]
    return any(map(self.outOfPixmap, points))
def setLastLabel(self, text, line_color = None, fill_color = None):
assert text
self.shapes[-1].label = text
if line_color:
self.shapes[-1].line_color = line_color
if fill_color:
self.shapes[-1].fill_color = fill_color
return self.shapes[-1]
def undoLastLine(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
self.line.points = [self.current[-1], self.current[0]]
self.drawingPolygon.emit(True)
def resetAllLines(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
self.line.points = [self.current[-1], self.current[0]]
self.drawingPolygon.emit(True)
self.current = None
self.drawingPolygon.emit(False)
self.update()
def loadPixmap(self, pixmap):
self.pixmap = pixmap
self.shapes = []
self.repaint()  # inherited from QWidget
def loadShapes(self, shapes):
self.shapes = list(shapes)
self.current = None
self.repaint()
def setShapeVisible(self, shape, value):
self.visible[shape] = value
self.repaint()
def currentCursor(self):
cursor = QApplication.overrideCursor()
if cursor is not None:
cursor = cursor.shape()
return cursor
def overrideCursor(self, cursor):
self._cursor = cursor
if self.currentCursor() is None:
QApplication.setOverrideCursor(cursor)
else:
QApplication.changeOverrideCursor(cursor)
def restoreCursor(self):
QApplication.restoreOverrideCursor()
def resetState(self):
self.restoreCursor()
self.pixmap = None
self.update()
def setDrawingShapeToSquare(self, status):
self.drawSquare = status
# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QColorDialog, QDialogButtonBox
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
BB = QDialogButtonBox
class ColorDialog(QColorDialog):
def __init__(self, parent=None):
super(ColorDialog, self).__init__(parent)
self.setOption(QColorDialog.ShowAlphaChannel)
# The Mac native dialog does not support our restore button.
self.setOption(QColorDialog.DontUseNativeDialog)
# Add a restore defaults button.
# The default is set at invocation time, so that it
# works across dialogs for different elements.
self.default = None
self.bb = self.layout().itemAt(1).widget()
self.bb.addButton(BB.RestoreDefaults)
self.bb.clicked.connect(self.checkRestore)
def getColor(self, value=None, title=None, default=None):
self.default = default
if title:
self.setWindowTitle(title)
if value:
self.setCurrentColor(value)
return self.currentColor() if self.exec_() else None
def checkRestore(self, button):
if self.bb.buttonRole(button) & BB.ResetRole and self.default:
self.setCurrentColor(self.default)
# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
SETTING_FILENAME = 'filename'
SETTING_RECENT_FILES = 'recentFiles'
SETTING_WIN_SIZE = 'window/size'
SETTING_WIN_POSE = 'window/position'
SETTING_WIN_GEOMETRY = 'window/geometry'
SETTING_LINE_COLOR = 'line/color'
SETTING_FILL_COLOR = 'fill/color'
SETTING_ADVANCE_MODE = 'advanced'
SETTING_WIN_STATE = 'window/state'
SETTING_SAVE_DIR = 'savedir'
SETTING_PAINT_LABEL = 'paintlabel'
SETTING_LAST_OPEN_DIR = 'lastOpenDir'
SETTING_AUTO_SAVE = 'autosave'
SETTING_SINGLE_CLASS = 'singleclass'
FORMAT_PASCALVOC='PascalVOC'
FORMAT_YOLO='YOLO'
SETTING_DRAW_SQUARE = 'draw/square'
SETTING_LABEL_FILE_FORMAT= 'labelFileFormat'
DEFAULT_ENCODING = 'utf-8'
# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf8 -*-
import json
from pathlib import Path
from libs.constants import DEFAULT_ENCODING
import os
JSON_EXT = '.json'
ENCODE_METHOD = DEFAULT_ENCODING
class CreateMLWriter:
def __init__(self, foldername, filename, imgsize, shapes, outputfile, databasesrc='Unknown', localimgpath=None):
self.foldername = foldername
self.filename = filename
self.databasesrc = databasesrc
self.imgsize = imgsize
self.boxlist = []
self.localimgpath = localimgpath
self.verified = False
self.shapes = shapes
self.outputfile = outputfile
def write(self):
if os.path.isfile(self.outputfile):
with open(self.outputfile, "r") as file:
input_data = file.read()
outputdict = json.loads(input_data)
else:
outputdict = []
outputimagedict = {
"image": self.filename,
"annotations": []
}
for shape in self.shapes:
points = shape["points"]
x1 = points[0][0]
y1 = points[0][1]
x2 = points[1][0]
y2 = points[2][1]
height, width, x, y = self.calculate_coordinates(x1, x2, y1, y2)
shapedict = {
"label": shape["label"],
"coordinates": {
"x": x,
"y": y,
"width": width,
"height": height
}
}
outputimagedict["annotations"].append(shapedict)
# check if image already in output
exists = False
for i in range(0, len(outputdict)):
if outputdict[i]["image"] == outputimagedict["image"]:
exists = True
outputdict[i] = outputimagedict
break
if not exists:
outputdict.append(outputimagedict)
Path(self.outputfile).write_text(json.dumps(outputdict), ENCODE_METHOD)
def calculate_coordinates(self, x1, x2, y1, y2):
if x1 < x2:
xmin = x1
xmax = x2
else:
xmin = x2
xmax = x1
if y1 < y2:
ymin = y1
ymax = y2
else:
ymin = y2
ymax = y1
width = xmax - xmin
if width < 0:
width = width * -1
height = ymax - ymin
# x and y from center of rect
x = xmin + width / 2
y = ymin + height / 2
return height, width, x, y
class CreateMLReader:
def __init__(self, jsonpath, filepath):
self.jsonpath = jsonpath
self.shapes = []
self.verified = False
        self.filename = os.path.basename(filepath)
try:
self.parse_json()
except ValueError:
print("JSON decoding failed")
def parse_json(self):
with open(self.jsonpath, "r") as file:
inputdata = file.read()
outputdict = json.loads(inputdata)
self.verified = True
if len(self.shapes) > 0:
self.shapes = []
for image in outputdict:
if image["image"] == self.filename:
for shape in image["annotations"]:
self.add_shape(shape["label"], shape["coordinates"])
def add_shape(self, label, bndbox):
xmin = bndbox["x"] - (bndbox["width"] / 2)
ymin = bndbox["y"] - (bndbox["height"] / 2)
xmax = bndbox["x"] + (bndbox["width"] / 2)
ymax = bndbox["y"] + (bndbox["height"] / 2)
points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
self.shapes.append((label, points, None, None, True))
def get_shapes(self):
return self.shapes
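# A minimal round-trip sketch of the writer/reader pair above; the folder,
# file name, image size and box points are illustrative values.
if __name__ == '__main__':
    import tempfile
    demo_shapes = [{"label": "text",
                    "points": [[10, 20], [110, 20], [110, 60], [10, 60]]}]
    out = os.path.join(tempfile.gettempdir(), "createml_demo.json")
    CreateMLWriter("imgs", "demo.jpg", [100, 200, 3], demo_shapes, out).write()
    reader = CreateMLReader(out, "imgs/demo.jpg")
    # Each shape comes back as (label, four corner points, None, None, True).
    print(reader.get_shapes())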
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# PyQt5: TypeError: unhashable type: 'QListWidgetItem'
class HashableQListWidgetItem(QListWidgetItem):
def __init__(self, *args):
super(HashableQListWidgetItem, self).__init__(*args)
def __hash__(self):
return hash(id(self))
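# A small sketch of why the subclass exists: plain QListWidgetItems are
# unhashable under PyQt5, while these can key a dict by object identity.
if __name__ == '__main__':
    app = QApplication([])
    item = HashableQListWidgetItem('demo')
    mapping = {item: 'shape-0'}
    print(mapping[item])  # -> shape-0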
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.utils import newIcon, labelValidator
BB = QDialogButtonBox
class LabelDialog(QDialog):
def __init__(self, text="Enter object label", parent=None, listItem=None):
super(LabelDialog, self).__init__(parent)
self.edit = QLineEdit() # OLD
# self.edit = QTextEdit()
self.edit.setText(text)
        # self.edit.setValidator(labelValidator())  # validate the entered label
self.edit.editingFinished.connect(self.postProcess)
model = QStringListModel()
model.setStringList(listItem)
completer = QCompleter()
completer.setModel(model)
self.edit.setCompleter(completer)
layout = QVBoxLayout()
layout.addWidget(self.edit)
self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
bb.rejected.connect(self.reject)
layout.addWidget(bb)
# if listItem is not None and len(listItem) > 0:
# self.listWidget = QListWidget(self)
# for item in listItem:
# self.listWidget.addItem(item)
# self.listWidget.itemClicked.connect(self.listItemClick)
# self.listWidget.itemDoubleClicked.connect(self.listItemDoubleClick)
# layout.addWidget(self.listWidget)
self.setLayout(layout)
def validate(self):
try:
if self.edit.text().trimmed():
self.accept()
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
if self.edit.text().strip():
self.accept()
    def postProcess(self):
        try:
            self.edit.setText(self.edit.text().trimmed())
        except AttributeError:
            # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
            self.edit.setText(self.edit.text().strip())
def popUp(self, text='', move=True):
self.edit.setText(text)
self.edit.setSelection(0, len(text))
self.edit.setFocus(Qt.PopupFocusReason)
if move:
cursor_pos = QCursor.pos()
parent_bottomRight = self.parentWidget().geometry()
max_x = parent_bottomRight.x() + parent_bottomRight.width() - self.sizeHint().width()
max_y = parent_bottomRight.y() + parent_bottomRight.height() - self.sizeHint().height()
max_global = self.parentWidget().mapToGlobal(QPoint(max_x, max_y))
if cursor_pos.x() > max_global.x():
cursor_pos.setX(max_global.x())
if cursor_pos.y() > max_global.y():
cursor_pos.setY(max_global.y())
self.move(cursor_pos)
return self.edit.text() if self.exec_() else None
def listItemClick(self, tQListWidgetItem):
try:
text = tQListWidgetItem.text().trimmed()
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
text = tQListWidgetItem.text().strip()
self.edit.setText(text)
def listItemDoubleClick(self, tQListWidgetItem):
self.listItemClick(tQListWidgetItem)
self.validate()
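# A minimal usage sketch: popUp() blocks until the user confirms or cancels
# and returns the entered label (or None); the sample label list is made up.
if __name__ == '__main__':
    app = QApplication([])
    dialog = LabelDialog(listItem=['text', 'title', 'header'])
    print(dialog.popUp(text='text', move=False))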
# Copyright (c) 2016 Tzutalin
# Created by TzuTaLin <tzu.ta.lin@gmail.com>
try:
from PyQt5.QtGui import QImage
except ImportError:
from PyQt4.QtGui import QImage
from base64 import b64encode, b64decode
from libs.pascal_voc_io import PascalVocWriter
from libs.yolo_io import YOLOWriter
from libs.pascal_voc_io import XML_EXT
from enum import Enum
import os.path
import sys
class LabelFileFormat(Enum):
PASCAL_VOC= 1
YOLO = 2
class LabelFileError(Exception):
pass
class LabelFile(object):
    # The suffix may be changed at runtime by the main window; XML by default
# suffix = '.lif'
suffix = XML_EXT
def __init__(self, filename=None):
self.shapes = ()
self.imagePath = None
self.imageData = None
self.verified = False
def savePascalVocFormat(self, filename, shapes, imagePath, imageData,
lineColor=None, fillColor=None, databaseSrc=None):
imgFolderPath = os.path.dirname(imagePath)
imgFolderName = os.path.split(imgFolderPath)[-1]
imgFileName = os.path.basename(imagePath)
#imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
# Read from file path because self.imageData might be empty if saving to
# Pascal format
image = QImage()
image.load(imagePath)
imageShape = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
writer = PascalVocWriter(imgFolderName, imgFileName,
imageShape, localImgPath=imagePath)
writer.verified = self.verified
for shape in shapes:
points = shape['points']
label = shape['label']
# Add Chris
difficult = int(shape['difficult'])
bndbox = LabelFile.convertPoints2BndBox(points)
writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, difficult)
writer.save(targetFile=filename)
return
def saveYoloFormat(self, filename, shapes, imagePath, imageData, classList,
lineColor=None, fillColor=None, databaseSrc=None):
imgFolderPath = os.path.dirname(imagePath)
imgFolderName = os.path.split(imgFolderPath)[-1]
imgFileName = os.path.basename(imagePath)
#imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
        # Read from file path because self.imageData might be empty if saving to
        # YOLO format
image = QImage()
image.load(imagePath)
imageShape = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
writer = YOLOWriter(imgFolderName, imgFileName,
imageShape, localImgPath=imagePath)
writer.verified = self.verified
for shape in shapes:
points = shape['points']
label = shape['label']
# Add Chris
difficult = int(shape['difficult'])
bndbox = LabelFile.convertPoints2BndBox(points)
writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, difficult)
writer.save(targetFile=filename, classList=classList)
return
def toggleVerify(self):
self.verified = not self.verified
    ''' Disabled legacy .lif load/save implementation:
def load(self, filename):
import json
with open(filename, 'rb') as f:
data = json.load(f)
imagePath = data['imagePath']
imageData = b64decode(data['imageData'])
lineColor = data['lineColor']
fillColor = data['fillColor']
shapes = ((s['label'], s['points'], s['line_color'], s['fill_color'])\
for s in data['shapes'])
# Only replace data after everything is loaded.
self.shapes = shapes
self.imagePath = imagePath
self.imageData = imageData
self.lineColor = lineColor
self.fillColor = fillColor
def save(self, filename, shapes, imagePath, imageData, lineColor=None, fillColor=None):
import json
with open(filename, 'wb') as f:
json.dump(dict(
shapes=shapes,
lineColor=lineColor, fillColor=fillColor,
imagePath=imagePath,
imageData=b64encode(imageData)),
f, ensure_ascii=True, indent=2)
'''
@staticmethod
def isLabelFile(filename):
fileSuffix = os.path.splitext(filename)[1].lower()
return fileSuffix == LabelFile.suffix
@staticmethod
def convertPoints2BndBox(points):
xmin = float('inf')
ymin = float('inf')
xmax = float('-inf')
ymax = float('-inf')
for p in points:
x = p[0]
y = p[1]
xmin = min(x, xmin)
ymin = min(y, ymin)
xmax = max(x, xmax)
ymax = max(y, ymax)
# Martin Kersner, 2015/11/12
# 0-valued coordinates of BB caused an error while
# training faster-rcnn object detector.
if xmin < 1:
xmin = 1
if ymin < 1:
ymin = 1
return (int(xmin), int(ymin), int(xmax), int(ymax))
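# A worked example of the clamping above: a box that touches the left image
# border comes back with xmin == 1 rather than 0 (per the Faster R-CNN note).
if __name__ == '__main__':
    pts = [(0.0, 5.5), (120.4, 5.5), (120.4, 80.2), (0.0, 80.2)]
    print(LabelFile.convertPoints2BndBox(pts))  # -> (1, 5, 120, 80)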
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
from libs.constants import DEFAULT_ENCODING
from libs.ustr import ustr
XML_EXT = '.xml'
ENCODE_METHOD = DEFAULT_ENCODING
class PascalVocWriter:
    def __init__(self, foldername, filename, imgSize, databaseSrc='Unknown', localImgPath=None):
self.foldername = foldername
self.filename = filename
self.databaseSrc = databaseSrc
self.imgSize = imgSize
self.boxlist = []
self.localImgPath = localImgPath
self.verified = False
def prettify(self, elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, 'utf8')
root = etree.fromstring(rough_string)
        return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace("  ".encode(), "\t".encode())
# minidom does not support UTF-8
'''reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)'''
def genXML(self):
"""
Return XML root
"""
# Check conditions
if self.filename is None or \
self.foldername is None or \
self.imgSize is None:
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'folder')
folder.text = self.foldername
filename = SubElement(top, 'filename')
filename.text = self.filename
if self.localImgPath is not None:
localImgPath = SubElement(top, 'path')
localImgPath.text = self.localImgPath
source = SubElement(top, 'source')
database = SubElement(source, 'database')
database.text = self.databaseSrc
size_part = SubElement(top, 'size')
width = SubElement(size_part, 'width')
height = SubElement(size_part, 'height')
depth = SubElement(size_part, 'depth')
width.text = str(self.imgSize[1])
height.text = str(self.imgSize[0])
if len(self.imgSize) == 3:
depth.text = str(self.imgSize[2])
else:
depth.text = '1'
segmented = SubElement(top, 'segmented')
segmented.text = '0'
return top
def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult):
bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
bndbox['name'] = name
bndbox['difficult'] = difficult
self.boxlist.append(bndbox)
def appendObjects(self, top):
for each_object in self.boxlist:
object_item = SubElement(top, 'object')
name = SubElement(object_item, 'name')
name.text = ustr(each_object['name'])
pose = SubElement(object_item, 'pose')
pose.text = "Unspecified"
truncated = SubElement(object_item, 'truncated')
if int(float(each_object['ymax'])) == int(float(self.imgSize[0])) or (int(float(each_object['ymin']))== 1):
truncated.text = "1" # max == height or min
elif (int(float(each_object['xmax']))==int(float(self.imgSize[1]))) or (int(float(each_object['xmin']))== 1):
truncated.text = "1" # max == width or min
else:
truncated.text = "0"
difficult = SubElement(object_item, 'difficult')
            difficult.text = str(bool(each_object['difficult']) & 1)
bndbox = SubElement(object_item, 'bndbox')
xmin = SubElement(bndbox, 'xmin')
xmin.text = str(each_object['xmin'])
ymin = SubElement(bndbox, 'ymin')
ymin.text = str(each_object['ymin'])
xmax = SubElement(bndbox, 'xmax')
xmax.text = str(each_object['xmax'])
ymax = SubElement(bndbox, 'ymax')
ymax.text = str(each_object['ymax'])
def save(self, targetFile=None):
root = self.genXML()
self.appendObjects(root)
out_file = None
if targetFile is None:
out_file = codecs.open(
self.filename + XML_EXT, 'w', encoding=ENCODE_METHOD)
else:
out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)
prettifyResult = self.prettify(root)
out_file.write(prettifyResult.decode('utf8'))
out_file.close()
class PascalVocReader:
def __init__(self, filepath):
        # shapes type:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.filepath = filepath
self.verified = False
try:
self.parseXML()
        except Exception:
pass
def getShapes(self):
return self.shapes
def addShape(self, label, bndbox, difficult):
xmin = int(float(bndbox.find('xmin').text))
ymin = int(float(bndbox.find('ymin').text))
xmax = int(float(bndbox.find('xmax').text))
ymax = int(float(bndbox.find('ymax').text))
points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
self.shapes.append((label, points, None, None, difficult))
def parseXML(self):
        assert self.filepath.endswith(XML_EXT), "Unsupported file format"
parser = etree.XMLParser(encoding=ENCODE_METHOD)
xmltree = ElementTree.parse(self.filepath, parser=parser).getroot()
filename = xmltree.find('filename').text
try:
verified = xmltree.attrib['verified']
if verified == 'yes':
self.verified = True
except KeyError:
self.verified = False
for object_iter in xmltree.findall('object'):
bndbox = object_iter.find("bndbox")
label = object_iter.find('name').text
# Add chris
difficult = False
if object_iter.find('difficult') is not None:
difficult = bool(int(object_iter.find('difficult').text))
self.addShape(label, bndbox, difficult)
return True
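# A minimal round-trip sketch for the two classes above; the folder, file
# name and box values are illustrative.
if __name__ == '__main__':
    import os
    import tempfile
    writer = PascalVocWriter('imgs', 'demo.jpg', [100, 200, 3])
    writer.addBndBox(10, 20, 110, 60, 'text', 0)
    target = os.path.join(tempfile.gettempdir(), 'demo' + XML_EXT)
    writer.save(targetFile=target)
    print(PascalVocReader(target).getShapes())
    # -> [('text', [(10, 20), (110, 20), (110, 60), (10, 60)], None, None, False)]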
import pickle
import os
import sys
class Settings(object):
def __init__(self):
        # By default, the settings pickle is stored in the user's home directory
home = os.path.expanduser("~")
self.data = {}
# self.path = os.path.join(home, '.labelImgSettings.pkl')
self.path = os.path.join(home, '.autoOCRSettings.pkl')
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, key):
return self.data[key]
def get(self, key, default=None):
if key in self.data:
return self.data[key]
return default
def save(self):
if self.path:
with open(self.path, 'wb') as f:
pickle.dump(self.data, f, pickle.HIGHEST_PROTOCOL)
return True
return False
def load(self):
try:
if os.path.exists(self.path):
with open(self.path, 'rb') as f:
self.data = pickle.load(f)
return True
        except Exception:
print('Loading setting failed')
return False
def reset(self):
if os.path.exists(self.path):
os.remove(self.path)
            print('Removed setting pkl file {0}'.format(self.path))
self.data = {}
self.path = None
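# A small persistence sketch; the path is pointed at a temp file so the demo
# does not touch the real ~/.autoOCRSettings.pkl.
if __name__ == '__main__':
    import tempfile
    s = Settings()
    s.path = os.path.join(tempfile.gettempdir(), 'demo_settings.pkl')
    s['window/size'] = (800, 600)
    s.save()
    s2 = Settings()
    s2.path = s.path
    print(s2.load(), s2.get('window/size'))  # -> True (800, 600)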
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.utils import distance
import sys
DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
MIN_Y_LABEL = 10
class Shape(object):
P_SQUARE, P_ROUND = range(2)
MOVE_VERTEX, NEAR_VERTEX = range(2)
# The following class variables influence the drawing
# of _all_ shape objects.
line_color = DEFAULT_LINE_COLOR
fill_color = DEFAULT_FILL_COLOR
select_line_color = DEFAULT_SELECT_LINE_COLOR
select_fill_color = DEFAULT_SELECT_FILL_COLOR
vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
point_type = P_ROUND
point_size = 8
scale = 1.0
def __init__(self, label=None, line_color=None, difficult=False, paintLabel=False):
self.label = label
self.points = []
self.fill = False
self.selected = False
self.difficult = difficult
self.paintLabel = paintLabel
self._highlightIndex = None
self._highlightMode = self.NEAR_VERTEX
self._highlightSettings = {
self.NEAR_VERTEX: (4, self.P_ROUND),
self.MOVE_VERTEX: (1.5, self.P_SQUARE),
}
self._closed = False
if line_color is not None:
# Override the class line_color attribute
# with an object attribute. Currently this
# is used for drawing the pending line a different color.
self.line_color = line_color
def close(self):
self._closed = True
def reachMaxPoints(self):
if len(self.points) >= 4:
return True
return False
def addPoint(self, point):
if not self.reachMaxPoints():
self.points.append(point)
def popPoint(self):
if self.points:
return self.points.pop()
return None
def isClosed(self):
return self._closed
def setOpen(self):
self._closed = False
def paint(self, painter):
if self.points:
color = self.select_line_color if self.selected else self.line_color
pen = QPen(color)
# Try using integer sizes for smoother drawing(?)
pen.setWidth(max(1, int(round(2.0 / self.scale))))
painter.setPen(pen)
line_path = QPainterPath()
vrtx_path = QPainterPath()
line_path.moveTo(self.points[0])
# Uncommenting the following line will draw 2 paths
# for the 1st vertex, and make it non-filled, which
# may be desirable.
#self.drawVertex(vrtx_path, 0)
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.drawVertex(vrtx_path, i)
if self.isClosed():
line_path.lineTo(self.points[0])
painter.drawPath(line_path)
painter.drawPath(vrtx_path)
painter.fillPath(vrtx_path, self.vertex_fill_color)
# Draw text at the top-left
if self.paintLabel:
min_x = sys.maxsize
min_y = sys.maxsize
for point in self.points:
min_x = min(min_x, point.x())
min_y = min(min_y, point.y())
if min_x != sys.maxsize and min_y != sys.maxsize:
font = QFont()
font.setPointSize(8)
font.setBold(True)
painter.setFont(font)
                if self.label is None:
                    self.label = ""
                if min_y < MIN_Y_LABEL:
                    min_y += MIN_Y_LABEL
painter.drawText(min_x, min_y, self.label)
if self.fill:
color = self.select_fill_color if self.selected else self.fill_color
painter.fillPath(line_path, color)
def drawVertex(self, path, i):
d = self.point_size / self.scale
shape = self.point_type
point = self.points[i]
if i == self._highlightIndex:
size, shape = self._highlightSettings[self._highlightMode]
d *= size
if self._highlightIndex is not None:
self.vertex_fill_color = self.hvertex_fill_color
else:
self.vertex_fill_color = Shape.vertex_fill_color
if shape == self.P_SQUARE:
path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
elif shape == self.P_ROUND:
path.addEllipse(point, d / 2.0, d / 2.0)
else:
assert False, "unsupported vertex shape"
def nearestVertex(self, point, epsilon):
for i, p in enumerate(self.points):
if distance(p - point) <= epsilon:
return i
return None
def containsPoint(self, point):
return self.makePath().contains(point)
def makePath(self):
path = QPainterPath(self.points[0])
for p in self.points[1:]:
path.lineTo(p)
return path
def boundingRect(self):
return self.makePath().boundingRect()
def moveBy(self, offset):
self.points = [p + offset for p in self.points]
def moveVertexBy(self, i, offset):
self.points[i] = self.points[i] + offset
def highlightVertex(self, i, action):
self._highlightIndex = i
self._highlightMode = action
def highlightClear(self):
self._highlightIndex = None
def copy(self):
shape = Shape("%s" % self.label)
shape.points = [p for p in self.points]
shape.fill = self.fill
shape.selected = self.selected
shape._closed = self._closed
if self.line_color != Shape.line_color:
shape.line_color = self.line_color
if self.fill_color != Shape.fill_color:
shape.fill_color = self.fill_color
shape.difficult = self.difficult
return shape
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points[key]
def __setitem__(self, key, value):
self.points[key] = value
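# A small sketch of the Shape container itself (no painting involved): four
# points close a rectangle, and reachMaxPoints() gates further addPoint calls.
if __name__ == '__main__':
    shape = Shape(label='text')
    for x, y in [(0, 0), (50, 0), (50, 20), (0, 20)]:
        shape.addPoint(QPointF(x, y))
    shape.close()
    print(len(shape), shape.isClosed(), shape.reachMaxPoints())  # -> 4 True True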
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
import locale
from libs.ustr import ustr
try:
from PyQt5.QtCore import *
except ImportError:
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtCore import *
class StringBundle:
__create_key = object()
def __init__(self, create_key, localeStr):
assert(create_key == StringBundle.__create_key), "StringBundle must be created using StringBundle.getBundle"
self.idToMessage = {}
paths = self.__createLookupFallbackList(localeStr)
for path in paths:
self.__loadBundle(path)
@classmethod
def getBundle(cls, localeStr=None):
if localeStr is None:
try:
localeStr = locale.getlocale()[0] if locale.getlocale() and len(
locale.getlocale()) > 0 else os.getenv('LANG')
            except Exception:
print('Invalid locale')
localeStr = 'en'
return StringBundle(cls.__create_key, localeStr)
def getString(self, stringId):
assert(stringId in self.idToMessage), "Missing string id : " + stringId
return self.idToMessage[stringId]
def __createLookupFallbackList(self, localeStr):
resultPaths = []
basePath = ":/strings"
resultPaths.append(basePath)
if localeStr is not None:
# Don't follow standard BCP47. Simple fallback
tags = re.split('[^a-zA-Z]', localeStr)
for tag in tags:
lastPath = resultPaths[-1]
resultPaths.append(lastPath + '-' + tag)
return resultPaths
    def __loadBundle(self, path):
        PROP_SEPARATOR = '='
        f = QFile(path)
        if f.exists():
            if f.open(QIODevice.ReadOnly | QFile.Text):
                text = QTextStream(f)
                text.setCodec("UTF-8")
                while not text.atEnd():
                    line = ustr(text.readLine())
                    key_value = line.split(PROP_SEPARATOR)
                    key = key_value[0].strip()
                    value = PROP_SEPARATOR.join(key_value[1:]).strip().strip('"')
                    self.idToMessage[key] = value
            f.close()
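# A sketch of the fallback lookup: for 'zh-CN' the bundle tries ':/strings',
# ':/strings-zh' and ':/strings-zh-CN' in the compiled Qt resources; with no
# resources registered the message table simply stays empty.
if __name__ == '__main__':
    bundle = StringBundle.getBundle('zh-CN')
    print(len(bundle.idToMessage))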
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class ToolBar(QToolBar):
def __init__(self, title):
super(ToolBar, self).__init__(title)
layout = self.layout()
m = (0, 0, 0, 0)
layout.setSpacing(0)
layout.setContentsMargins(*m)
self.setContentsMargins(*m)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
def addAction(self, action):
if isinstance(action, QWidgetAction):
return super(ToolBar, self).addAction(action)
btn = ToolButton()
btn.setDefaultAction(action)
btn.setToolButtonStyle(self.toolButtonStyle())
self.addWidget(btn)
class ToolButton(QToolButton):
"""ToolBar companion class which ensures all buttons have the same size."""
minSize = (60, 60)
def minimumSizeHint(self):
ms = super(ToolButton, self).minimumSizeHint()
w1, h1 = ms.width(), ms.height()
w2, h2 = self.minSize
ToolButton.minSize = max(w1, w2), max(h1, h2)
return QSize(*ToolButton.minSize)
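# A minimal sketch: plain QActions get wrapped in the fixed-size ToolButton,
# so every button in the bar shares the same minimum footprint.
if __name__ == '__main__':
    app = QApplication([])
    bar = ToolBar('demo')
    bar.addAction(QAction('Quit', bar))
    bar.show()
    # app.exec_()  # uncomment to keep the toolbar on screen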
import sys
from libs.constants import DEFAULT_ENCODING
def ustr(x):
'''py2/py3 unicode helper'''
if sys.version_info < (3, 0, 0):
from PyQt4.QtCore import QString
if type(x) == str:
return x.decode(DEFAULT_ENCODING)
if type(x) == QString:
#https://blog.csdn.net/friendan/article/details/51088476
#https://blog.csdn.net/xxm524/article/details/74937308
return unicode(x.toUtf8(), DEFAULT_ENCODING, 'ignore')
return x
else:
return x
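# A quick check of the helper: under py3 strings pass through unchanged,
# under py2 byte strings and QStrings are decoded to unicode.
if __name__ == '__main__':
    print(ustr('label'), ustr(u'已标注'))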
from math import sqrt
from libs.ustr import ustr
import hashlib
import re
import sys
import cv2
import numpy as np
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
def newIcon(icon, iconSize=None):
if iconSize is not None:
return QIcon(QIcon(':/' + icon).pixmap(iconSize,iconSize))
else:
return QIcon(':/' + icon)
def newButton(text, icon=None, slot=None):
b = QPushButton(text)
if icon is not None:
b.setIcon(newIcon(icon))
if slot is not None:
b.clicked.connect(slot)
return b
def newAction(parent, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, enabled=True, iconSize=None):
"""Create a new action and assign callbacks, shortcuts, etc."""
a = QAction(text, parent)
if icon is not None:
if iconSize is not None:
a.setIcon(newIcon(icon, iconSize))
else:
a.setIcon(newIcon(icon))
if shortcut is not None:
if isinstance(shortcut, (list, tuple)):
a.setShortcuts(shortcut)
else:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setCheckable(True)
a.setEnabled(enabled)
return a
def addActions(widget, actions):
for action in actions:
if action is None:
widget.addSeparator()
elif isinstance(action, QMenu):
widget.addMenu(action)
else:
widget.addAction(action)
def labelValidator():
return QRegExpValidator(QRegExp(r'^[^ \t].+'), None)
class struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def distance(p):
return sqrt(p.x() * p.x() + p.y() * p.y())
def fmtShortcut(text):
mod, key = text.split('+', 1)
return '<b>%s</b>+<b>%s</b>' % (mod, key)
def generateColorByText(text):
s = ustr(text)
hashCode = int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16)
r = int((hashCode / 255) % 255)
g = int((hashCode / 65025) % 255)
b = int((hashCode / 16581375) % 255)
return QColor(r, g, b, 100)
def have_qstring():
    '''py3/qt5 dropped the QString wrapper, since py3 has a native unicode str type'''
    return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.'))
def util_qt_strlistclass():
return QStringList if have_qstring() else list
def natural_sort(lst, key=lambda s: s):
    """
    Sort the list into natural alphanumeric order (in place).
    """
    def get_alphanum_key_func(key):
        convert = lambda text: int(text) if text.isdigit() else text
        return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
    sort_key = get_alphanum_key_func(key)
    lst.sort(key=sort_key)
def get_rotate_crop_image(img, points):
try:
img_crop_width = int(
max(
np.linalg.norm(points[0] - points[1]),
np.linalg.norm(points[2] - points[3])))
img_crop_height = int(
max(
np.linalg.norm(points[0] - points[3]),
np.linalg.norm(points[1] - points[2])))
pts_std = np.float32([[0, 0], [img_crop_width, 0],
[img_crop_width, img_crop_height],
[0, img_crop_height]])
M = cv2.getPerspectiveTransform(points, pts_std)
dst_img = cv2.warpPerspective(
img,
M, (img_crop_width, img_crop_height),
borderMode=cv2.BORDER_REPLICATE,
flags=cv2.INTER_CUBIC)
dst_img_height, dst_img_width = dst_img.shape[0:2]
if dst_img_height * 1.0 / dst_img_width >= 1.5:
dst_img = np.rot90(dst_img)
return dst_img
except Exception as e:
print(e)
def stepsInfo(lang='en'):
if lang == 'ch':
msg = "1. 安装与运行:使用上述命令安装与运行程序。\n" \
"2. 打开文件夹:在菜单栏点击 “文件” - 打开目录 选择待标记图片的文件夹.\n"\
"3. 自动标注:点击 ”自动标注“,使用PPOCR超轻量模型对图片文件名前图片状态为 “X” 的图片进行自动标注。\n" \
"4. 手动标注:点击 “矩形标注”(推荐直接在英文模式下点击键盘中的 “W”),用户可对当前图片中模型未检出的部分进行手动" \
"绘制标记框。点击键盘P,则使用四点标注模式(或点击“编辑” - “四点标注”),用户依次点击4个点后,双击左键表示标注完成。\n" \
"5. 标记框绘制完成后,用户点击 “确认”,检测框会先被预分配一个 “待识别” 标签。\n" \
"6. 重新识别:将图片中的所有检测画绘制/调整完成后,点击 “重新识别”,PPOCR模型会对当前图片中的**所有检测框**重新识别。\n" \
"7. 内容更改:双击识别结果,对不准确的识别结果进行手动更改。\n" \
"8. 保存:点击 “保存”,图片状态切换为 “√”,跳转至下一张。\n" \
"9. 删除:点击 “删除图像”,图片将会被删除至回收站。\n" \
"10. 标注结果:关闭应用程序或切换文件路径后,手动保存过的标签将会被存放在所打开图片文件夹下的" \
"*Label.txt*中。在菜单栏点击 “PaddleOCR” - 保存识别结果后,会将此类图片的识别训练数据保存在*crop_img*文件夹下," \
"识别标签保存在*rec_gt.txt*中。\n"
else:
msg = "1. Build and launch using the instructions above.\n" \
"2. Click 'Open Dir' in Menu/File to select the folder of the picture.\n"\
"3. Click 'Auto recognition', use PPOCR model to automatically annotate images which marked with 'X' before the file name."\
"4. Create Box:\n"\
"4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangle detection box. Click and release left mouse to select a region to annotate the text area.\n"\
"4.2 Press 'P' to enter four-point labeling mode which enables you to create any four-point shape by clicking four points with the left mouse button in succession and DOUBLE CLICK the left mouse as the signal of labeling completion.\n"\
"5. After the marking frame is drawn, the user clicks 'OK', and the detection frame will be pre-assigned a TEMPORARY label.\n"\
"6. Click re-Recognition, model will rewrite ALL recognition results in ALL detection box.\n"\
"7. Double click the result in 'recognition result' list to manually change inaccurate recognition results.\n"\
"8. Click 'Save', the image status will switch to '√',then the program automatically jump to the next.\n"\
"9. Click 'Delete Image' and the image will be deleted to the recycle bin.\n"\
"10. Labeling result: After closing the application or switching the file path, the manually saved label will be stored in *Label.txt* under the opened picture folder.\n"\
" Click PaddleOCR-Save Recognition Results in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*.\n"
return msg
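# A short sketch of two helpers above: natural_sort orders alphanumerically
# in place, and get_rotate_crop_image warps a quadrilateral to an upright
# crop (the box below maps a 100x40 region out of a blank 300x200 image).
if __name__ == '__main__':
    files = ['img10.jpg', 'img2.jpg', 'img1.jpg']
    natural_sort(files)
    print(files)  # -> ['img1.jpg', 'img2.jpg', 'img10.jpg']
    box = np.float32([[10, 20], [110, 20], [110, 60], [10, 60]])
    crop = get_rotate_crop_image(np.zeros((200, 300, 3), np.uint8), box)
    print(crop.shape)  # -> (40, 100, 3)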
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
from libs.constants import DEFAULT_ENCODING
TXT_EXT = '.txt'
ENCODE_METHOD = DEFAULT_ENCODING
class YOLOWriter:
def __init__(self, foldername, filename, imgSize, databaseSrc='Unknown', localImgPath=None):
self.foldername = foldername
self.filename = filename
self.databaseSrc = databaseSrc
self.imgSize = imgSize
self.boxlist = []
self.localImgPath = localImgPath
self.verified = False
def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult):
bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
bndbox['name'] = name
bndbox['difficult'] = difficult
self.boxlist.append(bndbox)
    def BndBox2YoloLine(self, box, classList=None):
        # avoid the shared mutable default-argument pitfall
        if classList is None:
            classList = []
xmin = box['xmin']
xmax = box['xmax']
ymin = box['ymin']
ymax = box['ymax']
xcen = float((xmin + xmax)) / 2 / self.imgSize[1]
ycen = float((ymin + ymax)) / 2 / self.imgSize[0]
w = float((xmax - xmin)) / self.imgSize[1]
h = float((ymax - ymin)) / self.imgSize[0]
# PR387
boxName = box['name']
if boxName not in classList:
classList.append(boxName)
classIndex = classList.index(boxName)
return classIndex, xcen, ycen, w, h
    def save(self, classList=None, targetFile=None):
        if classList is None:
            classList = []
        out_file = None  # Update yolo .txt
        out_class_file = None  # Update class list .txt
if targetFile is None:
out_file = open(
self.filename + TXT_EXT, 'w', encoding=ENCODE_METHOD)
classesFile = os.path.join(os.path.dirname(os.path.abspath(self.filename)), "classes.txt")
out_class_file = open(classesFile, 'w')
else:
out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)
classesFile = os.path.join(os.path.dirname(os.path.abspath(targetFile)), "classes.txt")
out_class_file = open(classesFile, 'w')
for box in self.boxlist:
classIndex, xcen, ycen, w, h = self.BndBox2YoloLine(box, classList)
# print (classIndex, xcen, ycen, w, h)
out_file.write("%d %.6f %.6f %.6f %.6f\n" % (classIndex, xcen, ycen, w, h))
# print (classList)
# print (out_class_file)
for c in classList:
out_class_file.write(c+'\n')
out_class_file.close()
out_file.close()
class YoloReader:
def __init__(self, filepath, image, classListPath=None):
        # shapes type:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.filepath = filepath
if classListPath is None:
dir_path = os.path.dirname(os.path.realpath(self.filepath))
self.classListPath = os.path.join(dir_path, "classes.txt")
else:
self.classListPath = classListPath
# print (filepath, self.classListPath)
        with open(self.classListPath, 'r') as classesFile:
            self.classes = classesFile.read().strip('\n').split('\n')
# print (self.classes)
imgSize = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
self.imgSize = imgSize
self.verified = False
# try:
self.parseYoloFormat()
# except:
# pass
def getShapes(self):
return self.shapes
def addShape(self, label, xmin, ymin, xmax, ymax, difficult):
points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
self.shapes.append((label, points, None, None, difficult))
def yoloLine2Shape(self, classIndex, xcen, ycen, w, h):
label = self.classes[int(classIndex)]
xmin = max(float(xcen) - float(w) / 2, 0)
xmax = min(float(xcen) + float(w) / 2, 1)
ymin = max(float(ycen) - float(h) / 2, 0)
ymax = min(float(ycen) + float(h) / 2, 1)
xmin = int(self.imgSize[1] * xmin)
xmax = int(self.imgSize[1] * xmax)
ymin = int(self.imgSize[0] * ymin)
ymax = int(self.imgSize[0] * ymax)
return label, xmin, ymin, xmax, ymax
    def parseYoloFormat(self):
        with open(self.filepath, 'r') as bndBoxFile:
            for bndBox in bndBoxFile:
                classIndex, xcen, ycen, w, h = bndBox.strip().split(' ')
                label, xmin, ymin, xmax, ymax = self.yoloLine2Shape(classIndex, xcen, ycen, w, h)
                # Caveat: difficult flag is discarded when saved as yolo format.
                self.addShape(label, xmin, ymin, xmax, ymax, False)
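# A worked example of the normalization in BndBox2YoloLine: for a 200x100
# image, box (10, 20, 110, 60) becomes class 0 with center (0.3, 0.4),
# width 0.5 and height 0.4.
if __name__ == '__main__':
    writer = YOLOWriter('imgs', 'demo.jpg', [100, 200, 3])
    writer.addBndBox(10, 20, 110, 60, 'text', 0)
    print(writer.BndBox2YoloLine(writer.boxlist[0], ['text']))
    # -> (0, 0.3, 0.4, 0.5, 0.4)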
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class ZoomWidget(QSpinBox):
def __init__(self, value=100):
super(ZoomWidget, self).__init__()
self.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.setRange(1, 500)
self.setSuffix(' %')
self.setValue(value)
self.setToolTip(u'Zoom Level')
self.setStatusTip(self.toolTip())
self.setAlignment(Qt.AlignCenter)
def minimumSizeHint(self):
height = super(ZoomWidget, self).minimumSizeHint().height()
fm = QFontMetrics(self.font())
width = fm.width(str(self.maximum()))
return QSize(width, height)
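# A minimal sketch: the widget is a plain QSpinBox clamped to 1-500 with a
# ' %' suffix, so value() is the zoom percentage.
if __name__ == '__main__':
    app = QApplication([])
    zoom = ZoomWidget(value=150)
    print(zoom.value(), zoom.suffix())  # -> 150  %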
<!DOCTYPE RCC><RCC version="1.0">
<qresource>
<file alias="help">resources/icons/help.png</file>
<file alias="app">resources/icons/app.png</file>
<file alias="Auto">resources/icons/Auto.png</file>
<file alias="reRec">resources/icons/reRec.png</file>
<file alias="expert">resources/icons/expert2.png</file>
<file alias="done">resources/icons/done.png</file>
<file alias="file">resources/icons/file.png</file>
<file alias="labels">resources/icons/labels.png</file>
<file alias="new">resources/icons/objects.png</file>
<file alias="close">resources/icons/close.png</file>
<file alias="fit-width">resources/icons/fit-width.png</file>
<file alias="fit-window">resources/icons/fit-window.png</file>
<file alias="undo">resources/icons/undo.png</file>
<file alias="hide">resources/icons/eye.png</file>
<file alias="quit">resources/icons/quit.png</file>
<file alias="copy">resources/icons/copy.png</file>
<file alias="edit">resources/icons/edit.png</file>
<file alias="open">resources/icons/open.png</file>
<file alias="save">resources/icons/save.png</file>
<file alias="format_voc">resources/icons/format_voc.png</file>
<file alias="format_yolo">resources/icons/format_yolo.png</file>
<file alias="save-as">resources/icons/save-as.png</file>
<file alias="color">resources/icons/color.png</file>
<file alias="color_line">resources/icons/color_line.png</file>
<file alias="zoom">resources/icons/zoom.png</file>
<file alias="zoom-in">resources/icons/zoom-in.png</file>
<file alias="zoom-out">resources/icons/zoom-out.png</file>
<file alias="delete">resources/icons/cancel.png</file>
<file alias="next">resources/icons/next.png</file>
<file alias="prev">resources/icons/prev.png</file>
<file alias="resetall">resources/icons/resetall.png</file>
<file alias="verify">resources/icons/verify.png</file>
<file alias="strings">resources/strings/strings.properties</file>
<file alias="strings-zh-TW">resources/strings/strings-zh-TW.properties</file>
<file alias="strings-zh-CN">resources/strings/strings-zh-CN.properties</file>
</qresource>
</RCC>
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1605840513947" class="icon" viewBox="0 0 1036 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="570" xmlns:xlink="http://www.w3.org/1999/xlink" width="202.34375" height="200"><defs><style type="text/css"></style></defs><path d="M938.74776 69.162438L893.455144 52.837457a67.26383 67.26383 0 1 0-20.007307 64.317969l42.469499 15.465771c28.722146 10.433258 53.516478 49.097686 53.516478 83.466066v598.869028c0 34.000148-24.548843 72.664576-53.639223 82.729601l-38.296195 13.501863a67.26383 67.26383 0 1 0 14.115585 66.650109l46.397313-16.447724c55.60313-19.51633 98.195372-83.711555 98.195373-146.311105V216.210008c0-62.354061-42.960475-126.917519-98.195373-147.293059zM222.16703 888.971053a66.895597 66.895597 0 0 0-52.166292 24.548843L121.516773 897.440403A89.1123 89.1123 0 0 1 67.26383 822.443688V208.722611a89.97151 89.97151 0 0 1 54.252943-76.101414l51.920803-18.657121a67.632063 67.632063 0 1 0-15.465771-65.913643L98.195372 69.285182A157.358084 157.358084 0 0 0 0 208.722611v613.721077a156.253386 156.253386 0 0 0 99.30007 138.332731L159.56748 982.011168a67.509318 67.509318 0 1 0 62.59955-93.040115z" fill="#444444" p-id="571"></path><path d="M426.29066 572.413721l86.780161-205.105584 91.935417 205.105584z m282.311696 230.759125h119.798354l-267.58239-592.854561-98.195372 1.104698-251.625642 592.118096 118.079936-1.350187L380.507068 680.183142h272.492158l55.112153 122.744215z" fill="#444444" p-id="572"></path></svg>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
<svg
width="48pt"
height="48pt"
viewBox="0 0 256 256"
style="overflow:visible;enable-background:new 0 0 256 256"
xml:space="preserve"
xmlns="http://www.w3.org/2000/svg"
xmlns:xap="http://ns.adobe.com/xap/1.0/"
xmlns:xapGImg="http://ns.adobe.com/xap/1.0/g/img/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:xapMM="http://ns.adobe.com/xap/1.0/mm/"
xmlns:pdf="http://ns.adobe.com/pdf/1.3/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
xmlns:x="adobe:ns:meta/"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:xlink="http://www.w3.org/1999/xlink"
id="svg548"
sodipodi:version="0.32"
sodipodi:docname="/home/david/Desktop/action/button_ok.svg"
sodipodi:docbase="/home/david/Desktop/action/">
<defs
id="defs584">
<linearGradient
id="XMLID_5_"
gradientUnits="userSpaceOnUse"
x1="127.9536"
y1="47.3267"
x2="127.9536"
y2="212.9885">
<stop
offset="0"
style="stop-color:#009900"
id="stop556" />
<stop
offset="1"
style="stop-color:#334966"
id="stop557" />
<a:midPointStop
offset="0"
style="stop-color:#009900"
id="midPointStop558" />
<a:midPointStop
offset="0.5"
style="stop-color:#009900"
id="midPointStop559" />
<a:midPointStop
offset="1"
style="stop-color:#334966"
id="midPointStop560" />
</linearGradient>
<linearGradient
id="XMLID_6_"
gradientUnits="userSpaceOnUse"
x1="127.9536"
y1="77.2075"
x2="127.9536"
y2="307.6057">
<stop
offset="0"
style="stop-color:#33CC33"
id="stop563" />
<stop
offset="1"
style="stop-color:#336666"
id="stop564" />
<a:midPointStop
offset="0"
style="stop-color:#33CC33"
id="midPointStop565" />
<a:midPointStop
offset="0.5"
style="stop-color:#33CC33"
id="midPointStop566" />
<a:midPointStop
offset="1"
style="stop-color:#336666"
id="midPointStop567" />
</linearGradient>
<linearGradient
id="XMLID_7_"
gradientUnits="userSpaceOnUse"
x1="127.9536"
y1="77.3672"
x2="127.9536"
y2="307.3626">
<stop
offset="0.0056"
style="stop-color:#CCFF66"
id="stop570" />
<stop
offset="1"
style="stop-color:#009900"
id="stop571" />
<a:midPointStop
offset="0.0056"
style="stop-color:#CCFF66"
id="midPointStop572" />
<a:midPointStop
offset="0.5"
style="stop-color:#CCFF66"
id="midPointStop573" />
<a:midPointStop
offset="1"
style="stop-color:#009900"
id="midPointStop574" />
</linearGradient>
<radialGradient
id="XMLID_8_"
cx="54.2729"
cy="89.3477"
r="120.8132"
fx="54.2729"
fy="89.3477"
gradientUnits="userSpaceOnUse">
<stop
offset="0.000000"
style="stop-color:#ffffff;stop-opacity:1;"
id="stop577" />
<stop
offset="1.000000"
style="stop-color:#92ff00;stop-opacity:1;"
id="stop578" />
<a:midPointStop
offset="0"
style="stop-color:#FFFFFF"
id="midPointStop579" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFFFFF"
id="midPointStop580" />
<a:midPointStop
offset="1"
style="stop-color:#000000"
id="midPointStop581" />
</radialGradient>
</defs>
<sodipodi:namedview
id="base" />
<metadata
id="metadata549">
<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?>
<x:xmpmeta
x:xmptk="XMP toolkit 3.0-29, framework 1.6">
<rdf:RDF>
<rdf:Description
rdf:about="uuid:609bc623-b01c-476b-9349-300763160df1">
<pdf:Producer>
Adobe PDF library 5.00</pdf:Producer>
</rdf:Description>
<rdf:Description
rdf:about="uuid:609bc623-b01c-476b-9349-300763160df1" />
<rdf:Description
rdf:about="uuid:609bc623-b01c-476b-9349-300763160df1" />
<rdf:Description
rdf:about="uuid:609bc623-b01c-476b-9349-300763160df1">
<xap:CreateDate>
2003-12-22T22:34:35+02:00</xap:CreateDate>
<xap:ModifyDate>
2004-04-17T21:25:50Z</xap:ModifyDate>
<xap:CreatorTool>
Adobe Illustrator 10.0</xap:CreatorTool>
<xap:MetadataDate>
2004-01-19T17:51:02+01:00</xap:MetadataDate>
<xap:Thumbnails>
<rdf:Alt>
<rdf:li
rdf:parseType="Resource">
<xapGImg:format>
JPEG</xapGImg:format>
<xapGImg:width>
256</xapGImg:width>
<xapGImg:height>
256</xapGImg:height>
<xapGImg:image><!-- base64-encoded JPEG thumbnail payload omitted --></xapGImg:image>
bJYb13RYb2KRlQMWFJYj0cf1w6zScR44fV9658IkHjnmvysIAyMpezc/u5P2kbwPv+vK9Lqb/rPP
ZsJxGxyYLb3Or+WtXivLOUxTxHlFKv2XXuCPDxGbzDlvcOTgzkHijze2xf8AORmkReWEnktHm14j
h9UHwx8gPtvJ/L8tz7Zm+OK83dHtGPBderuePeYPM/mnzpqn1jUZ2nYV9KFfhghU9kXovz6nvXMT
Ll6ydPqNQZG5FNPL3lR2mUQx+vcjdpDsif0/Xmq1Gqob7BwrlkNReneXfKMNuVlYCWcdZmHwqf8A
IH8c1hlPNsNouy02jEd+ZZZDBFAtEFWPVj1OZGPFGA2diIgNs+ElbUmfKyWNqE06otT9AymcwAxJ
phvmjzQYeVrauPXIpLKD/djwHv8Aqx0+AzPFLk6zVaqvTHm8k1vWmumNtAf3APxMP2yP4Z0GDBw7
nm42LDW55p15S8qzSypNIhMzU4rT7Ff+NjmzxYq3L0Oi0fD6pfV9z3zyT5Mht4VuJ0+Gmy/ze3y8
fHMh2TO8VdirsVdirsVdirsVdirsVdirsVdirsVdiqV+adAtfMHl6/0a52jvIigb+VxvG/8AsXAb
BIWKa8uMTiYnq+PrUXWja7LZXimKWGV7a6Q/ssrcT9zDNZnxXHzDy+fEaI6h7H5D1sogiY/FbHp4
xN/T+mc7l/dZRMci2aDNQruemCUEAg1B3Bzb8Vu7tJ9c0eG8idlQMWFJYj0cf1zX6rTWeOH1OPmw
iQeReafKwhRgymSzc/A/7Ubdq/1w6XVWf6TocuE4jY5MLt/LUxuGE7gQKdmX7TD28M2stSK25pln
Fbc2eeXvJ7yInJDb2v7KAfvH+/8AWc0+o1m9D1STi00pm5PR9K0G3tYVX0xHGNxEvf3Y5TDTGR4p
u3xYBEJryVVooAA6AZl8m9TZ8gSi1NnyslFqE06ovJvuymcgAwMqYh5m8zG35W8DVuWHxMOkYP8A
xtgwYDkPFLk67VamthzeSa7rZnLW9uxMVf3sn858Pl+vOh0+nrcuPhw1ueaZ+VPK808yTypWQ0Ma
EV4g9GI/m8Bmyx463LvtHpK9UufR755G8lRwxrcTrRB27se4r+s/QMvdm9BACgACgGwA6AYq7FXY
q7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXzj/wA5FeUvqHmC38xW6UttVX07kjoLmJaV/wBnGB9I
OU5I726jX4qlxDqx7ydrhja3uWbdD6Vx7r0r92+aDXae7HxDpP7vJfR7hol8JrQRk1aLYHxU9Mxd
FluFHmHeYZ2EwMmZlt1pTq+kxXaOyKCzikkZ6OP65g6jT2eKP1OPlxCTGtP8lQQXXqLCxYGqmYgq
nyFN/wAcpJzT2Ozh49GAbplVraQWwqvxSd3PX6PDL8WCMOXNzoxAVmky0llam0mVkotSaTIEsbUJ
p1RSzHYZVOQAtiZUxTzJ5lFuDDCa3TDYdRGD3PvkMOE5TxH6XA1GorYc3k+va40rPbwSFuRPry1q
WJ6gH9edHptNW5cfDh/iKK8q+WZbqZJ5kqTQxIR0/wAph+oZsYQ6l3uj0n8Uvg978i+SVRFnnWiL
1J6k9wPfxOXOzejoiIgRAFVRRVGwAGKt4q7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FWN/mJ
5UTzR5Qv9KoDcsnq2THtcR/FHuenI/CfYnARYac+PjgQ+S9CuXtdQa3lBT1D6bqdiHU7V+nbMDVY
rjfc81qMdx9z2byTrVYY1dvii/dS/wCofsn/AD8M5qY8LLfSTbo82zOTJmdbs7aMmRtFrDJgJRaw
yZElFqbSZAlFqbSZAlFqMs6opZjQDK5SpiZMX8xeYxbIUjINww/dp1Cj+Zsrw4TllZ+lws+or3vK
vMGvSO8kEUnOR6+vNWpqeoB/XnSaXSgCzy6OPhw36pLvK/luS8lSeZKqd4oz0P8AlN7frzZRi7vS
6W/VLk968i+SBRZp1IRd2Y9a/wDNX6ssdo9NiijijWONQqKKKo6AYquxV2KuxV2KuxV2KuxV2Kux
V2KuxV2KuxV2KuxV2KuxV2Kvlv8APjyk2g+dG1C3ThZayDdREbATgj11+fIh/wDZZEh1GrxVK+hU
fKGsgSwTMaJMPTmHYN0r9/4ZzfaGm2I7tw6aP7uddHrunXnrWq1Pxp8LfR0zDwZOKLtsc7CIMuW2
ztaZcFotYZMiSi1NpMiSi1KSZVUsxoB1OVylTEyY35g8wrbR0WjSt/dRf8bNleLEc0v6IcTNnp5b
5g16QySRI5a4kP76Xwr2Hv8AqzpdJpBQJ5dGjDhMjxSUfLPl2W/lSeVaxVrGh/ap3P8Ak5swHdab
TcXqPJ7z5E8kcys0q8VWhZiP89/Adsk7R6nBBFBEsUS8Y0FFGKr8VdirsVdirsVdirsVdirsVdir
sVdirsVdirsVdirsVdirsVYN+cnlH/Enkm6SFOWoaf8A6ZZ0FWLRg80H+ulRTxpi0ajHxRfMHly8
4TtbMfhl3T/WH9RmHrMVji7nntVjsX3PY/Kmr+tBGWPxH93L/rDofpzlJR8LKR0LLT5GSmXLrcu1
hlwWi1plyJKLU3mABJNAOpyJKCWPa7r8dtFXqx/uo/E+J9srx4zmlX8IcbLlp5j5g1+T1HVX53Un
23/lH9c6XR6MUNvSGnDhMzxS5ITy75fm1GdZpVJgr8K95D/TxObWnc6fT8W55PdvInkgyMkjqFRQ
CWpsB22/UMXaPWba3ht4VhhXiijYfxOKqmKuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Ku
xV2KuxV2KvkX82fKj+U/PV1FbJ6djct9d08gUUJISSg/4xuCtPCmS4RIUXU6jFUiOhTPypqq+qlD
SK6UU9nHT+mct2lpzR74umiDCVPRre69WFWrv0b5jNfCdhzoysLjLhtNrGmAFSdsiSi0l1nW4reL
kTWv93H3Y/0yOPHLNKhyaMmR5r5g8wSh2+PndydT2Qf59BnTaLRCuXpH2teHCZmzyS3QNDn1O5Ek
oYwctz3dvAH9ZzbnZ3GDT8XP6XunkTyO0rIzRgIAO3whR028PAd/lkHZgU9etLSC0gWGFeKL95Pi
cUq2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV5h/wA5AeUP015OOqW6
cr7RSZxQVZrdqCZf9iAH/wBicnA7uPqYXG+588+W70qWtyaMD6kR/X/XMPX4f4vgXQ6vHyk9X0TU
hPbo9f7wfEPBxsc46cPDmYsMc0yM3vjbbaV6rrEVvCWY7fsr3Y4MeOWWXCOTTObzvzB5gkDlmYNc
uPgXsi/LOn0OhFUPpH2ow4TkNnkk+iaNcatdc35ejy+N+7Mf2R75uTURQdxgwcXue4eRPI5maMem
AigAbfCFH8B+OVOyArZ7JY2NvZW6wwigH2m7k+JxSiMVdirsVdirsVdirsVdirsVdirsVdirsVdi
rsVdirsVdirsVdirsVdirsVWTQxTQvDMgkilUpIjCoZWFCCPAjFXxp538uz+T/Ot7ptD6VvL6lox
r8dvJ8Ue/f4TxPvXL5QE4V3uqz4ecWUeWdRXn6Yb4JQJIj70r+Izj+08BA4usdi6UXE0yC/1SOCA
yOaL4dyfAZrMcJZJcIZymwLX9fYMZHo0zCkUfZR751Gg0Aqhy6lOHCch8ki0jSrrV7ssxPp1Hqyd
SSf2V983hqAoO5w4b2HJ7b5E8jmZolWIKi7KvYAdd/1nMcl2IAAoPadN06CwthDEP9dqUJP+fTFK
KxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV4z/zkl5Q+u6Ha
+ZbZK3GmEQXZHU28rfCf9hIf+GOX4Zb04+ohYt4l5b1FlUR8qSwtyjr3Fa/gcwO0dNe/SXN0esxU
eIJjr2vEEySbuRSGGuw98w9B2fQocupacOE5D5Me03TrzV7wkk8agzS+A8B7+AzfnhxxoO5w4eg5
PaPInkcyNCkcXFF2Vf11P6zmKTbsIxAFB7dpWlW+nWywxAcqDm4FK0/gMCUbirsVdirsVdirsVdi
rsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVQ+o6faajYXFheRia0uo2hniPRkcc
WH3HCDSCLfKX5gfk/wCYfK+pymzRr3SWJa1ulpzCH9mQbfEvQkbd9sy45okbuLPCfexez8savdTA
SoYkJozuat9C1qcJyxiNkRwn3PW/Ivkcs0UUcRCA7DuT3JP836sxJSJNlyoxAFB7lo2j2+mWqxxq
PUoA7D9Q9siyTDFXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FX
Yq7FXYqpXNrb3MRiuIxJGexxVIG/L3yuZfUFsUJ6qjFR+GKp1YaVYWEfC0hWMUpUbmnzOKorFXYq
7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7
FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7F
XYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FX
Yq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXY
q7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq//Z</xapGImg:image>
</rdf:li>
</rdf:Alt>
</xap:Thumbnails>
</rdf:Description>
<rdf:Description
rdf:about="uuid:609bc623-b01c-476b-9349-300763160df1">
<xapMM:DocumentID>
uuid:4b4d592f-95b8-4bcd-a892-74a536c5e52f</xapMM:DocumentID>
</rdf:Description>
<rdf:Description
rdf:about="uuid:609bc623-b01c-476b-9349-300763160df1">
<dc:format>
image/svg+xml</dc:format>
<dc:title>
<rdf:Alt>
<rdf:li
xml:lang="x-default">
test.ai</rdf:li>
</rdf:Alt>
</dc:title>
</rdf:Description>
</rdf:RDF>
</x:xmpmeta>
<xpacket>end='w' </xpacket>
</metadata>
<rect
id="_x3C_Slice_x3E_"
style="font-size:12;fill:none;"
width="256"
height="256" />
<path
style="font-size:12;opacity:0.2;"
d="M221.848,47.811c0,0-130.558,89.471-132.578,90.855c-1.689-1.683-41.779-41.595-41.779-41.595 c-2.978-2.968-6.891-4.068-10.467-2.943c-3.89,1.232-6.403,4.005-7.08,7.809l-0.42,2.363c-0.135,0.765-0.122,1.532,0.037,2.285 l0.589,2.802l0.408,1.247l46.254,101.694c1.449,3.183,4.375,5.427,7.83,6.001c3.441,0.579,6.936-0.598,9.349-3.144 L235.225,65.893c2.066-2.169,3.252-5.263,3.252-8.481l-0.129-1.236l-0.572-2.723c-0.697-3.33-2.852-5.804-6.227-7.157 C229.395,45.431,225.963,44.991,221.848,47.811z"
id="path552" />
<path
style="font-size:12;opacity:0.2;"
d="M218.848,47.811c0,0-130.558,89.471-132.578,90.855c-1.689-1.683-41.779-41.595-41.779-41.595 c-2.978-2.968-6.891-4.068-10.467-2.943c-3.89,1.232-6.403,4.005-7.08,7.809l-0.42,2.363c-0.135,0.765-0.122,1.532,0.037,2.285 l0.589,2.802l0.408,1.247l46.254,101.694c1.449,3.183,4.375,5.427,7.83,6.001c3.441,0.579,6.936-0.598,9.349-3.144 L232.225,65.893c2.066-2.169,3.252-5.263,3.252-8.481l-0.129-1.236l-0.572-2.723c-0.697-3.33-2.852-5.804-6.227-7.157 C226.395,45.431,222.963,44.991,218.848,47.811z"
id="path553" />
<path
style="font-size:12;opacity:0.2;"
d="M217.848,45.811c0,0-130.558,89.471-132.578,90.855c-1.689-1.683-41.779-41.595-41.779-41.595 c-2.978-2.968-6.891-4.068-10.467-2.943c-3.89,1.232-6.403,4.005-7.08,7.809l-0.42,2.363c-0.135,0.765-0.122,1.532,0.037,2.285 l0.589,2.802l0.408,1.247l46.254,101.694c1.449,3.183,4.375,5.427,7.83,6.001c3.441,0.579,6.936-0.598,9.349-3.144 L231.225,63.893c2.066-2.169,3.252-5.263,3.252-8.481l-0.129-1.236l-0.572-2.723c-0.697-3.33-2.852-5.804-6.227-7.157 C225.395,43.431,221.963,42.991,217.848,45.811z"
id="path554" />
<path
style="font-size:12;fill:url(#XMLID_5_);"
d="M215.848,43.811c0,0-130.558,89.471-132.578,90.855 c-1.689-1.683-41.779-41.595-41.779-41.595c-2.978-2.968-6.891-4.068-10.467-2.943c-3.89,1.232-6.403,4.005-7.08,7.809 l-0.42,2.363c-0.135,0.765-0.122,1.532,0.037,2.285l0.589,2.802l0.408,1.247l46.254,101.694c1.449,3.183,4.375,5.427,7.83,6.001 c3.441,0.579,6.936-0.598,9.349-3.144L229.225,61.893c2.066-2.169,3.252-5.263,3.252-8.481l-0.129-1.236l-0.572-2.723 c-0.697-3.33-2.852-5.804-6.227-7.157C223.395,41.431,219.963,40.991,215.848,43.811z"
id="path561" />
<path
style="font-size:12;fill:url(#XMLID_6_);"
d="M219.239,48.761c0,0-135.454,92.824-136.679,93.665 c-5.106-5.083-45.302-45.103-45.302-45.103c-1.187-1.182-2.833-1.976-4.431-1.472c-1.597,0.505-2.684,1.485-2.977,3.135 l-0.42,2.364l0.589,2.802c0.007,0.016,46.252,101.691,46.252,101.691c0.621,1.363,1.876,2.321,3.354,2.567 c1.477,0.247,2.978-0.265,4.008-1.353L224.865,57.77c1.021-1.072,1.611-2.665,1.611-4.358l-0.572-2.728 c-0.309-1.471-1.192-2.26-2.588-2.82C221.922,47.305,220.477,47.913,219.239,48.761z"
id="path568" />
<path
style="font-size:12;fill:url(#XMLID_7_);"
d="M84.485,146.561c-1.425,0.977-3.344,0.803-4.567-0.416c0,0-44.921-44.724-45.833-45.632 c-0.091,0.252-0.154,0.533-0.154,0.838c0,0.328,0.06,0.662,0.192,0.955c0,0,46.096,101.347,46.241,101.664 c0.877-0.93,141.232-149.292,141.232-149.292c0.232-0.243,0.381-0.741,0.381-1.266c0-0.322-0.074-0.645-0.2-0.935 C220.751,53.177,84.485,146.561,84.485,146.561z"
id="path575" />
<path
style="font-size:12;fill:url(#XMLID_8_);"
d="M86.517,149.525c-0.001,0-0.001,0.004-0.001,0.004 c-2.848,1.947-6.69,1.596-9.133-0.838c0,0-20.052-19.966-33.287-33.141c10.589,23.282,30.678,67.45,37.327,82.069 c6.078-6.424,93.826-99.178,119.981-126.826C170.026,92.297,86.517,149.525,86.517,149.525z"
id="path582" />
</svg>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
<svg
xmlns:pdf="http://ns.adobe.com/pdf/1.3/"
xmlns:xapMM="http://ns.adobe.com/xap/1.0/mm/"
xmlns:xapGImg="http://ns.adobe.com/xap/1.0/g/img/"
xmlns:xap="http://ns.adobe.com/xap/1.0/"
xmlns:ns0="http://ns.adobe.com/SaveForWeb/1.0/"
xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
xmlns:x="adobe:ns:meta/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="48pt"
height="48pt"
viewBox="0 0 256 256"
overflow="visible"
enable-background="new 0 0 256 256"
xml:space="preserve"
id="svg710"
sodipodi:version="0.32"
sodipodi:docname="application-text.svg"
version="1.1"
inkscape:version="0.48.1 r9760">
<defs
id="defs796">
<linearGradient
y2="245.0005"
x2="128.9995"
y1="11"
x1="128.9995"
gradientUnits="userSpaceOnUse"
id="XMLID_9_">
<stop
id="stop717"
style="stop-color:#494949"
offset="0" />
<stop
id="stop718"
style="stop-color:#000000"
offset="1" />
<a:midPointStop
id="midPointStop719"
style="stop-color:#494949"
offset="0" />
<a:midPointStop
id="midPointStop720"
style="stop-color:#494949"
offset="0.5" />
<a:midPointStop
id="midPointStop721"
style="stop-color:#000000"
offset="1" />
</linearGradient>
<linearGradient
y2="226.9471"
x2="226.9471"
y1="29.0532"
x1="29.0532"
gradientUnits="userSpaceOnUse"
id="XMLID_10_">
<stop
id="stop725"
style="stop-color:#FFFFFF"
offset="0" />
<stop
id="stop726"
style="stop-color:#DADADA"
offset="1" />
<a:midPointStop
id="midPointStop727"
style="stop-color:#FFFFFF"
offset="0" />
<a:midPointStop
id="midPointStop728"
style="stop-color:#FFFFFF"
offset="0.5" />
<a:midPointStop
id="midPointStop729"
style="stop-color:#DADADA"
offset="1" />
</linearGradient>
<linearGradient
gradientTransform="matrix(0.1991,0.98,-0.98,0.1991,91.6944,573.5653)"
y2="-164.2214"
x2="-360.2456"
y1="-94.4194"
x1="-481.7007"
gradientUnits="userSpaceOnUse"
id="XMLID_11_">
<stop
id="stop736"
style="stop-color:#990000"
offset="0" />
<stop
id="stop737"
style="stop-color:#7C0000"
offset="1" />
<a:midPointStop
id="midPointStop738"
style="stop-color:#990000"
offset="0" />
<a:midPointStop
id="midPointStop739"
style="stop-color:#990000"
offset="0.5" />
<a:midPointStop
id="midPointStop740"
style="stop-color:#7C0000"
offset="1" />
</linearGradient>
<linearGradient
gradientTransform="matrix(-0.999,0.0435,0.0435,0.999,-1277.0056,-496.5172)"
y2="706.3217"
x2="-1355.0455"
y1="685.3809"
x1="-1375.9844"
gradientUnits="userSpaceOnUse"
id="XMLID_12_">
<stop
id="stop743"
style="stop-color:#F8F1DC"
offset="0" />
<stop
id="stop744"
style="stop-color:#D6A84A"
offset="1" />
<a:midPointStop
id="midPointStop745"
style="stop-color:#F8F1DC"
offset="0" />
<a:midPointStop
id="midPointStop746"
style="stop-color:#F8F1DC"
offset="0.5" />
<a:midPointStop
id="midPointStop747"
style="stop-color:#D6A84A"
offset="1" />
</linearGradient>
<linearGradient
y2="160.1823"
x2="137.6021"
y1="-0.7954"
x1="65.0947"
gradientUnits="userSpaceOnUse"
id="XMLID_13_">
<stop
id="stop750"
style="stop-color:#FFA700"
offset="0" />
<stop
id="stop751"
style="stop-color:#FFD700"
offset="0.7753" />
<stop
id="stop752"
style="stop-color:#FF794B"
offset="1" />
<a:midPointStop
id="midPointStop753"
style="stop-color:#FFA700"
offset="0" />
<a:midPointStop
id="midPointStop754"
style="stop-color:#FFA700"
offset="0.5" />
<a:midPointStop
id="midPointStop755"
style="stop-color:#FFD700"
offset="0.7753" />
<a:midPointStop
id="midPointStop756"
style="stop-color:#FFD700"
offset="0.5" />
<a:midPointStop
id="midPointStop757"
style="stop-color:#FF794B"
offset="1" />
</linearGradient>
<linearGradient
gradientTransform="matrix(-0.999,0.0435,0.0435,0.999,-1277.0056,-496.5172)"
y2="622.5333"
x2="-1325.3219"
y1="635.7949"
x1="-1336.4497"
gradientUnits="userSpaceOnUse"
id="XMLID_14_">
<stop
id="stop763"
style="stop-color:#FFC957"
offset="0" />
<stop
id="stop764"
style="stop-color:#FF6D00"
offset="1" />
<a:midPointStop
id="midPointStop765"
style="stop-color:#FFC957"
offset="0" />
<a:midPointStop
id="midPointStop766"
style="stop-color:#FFC957"
offset="0.5" />
<a:midPointStop
id="midPointStop767"
style="stop-color:#FF6D00"
offset="1" />
</linearGradient>
<linearGradient
gradientTransform="matrix(-0.999,0.0435,0.0435,0.999,-1277.0056,-496.5172)"
y2="699.4763"
x2="-1354.6851"
y1="595.6309"
x1="-1401.459"
gradientUnits="userSpaceOnUse"
id="XMLID_15_">
<stop
id="stop770"
style="stop-color:#FFA700"
offset="0" />
<stop
id="stop771"
style="stop-color:#FFD700"
offset="0.7753" />
<stop
id="stop772"
style="stop-color:#FF9200"
offset="1" />
<a:midPointStop
id="midPointStop773"
style="stop-color:#FFA700"
offset="0" />
<a:midPointStop
id="midPointStop774"
style="stop-color:#FFA700"
offset="0.5" />
<a:midPointStop
id="midPointStop775"
style="stop-color:#FFD700"
offset="0.7753" />
<a:midPointStop
id="midPointStop776"
style="stop-color:#FFD700"
offset="0.5" />
<a:midPointStop
id="midPointStop777"
style="stop-color:#FF9200"
offset="1" />
</linearGradient>
<linearGradient
y2="115.5361"
x2="144.5898"
y1="115.5361"
x1="67.8452"
gradientUnits="userSpaceOnUse"
id="XMLID_16_">
<stop
id="stop780"
style="stop-color:#7D7D99"
offset="0" />
<stop
id="stop781"
style="stop-color:#B1B1C5"
offset="0.1798" />
<stop
id="stop782"
style="stop-color:#BCBCC8"
offset="0.3727" />
<stop
id="stop783"
style="stop-color:#C8C8CB"
offset="0.6825" />
<stop
id="stop784"
style="stop-color:#CCCCCC"
offset="1" />
<a:midPointStop
id="midPointStop785"
style="stop-color:#7D7D99"
offset="0" />
<a:midPointStop
id="midPointStop786"
style="stop-color:#7D7D99"
offset="0.5" />
<a:midPointStop
id="midPointStop787"
style="stop-color:#B1B1C5"
offset="0.1798" />
<a:midPointStop
id="midPointStop788"
style="stop-color:#B1B1C5"
offset="0.2881" />
<a:midPointStop
id="midPointStop789"
style="stop-color:#CCCCCC"
offset="1" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#XMLID_16_"
id="linearGradient80060"
gradientUnits="userSpaceOnUse"
x1="67.8452"
y1="115.5361"
x2="144.5898"
y2="115.5361"
gradientTransform="translate(0,-25.600002)" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_15_"
id="linearGradient80063"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.999,0.0435,0.0435,0.999,-1277.0056,-522.11722)"
x1="-1401.459"
y1="595.6309"
x2="-1354.6851"
y2="699.4763" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_14_"
id="linearGradient80066"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.999,0.0435,0.0435,0.999,-1277.0056,-522.11722)"
x1="-1336.4497"
y1="635.7949"
x2="-1325.3219"
y2="622.5333" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_13_"
id="linearGradient80072"
gradientUnits="userSpaceOnUse"
x1="65.0947"
y1="-0.7954"
x2="137.6021"
y2="160.1823"
gradientTransform="translate(0,-25.600002)" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_12_"
id="linearGradient80075"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.999,0.0435,0.0435,0.999,-1277.0056,-522.11722)"
x1="-1375.9844"
y1="685.3809"
x2="-1355.0455"
y2="706.3217" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_11_"
id="linearGradient80078"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.1991,0.98,-0.98,0.1991,91.6944,547.96528)"
x1="-481.7007"
y1="-94.4194"
x2="-360.2456"
y2="-164.2214" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_10_"
id="linearGradient80085"
gradientUnits="userSpaceOnUse"
x1="29.0532"
y1="29.0532"
x2="226.9471"
y2="226.9471" /><linearGradient
inkscape:collect="always"
xlink:href="#XMLID_9_"
id="linearGradient80089"
gradientUnits="userSpaceOnUse"
x1="128.9995"
y1="11"
x2="128.9995"
y2="245.0005" /></defs>
<sodipodi:namedview
id="base"
showgrid="false"
inkscape:zoom="3.6203867"
inkscape:cx="24.932695"
inkscape:cy="18.484388"
inkscape:window-width="1280"
inkscape:window-height="766"
inkscape:window-x="0"
inkscape:window-y="20"
inkscape:window-maximized="0"
inkscape:current-layer="svg710" />
<metadata
id="metadata711">
<ns0:sfw>
<ns0:slices>
<ns0:slice
x="0"
y="0"
width="256"
height="256"
sliceID="124333141" />
</ns0:slices>
<ns0:sliceSourceBounds
x="0"
y="0"
width="256"
height="256"
bottomLeftOrigin="true" />
<ns0:optimizationSettings>
<ns0:targetSettings
fileFormat="PNG24Format"
targetSettingsID="0">
<ns0:PNG24Format
transparency="true"
includeCaption="false"
interlaced="false"
noMatteColor="false"
matteColor="#FFFFFF"
filtered="false" />
</ns0:targetSettings>
</ns0:optimizationSettings>
</ns0:sfw>
<xpacket
id="xpacket79197">begin='' id='W5M0MpCehiHzreSzNTczkc9d' </xpacket>
<x:xmpmeta
x:xmptk="XMP toolkit 3.0-29, framework 1.6">
<metadata
id="metadata79254"><rdf:RDF>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<pdf:Producer>
Adobe PDF library 5.00</pdf:Producer>
</rdf:Description>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c" />
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c" />
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<xap:CreateDate>
2004-01-26T11:58:28+02:00</xap:CreateDate>
<xap:ModifyDate>
2004-03-28T20:41:40Z</xap:ModifyDate>
<xap:CreatorTool>
Adobe Illustrator 10.0</xap:CreatorTool>
<xap:MetadataDate>
2004-02-16T23:58:32+01:00</xap:MetadataDate>
<xap:Thumbnails>
<rdf:Alt>
<rdf:li
rdf:parseType="Resource">
<xapGImg:format>
JPEG</xapGImg:format>
<xapGImg:width>
256</xapGImg:width>
<xapGImg:height>
256</xapGImg:height>
<xapGImg:image>
<!-- (base64-encoded JPEG thumbnail data omitted) -->
</xapGImg:image>
</rdf:li>
</rdf:Alt>
</xap:Thumbnails>
</rdf:Description>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<xapMM:DocumentID>
uuid:4ee3f24b-6ed2-4a2e-8f7a-50b762c8da8b</xapMM:DocumentID>
</rdf:Description>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<dc:format>
image/svg+xml</dc:format>
<dc:title>
<rdf:Alt>
<rdf:li
xml:lang="x-default">
mime.ai</rdf:li>
</rdf:Alt>
</dc:title>
</rdf:Description>
<cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata></x:xmpmeta>
<xpacket
id="xpacket79199">end='w' </xpacket>
</metadata>
<path
style="opacity:0.2"
inkscape:connector-curvature="0"
id="path713"
d="m 44,15.5 c -9.374,0 -17,7.626 -17,17 v 200 c 0,9.374 7.626,17 17,17 h 176 c 9.375,0 17,-7.626 17,-17 v -200 c 0,-9.374 -7.625,-17 -17,-17 H 44 z" /><path
style="opacity:0.2"
inkscape:connector-curvature="0"
id="path714"
d="m 42,13.5 c -9.374,0 -17,7.626 -17,17 v 200 c 0,9.374 7.626,17 17,17 h 176 c 9.375,0 17,-7.626 17,-17 v -200 c 0,-9.374 -7.625,-17 -17,-17 H 42 z" /><path
style="opacity:0.2"
inkscape:connector-curvature="0"
id="path715"
d="m 40,12.5 c -9.374,0 -17,7.626 -17,17 v 200 c 0,9.374 7.626,17 17,17 h 176 c 9.375,0 17,-7.626 17,-17 v -200 c 0,-9.374 -7.625,-17 -17,-17 H 40 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80089)"
id="path722"
d="m 41,11 c -9.374,0 -17,7.626 -17,17 v 200 c 0,9.374 7.626,17 17,17 h 176 c 9.375,0 17,-7.626 17,-17 V 28 c 0,-9.374 -7.625,-17 -17,-17 H 41 z" /><path
style="fill:#ffffff"
inkscape:connector-curvature="0"
id="path723"
d="m 28,228 c 0,6.627 5.373,12 12,12 h 176 c 6.627,0 12,-5.373 12,-12 V 28 c 0,-6.627 -5.373,-12 -12,-12 H 40 c -6.627,0 -12,5.373 -12,12 v 200 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80085)"
id="path730"
d="m 40,21 c -3.86,0 -7,3.14 -7,7 v 200 c 0,3.859 3.14,7 7,7 h 176 c 3.859,0 7,-3.141 7,-7 V 28 c 0,-3.86 -3.141,-7 -7,-7 H 40 z" /><path
style="opacity:0.2"
inkscape:connector-curvature="0"
id="path731"
d="m 191.924,170.38398 c -11.613,-36.12699 -13.717,-42.66999 -14.859,-44.06399 0.119,0.076 0.289,0.178 0.289,0.178 L 98.804,39.042999 c -4.195,-4.65 -14.005,0.356 -21.355,6.976 -7.283,6.542 -13.32,15.772999 -9.37,20.563999 l 78.944,87.542982 0.533,0.094 37.768,17.602 7.688,2.365 -1.088,-3.803 z" /><path
style="opacity:0.2"
inkscape:connector-curvature="0"
id="path732"
d="m 193.557,167.91598 c -11.611,-36.12499 -13.713,-42.66999 -14.855,-44.06399 0.117,0.072 0.287,0.178 0.287,0.178 L 100.444,36.574999 c -4.199,-4.651 -14.015,0.355 -21.361,6.975 -7.281,6.545 -13.32,15.772999 -9.368,20.565999 l 78.945,87.538982 0.533,0.1 37.77,17.598 7.682,2.367 -1.088,-3.804 z" /><path
style="opacity:0.2"
inkscape:connector-curvature="0"
id="path733"
d="M 186.773,165.44898 C 175.16,129.32199 173.06,122.77699 171.91,121.38099 c 0.121,0.074 0.295,0.18 0.295,0.18 L 93.653,34.103999 c -4.192,-4.65 -14.009,0.359 -21.354,6.978 -7.283,6.542 -13.321,15.770999 -9.369,20.564999 l 78.942,87.540982 0.535,0.096 37.768,17.598 7.686,2.367 -1.088,-3.8 z" /><path
style="fill:#ffffff"
inkscape:connector-curvature="0"
id="path734"
d="m 186.43,163.75498 c -11.613,-36.12499 -13.713,-42.66599 -14.863,-44.06099 0.123,0.072 0.293,0.18 0.293,0.18 L 93.314,32.415999 c -4.199,-4.651 -14.015,0.357 -21.359,6.977 -7.283,6.543 -13.322,15.773999 -9.37,20.565999 l 78.941,87.540982 0.535,0.098 37.771,17.598 7.686,2.363 -1.088,-3.804 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80078)"
id="path741"
d="m 186.43,163.75498 c -11.613,-36.12499 -13.713,-42.66599 -14.863,-44.06099 0.123,0.072 0.293,0.18 0.293,0.18 L 93.314,32.415999 c -4.199,-4.651 -14.015,0.357 -21.359,6.977 -7.283,6.543 -13.322,15.773999 -9.37,20.565999 l 78.941,87.540982 0.535,0.098 37.771,17.598 7.686,2.363 -1.088,-3.804 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80075)"
id="path748"
d="m 166.969,122.16199 13.723,38.12899 -36.371,-17.90199 0.168,-0.152 c -0.25,-0.08 -0.496,-0.178 -0.701,-0.316 l -0.125,0.121 -75.303,-83.569992 0.123,-0.104 c -2.246,-2.49 1.032,-9.093999 7.308,-14.751999 6.28,-5.652 13.18,-8.219 15.425,-5.733 l 75.292,83.564991 0.461,0.714 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80072)"
id="path758"
d="m 148.652,144.52098 c 2.076,-0.369 4.635,-1.479 7.252,-3.13899 1.617,-1.018 3.279,-2.283 4.898,-3.744 1.455,-1.303 2.736,-2.666 3.84,-4.01 2.076,-2.531 3.322,-5.213 3.781,-7.424 l -1.455,-4.043 -0.463,-0.715 -74.798,-83.017991 c 0.608,2.24 -0.962,5.938 -4.063,9.74 -1.134,1.389 -2.441,2.789 -3.945,4.141 -1.574,1.418999 -3.195,2.651999 -4.767,3.653999 -4.493,2.871 -8.628,3.928 -10.548,2.486 l -0.025,0.021 75.303,83.569992 0.125,-0.121 c 0.205,0.139 0.451,0.236 0.701,0.316 l -0.168,0.152 4.332,2.13399 z" /><path
style="fill:#ffffff"
inkscape:connector-curvature="0"
id="path759"
d="m 68.083,57.809998 c 1.732,1.772 5.994,0.776 10.643,-2.194 1.541,-0.982 3.132,-2.193 4.677,-3.585999 1.476,-1.325 2.759,-2.701 3.872,-4.063 3.578,-4.388 5.091,-8.642 3.477,-10.584 l 0.023,-0.024 75.817,84.118991 c 0.635,2.262 -0.588,6.498 -3.754,10.357 -1.082,1.318 -2.34,2.656 -3.77,3.934 -1.588,1.434 -3.219,2.676 -4.807,3.676 -4.74,3.006 -9.303,4.19899 -11.016,2.301 -0.393,-0.439 -2.098,-2.336 -2.145,-2.406 l -73.255,-81.313992 0.238,-0.216 z" /><path
style="fill:#ffffff"
inkscape:connector-curvature="0"
id="path760"
d="m 75.79,43.614999 c 6.28,-5.652 13.18,-8.219 15.425,-5.733 l 16.961,18.827999 1.152,26.49 -17.973,0.784 -22.996,-25.513 0.123,-0.104 c -2.246,-2.49 1.032,-9.092999 7.308,-14.751999 z" /><path
style="fill:#ffffff"
inkscape:connector-curvature="0"
id="path761"
d="m 68.083,57.809998 c 1.732,1.772 5.994,0.776 10.643,-2.194 1.541,-0.982 3.132,-2.193 4.677,-3.585999 1.476,-1.325 2.759,-2.701 3.872,-4.063 3.578,-4.388 5.091,-8.642 3.477,-10.584 l 0.023,-0.024 75.817,84.118991 c 0.635,2.262 -0.588,6.498 -3.754,10.357 -1.082,1.318 -2.34,2.656 -3.77,3.934 -1.588,1.434 -3.219,2.676 -4.807,3.676 -4.74,3.006 -9.303,4.19899 -11.016,2.301 -0.393,-0.439 -2.098,-2.336 -2.145,-2.406 l -73.255,-81.313992 0.238,-0.216 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80066)"
id="path768"
d="m 75.79,43.614999 c 6.28,-5.652 13.18,-8.219 15.425,-5.733 l 16.961,18.827999 1.152,26.49 -17.973,0.784 -22.996,-25.513 0.123,-0.104 c -2.246,-2.49 1.032,-9.092999 7.308,-14.751999 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80063)"
id="path778"
d="m 68.083,57.809998 c 1.732,1.772 5.994,0.776 10.643,-2.194 1.541,-0.982 3.132,-2.193 4.677,-3.585999 1.476,-1.325 2.759,-2.701 3.872,-4.063 3.578,-4.388 5.091,-8.642 3.477,-10.584 l 0.023,-0.024 75.817,84.118991 c 0.635,2.262 -0.588,6.498 -3.754,10.357 -1.082,1.318 -2.34,2.656 -3.77,3.934 -1.588,1.434 -3.219,2.676 -4.807,3.676 -4.74,3.006 -9.303,4.19899 -11.016,2.301 -0.393,-0.439 -2.098,-2.336 -2.145,-2.406 l -73.255,-81.313992 0.238,-0.216 z" /><path
inkscape:connector-curvature="0"
style="fill:url(#linearGradient80060)"
id="path790"
d="m 74.357,65.112998 c 0,0 6.036,-0.212 10.685,-3.182 1.542,-0.983 3.132,-2.193 4.677,-3.586 1.477,-1.326 2.76,-2.701 3.873,-4.064 2.928,-3.588999 4.469,-7.087999 4.049,-9.306999 l -6.865,-7.617 -0.023,0.024 c 1.614,1.942 0.102,6.196 -3.477,10.584 -1.113,1.362 -2.396,2.738 -3.872,4.063 -1.545,1.392999 -3.136,2.603999 -4.677,3.585999 -4.648,2.971 -8.91,3.967 -10.643,2.194 l -0.238,0.217 73.256,81.310992 c 0.047,0.07 1.752,1.967 2.145,2.406 0.342,0.377 0.799,0.627 1.344,0.771 L 74.357,65.112998 z" /><path
style="fill:#003333"
inkscape:connector-curvature="0"
id="path791"
d="m 172.035,149.75398 c -1.635,1.477 -3.307,2.764 -4.949,3.84 l 13.605,6.697 -5.096,-14.156 c -1.058,1.218 -2.243,2.441 -3.56,3.619 z" /><path
style="opacity:0.5;fill:#ffffff"
inkscape:connector-curvature="0"
id="path792"
d="M 163.121,131.45299 86.968,48.329999 c 0.1,-0.12 0.213,-0.242 0.307,-0.364 1.428,-1.752 2.52,-3.49 3.225,-5.058 l 75.768,82.706991 c -0.553,1.824 -1.6,3.867 -3.147,5.838 z" /><path
style="opacity:0.5;fill:#ffffff"
inkscape:connector-curvature="0"
id="path793"
d="m 87.275,47.965999 c 0.634,-0.774 1.189,-1.548 1.694,-2.3 l 76.015,82.973991 c -0.578,1.063 -1.283,2.146 -2.146,3.193 -0.744,0.896 -1.566,1.805 -2.465,2.697 L 84.152,51.331999 c 1.164,-1.108 2.209,-2.24 3.123,-3.366 z" /><rect
style="fill:none"
y="0"
x="0"
height="256"
width="256"
id="_x3C_Slice_x3E_" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:4.26666689;stroke-opacity:1"
id="rect79256"
width="150.77966"
height="48.813557"
x="9.313406"
y="170.86343"
ry="0" /><text
xml:space="preserve"
style="font-size:42.66666794px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:justify;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Linux Libertine O C;-inkscape-font-specification:Linux Libertine O C"
x="24.554667"
y="207.10201"
id="text80094"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan80096"
x="24.554667"
y="207.10201"
style="font-style:italic;font-weight:bold;-inkscape-font-specification:Linux Libertine O C Bold Italic">Labels</tspan></text>
</svg>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
xmlns:i="http://ns.adobe.com/AdobeIllustrator/10.0/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://web.resource.org/cc/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://inkscape.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
version="1.0"
width="48pt"
height="48pt"
viewBox="0 0 256 256"
id="svg2"
xml:space="preserve"
sodipodi:version="0.32"
inkscape:version="0.42+devel"
sodipodi:docname="gtk-open2.svg"
sodipodi:docbase="/home/cschalle/gnome/gnome-themes-extras/Nuvola/icons/scalable/stock"><metadata
id="metadata85"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
inkscape:cy="417.84947"
inkscape:cx="305.25953"
inkscape:zoom="0.43415836"
inkscape:window-height="563"
inkscape:window-width="822"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
borderopacity="1.0"
bordercolor="#666666"
pagecolor="#ffffff"
id="base"
inkscape:window-x="0"
inkscape:window-y="30"
inkscape:current-layer="svg2" /><defs
id="defs151" />
<g
id="switch6">
<foreignObject
id="foreignObject8"
height="1"
width="1"
y="0"
x="0"
requiredExtensions="http://ns.adobe.com/AdobeIllustrator/10.0/">
<i:pgfRef
xlink:href="#adobe_illustrator_pgf">
</i:pgfRef>
</foreignObject>
<g
id="g10">
<g
id="Layer_1">
<rect
width="256"
height="256"
x="0"
y="0"
style="fill:none"
id="rect13" />
</g>
<g
id="Layer_2">
<linearGradient
x1="98.551804"
y1="41.2593"
x2="98.551804"
y2="214.72549"
id="XMLID_14_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#c9e6ff;stop-opacity:1"
offset="0"
id="stop17" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="0.55620003"
id="stop19" />
<stop
style="stop-color:#0035ed;stop-opacity:1"
offset="1"
id="stop21" />
<a:midPointStop
style="stop-color:#C9E6FF"
offset="0" />
<a:midPointStop
style="stop-color:#C9E6FF"
offset="0.5" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5562" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#0035ED"
offset="1" />
</linearGradient>
<path
d="M 17.219,51.266 C 16.115,51.266 15.219,52.163 15.219,53.266 L 15.219,202.735 C 15.219,203.838 16.115,204.735 17.219,204.735 L 179.885,204.735 C 180.989,204.735 181.885,203.838 181.885,202.735 L 181.885,75.933 C 181.885,74.83 180.989,73.933 179.885,73.933 L 100.552,73.933 L 100.552,53.266 C 100.552,52.163 99.656,51.266 98.552,51.266 L 17.219,51.266 z "
style="fill:url(#XMLID_14_)"
id="path23" />
<linearGradient
x1="98.551804"
y1="41.258801"
x2="98.551804"
y2="214.7274"
id="XMLID_15_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#dcf0ff;stop-opacity:1"
offset="0"
id="stop26" />
<stop
style="stop-color:#428aff;stop-opacity:1"
offset="0.58990002"
id="stop28" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="0.85949999"
id="stop30" />
<stop
style="stop-color:#0035ed;stop-opacity:1"
offset="1"
id="stop32" />
<a:midPointStop
style="stop-color:#DCF0FF"
offset="0" />
<a:midPointStop
style="stop-color:#DCF0FF"
offset="0.5" />
<a:midPointStop
style="stop-color:#428AFF"
offset="0.5899" />
<a:midPointStop
style="stop-color:#428AFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.8595" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#0035ED"
offset="1" />
</linearGradient>
<path
d="M 20.219,56.266 C 20.219,61.91 20.219,194.091 20.219,199.735 C 25.891,199.735 171.213,199.735 176.885,199.735 C 176.885,194.154 176.885,84.514 176.885,78.933 C 171.33,78.933 95.552,78.933 95.552,78.933 C 95.552,78.933 95.552,60.651 95.552,56.266 C 90.2,56.266 25.572,56.266 20.219,56.266 z "
style="fill:url(#XMLID_15_)"
id="path34" />
<linearGradient
x1="98.551804"
y1="41.2593"
x2="98.551804"
y2="214.72549"
id="XMLID_16_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="0"
id="stop37" />
<stop
style="stop-color:#e9f2ff;stop-opacity:1"
offset="0.1147"
id="stop39" />
<stop
style="stop-color:#b0d2ff;stop-opacity:1"
offset="0.35389999"
id="stop41" />
<stop
style="stop-color:#579fff;stop-opacity:1"
offset="0.6936"
id="stop43" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="1"
id="stop45" />
<a:midPointStop
style="stop-color:#FFFFFF"
offset="0" />
<a:midPointStop
style="stop-color:#FFFFFF"
offset="0.5424" />
<a:midPointStop
style="stop-color:#006DFF"
offset="1" />
</linearGradient>
<path
d="M 179.885,73.933 L 100.552,73.933 L 100.552,53.266 C 100.552,52.163 99.656,51.266 98.552,51.266 L 17.219,51.266 C 16.115,51.266 15.219,52.163 15.219,53.266 L 15.219,57.266 L 91.552,57.266 C 92.656,57.266 93.552,58.163 93.552,59.266 L 93.552,79.933 L 172.885,79.933 C 173.989,79.933 174.885,80.83 174.885,81.933 L 174.885,204.735 L 179.885,204.735 C 180.989,204.735 181.885,203.838 181.885,202.735 L 181.885,75.933 C 181.885,74.83 180.988,73.933 179.885,73.933 z "
style="fill:url(#XMLID_16_)"
id="path47" />
<linearGradient
x1="106.9839"
y1="98.599098"
x2="106.9839"
y2="206.73489"
id="XMLID_17_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#0099ff;stop-opacity:1"
offset="0"
id="stop50" />
<stop
style="stop-color:#0089e5;stop-opacity:1"
offset="0.0937"
id="stop52" />
<stop
style="stop-color:#00406b;stop-opacity:1"
offset="0.54689997"
id="stop54" />
<stop
style="stop-color:#00121e;stop-opacity:1"
offset="0.85769999"
id="stop56" />
<stop
style="stop-color:#000000;stop-opacity:1"
offset="1"
id="stop58" />
<a:midPointStop
style="stop-color:#0099FF"
offset="0" />
<a:midPointStop
style="stop-color:#0099FF"
offset="0.4689" />
<a:midPointStop
style="stop-color:#000000"
offset="1" />
</linearGradient>
<path
d="M 32.083,106.599 L 32.083,206.734 L 42.083,206.734 C 42.083,180.445 42.083,111.718 42.083,108.599 C 45.222,108.599 143.57,108.599 181.884,108.599 L 181.884,98.599 L 40.083,98.599 C 35.665,98.599 32.083,102.181 32.083,106.599 z "
style="opacity:0.3;fill:url(#XMLID_17_)"
id="path60" />
<linearGradient
x1="6.3671999"
y1="47.148399"
x2="179.4046"
y2="220.1859"
id="XMLID_18_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#0053bd;stop-opacity:1"
offset="0"
id="stop63" />
<stop
style="stop-color:#00008d;stop-opacity:1"
offset="1"
id="stop65" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0.5" />
<a:midPointStop
style="stop-color:#00008D"
offset="1" />
</linearGradient>
<path
d="M 179.885,63.933 L 110.552,63.933 L 110.552,53.266 C 110.552,46.639 105.18,41.266 98.552,41.266 L 17.219,41.266 C 10.591,41.266 5.219,46.639 5.219,53.266 L 5.219,75.933 L 5.219,202.735 C 5.219,209.362 10.591,214.735 17.219,214.735 L 98.552,214.735 L 179.885,214.735 C 186.512,214.735 191.885,209.362 191.885,202.735 L 191.885,75.933 C 191.885,69.305 186.512,63.933 179.885,63.933 z M 181.885,202.734 C 181.885,203.837 180.989,204.734 179.885,204.734 L 17.219,204.734 C 16.115,204.734 15.219,203.837 15.219,202.734 L 15.219,53.266 C 15.219,52.163 16.115,51.266 17.219,51.266 L 98.552,51.266 C 99.656,51.266 100.552,52.163 100.552,53.266 L 100.552,73.933 L 179.885,73.933 C 180.989,73.933 181.885,74.83 181.885,75.933 L 181.885,202.734 z "
style="fill:url(#XMLID_18_)"
id="path67" />
<linearGradient
x1="128.48441"
y1="86.066902"
x2="128.48441"
y2="228.0708"
id="XMLID_19_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#c9e6ff;stop-opacity:1"
offset="0"
id="stop70" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="0.55620003"
id="stop72" />
<stop
style="stop-color:#0035ed;stop-opacity:1"
offset="1"
id="stop74" />
<a:midPointStop
style="stop-color:#C9E6FF"
offset="0" />
<a:midPointStop
style="stop-color:#C9E6FF"
offset="0.5" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5562" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#0035ED"
offset="1" />
</linearGradient>
<path
d="M 51.083,96.599 C 51.083,100.388 51.083,200.946 51.083,204.734 C 54.933,204.734 202.035,204.734 205.884,204.734 C 205.884,200.946 205.884,100.387 205.884,96.599 C 202.035,96.599 54.933,96.599 51.083,96.599 z "
style="fill:url(#XMLID_19_)"
id="path76" />
<linearGradient
x1="128.48441"
y1="86.064499"
x2="128.48441"
y2="228.06689"
id="XMLID_20_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#dcf0ff;stop-opacity:1"
offset="0"
id="stop79" />
<stop
style="stop-color:#428aff;stop-opacity:1"
offset="0.6742"
id="stop81" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="1"
id="stop83" />
<a:midPointStop
style="stop-color:#DCF0FF"
offset="0" />
<a:midPointStop
style="stop-color:#DCF0FF"
offset="0.5" />
<a:midPointStop
style="stop-color:#428AFF"
offset="0.6742" />
<a:midPointStop
style="stop-color:#428AFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#006DFF"
offset="1" />
</linearGradient>
<path
d="M 56.083,101.599 C 56.083,110.255 56.083,191.079 56.083,199.734 C 65.135,199.734 191.833,199.734 200.884,199.734 C 200.884,191.079 200.884,110.255 200.884,101.599 C 191.834,101.599 65.135,101.599 56.083,101.599 z "
style="fill:url(#XMLID_20_)"
id="path85" />
<linearGradient
x1="54.491199"
y1="76.673798"
x2="217.155"
y2="239.3376"
id="XMLID_21_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#0053bd;stop-opacity:1"
offset="0"
id="stop88" />
<stop
style="stop-color:#00008d;stop-opacity:1"
offset="1"
id="stop90" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0.5" />
<a:midPointStop
style="stop-color:#00008D"
offset="1" />
</linearGradient>
<path
d="M 207.885,86.599 L 49.083,86.599 C 44.664,86.599 41.083,90.181 41.083,94.599 L 41.083,206.734 C 41.083,211.152 44.664,214.734 49.083,214.734 L 207.884,214.734 C 212.302,214.734 215.884,211.152 215.884,206.734 L 215.884,94.599 C 215.885,90.181 212.303,86.599 207.885,86.599 z M 205.885,204.734 C 202.035,204.734 54.933,204.734 51.084,204.734 C 51.084,200.946 51.084,100.387 51.084,96.599 C 54.934,96.599 202.036,96.599 205.885,96.599 C 205.885,100.388 205.885,200.946 205.885,204.734 z "
style="fill:url(#XMLID_21_)"
id="path92" />
<linearGradient
x1="128.48441"
y1="86.066902"
x2="128.48441"
y2="228.0708"
id="XMLID_22_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="0"
id="stop95" />
<stop
style="stop-color:#f7fbff;stop-opacity:1"
offset="0.0862"
id="stop97" />
<stop
style="stop-color:#e2eeff;stop-opacity:1"
offset="0.2177"
id="stop99" />
<stop
style="stop-color:#c0dbff;stop-opacity:1"
offset="0.3779"
id="stop101" />
<stop
style="stop-color:#8fbfff;stop-opacity:1"
offset="0.56089997"
id="stop103" />
<stop
style="stop-color:#529cff;stop-opacity:1"
offset="0.76310003"
id="stop105" />
<stop
style="stop-color:#0871ff;stop-opacity:1"
offset="0.97839999"
id="stop107" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="1"
id="stop109" />
<a:midPointStop
style="stop-color:#FFFFFF"
offset="0" />
<a:midPointStop
style="stop-color:#FFFFFF"
offset="0.6158" />
<a:midPointStop
style="stop-color:#006DFF"
offset="1" />
</linearGradient>
<path
d="M 51.083,96.599 C 51.083,97.141 51.083,99.667 51.083,103.599 C 82.419,103.599 194.529,103.599 197.884,103.599 C 197.884,106.846 197.884,181.163 197.884,204.734 C 202.511,204.734 205.39,204.734 205.884,204.734 C 205.884,200.946 205.884,100.387 205.884,96.599 C 202.035,96.599 54.933,96.599 51.083,96.599 z "
style="fill:url(#XMLID_22_)"
id="path111" />
<path
d="M 132.455,30.044 C 126.885,30.044 122.355,34.574 122.355,40.143 L 122.355,158.953 C 122.355,164.521 126.885,169.053 132.455,169.053 L 237.008,169.053 C 242.576,169.053 247.108,164.522 247.108,158.953 L 247.108,40.143 C 247.108,34.574 242.577,30.044 237.008,30.044 L 132.455,30.044 z "
style="fill:#003366"
id="path113" />
<linearGradient
x1="158.8916"
y1="73.708504"
x2="299.68201"
y2="214.4994"
id="XMLID_23_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#ffffff;stop-opacity:1"
offset="0"
id="stop116" />
<stop
style="stop-color:#99ccff;stop-opacity:1"
offset="1"
id="stop118" />
<a:midPointStop
style="stop-color:#FFFFFF"
offset="0" />
<a:midPointStop
style="stop-color:#FFFFFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#99CCFF"
offset="1" />
</linearGradient>
<path
d="M 132.455,35.984 C 130.162,35.984 128.295,37.85 128.295,40.143 L 128.295,158.953 C 128.295,161.246 130.162,163.111 132.455,163.111 L 237.008,163.111 C 239.301,163.111 241.166,161.246 241.166,158.953 L 241.166,40.143 C 241.166,37.85 239.301,35.984 237.008,35.984 L 132.455,35.984 z "
style="fill:url(#XMLID_23_)"
id="path120" />
<path
d="M 205.523,86.479 C 216.566,76.124 229.841,71.031 244.136,68.5 L 244.136,40.143 C 244.136,36.206 240.943,33.014 237.007,33.014 L 132.455,33.014 C 128.517,33.014 125.326,36.206 125.326,40.143 L 125.326,125.251 C 154.779,127.473 182.639,106.979 205.523,86.479 z "
style="opacity:0.4;fill:#ffffff"
id="path122" />
<linearGradient
x1="141.7061"
y1="66.528297"
x2="239.2188"
y2="164.041"
id="XMLID_24_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#0053bd;stop-opacity:1"
offset="0"
id="stop125" />
<stop
style="stop-color:#00008d;stop-opacity:1"
offset="1"
id="stop127" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0.5" />
<a:midPointStop
style="stop-color:#00008D"
offset="1" />
</linearGradient>
<path
d="M 207.885,86.599 L 122.355,86.599 L 122.355,96.599 C 162.027,96.599 203.855,96.599 205.885,96.599 C 205.885,98.946 205.885,138.441 205.885,169.053 L 215.885,169.053 L 215.885,94.599 C 215.885,90.181 212.303,86.599 207.885,86.599 z "
style="opacity:0.2;fill:url(#XMLID_24_)"
id="path129" />
<linearGradient
x1="164.1201"
y1="89.542"
x2="164.1201"
y2="184.68871"
id="XMLID_25_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#c9e6ff;stop-opacity:1"
offset="0"
id="stop132" />
<stop
style="stop-color:#006dff;stop-opacity:1"
offset="0.55620003"
id="stop134" />
<stop
style="stop-color:#0035ed;stop-opacity:1"
offset="1"
id="stop136" />
<a:midPointStop
style="stop-color:#C9E6FF"
offset="0" />
<a:midPointStop
style="stop-color:#C9E6FF"
offset="0.5" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5562" />
<a:midPointStop
style="stop-color:#006DFF"
offset="0.5" />
<a:midPointStop
style="stop-color:#0035ED"
offset="1" />
</linearGradient>
<path
d="M 122.355,158.953 C 122.355,164.521 126.885,169.053 132.455,169.053 L 205.885,169.053 C 205.885,138.442 205.885,98.947 205.885,96.599 C 203.856,96.599 162.028,96.599 122.355,96.599 L 122.355,158.953 L 122.355,158.953 z "
style="opacity:0.2;fill:url(#XMLID_25_)"
id="path138" />
<linearGradient
x1="185.8848"
y1="86.066902"
x2="185.8848"
y2="228.0708"
id="XMLID_26_"
gradientUnits="userSpaceOnUse">
<stop
style="stop-color:#0053bd;stop-opacity:1"
offset="0"
id="stop141" />
<stop
style="stop-color:#00008d;stop-opacity:1"
offset="1"
id="stop143" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0" />
<a:midPointStop
style="stop-color:#0053BD"
offset="0.5" />
<a:midPointStop
style="stop-color:#00008D"
offset="1" />
</linearGradient>
<path
d="M 181.885,96.599 L 181.885,202.734 C 181.885,203.837 180.989,204.734 179.885,204.734 C 184.268,204.734 188.244,204.734 191.705,204.734 C 191.814,204.083 191.885,203.417 191.885,202.734 L 191.885,96.599 C 188.916,96.599 185.557,96.599 181.885,96.599 z "
style="opacity:0.3;fill:url(#XMLID_26_)"
id="path145" />
<path
d="M 122.355,96.599 L 122.355,103.599 C 159.458,103.599 195.991,103.599 197.885,103.599 C 197.885,105.771 197.885,139.741 197.885,169.053 L 205.885,169.053 C 205.885,138.442 205.885,98.947 205.885,96.599 C 203.855,96.599 162.027,96.599 122.355,96.599 z "
style="opacity:0.2;fill:#ffffff"
id="path147" />
<rect
width="256"
height="256"
x="0"
y="0"
style="fill:none"
id="_x3C_Slice_x3E_" />
</g>
</g>
</g>
</svg>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
<svg
width="48pt"
height="48pt"
viewBox="0 0 48 48"
style="overflow:visible;enable-background:new 0 0 48 48"
xml:space="preserve"
id="svg589"
sodipodi:version="0.32"
sodipodi:docname="/home/david/Desktop/action/filesaveas.svg"
sodipodi:docbase="/home/david/Desktop/action"
xmlns="http://www.w3.org/2000/svg"
xmlns:xap="http://ns.adobe.com/xap/1.0/"
xmlns:xapGImg="http://ns.adobe.com/xap/1.0/g/img/"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:xapMM="http://ns.adobe.com/xap/1.0/mm/"
xmlns:pdf="http://ns.adobe.com/pdf/1.3/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
xmlns:x="adobe:ns:meta/"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs
id="defs677">
<defs
id="defs796" />
<sodipodi:namedview
id="namedview726" />
<metadata
id="metadata711">
<sfw>
<slices>
<slice
x="0"
y="0"
width="256"
height="256"
sliceID="124333141" />
</slices>
<sliceSourceBounds
x="0"
y="0"
width="256"
height="256"
bottomLeftOrigin="true" />
<optimizationSettings>
<targetSettings
fileFormat="PNG24Format"
targetSettingsID="0">
<PNG24Format
transparency="true"
includeCaption="false"
interlaced="false"
noMatteColor="false"
matteColor="#FFFFFF"
filtered="false" />
</targetSettings>
</optimizationSettings>
</sfw>
<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?>
<x:xmpmeta
x:xmptk="XMP toolkit 3.0-29, framework 1.6">
<rdf:RDF>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<pdf:Producer>
Adobe PDF library 5.00</pdf:Producer>
</rdf:Description>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c" />
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c" />
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<xap:CreateDate>
2004-01-26T11:58:28+02:00</xap:CreateDate>
<xap:ModifyDate>
2004-03-28T20:41:40Z</xap:ModifyDate>
<xap:CreatorTool>
Adobe Illustrator 10.0</xap:CreatorTool>
<xap:MetadataDate>
2004-02-16T23:58:32+01:00</xap:MetadataDate>
<xap:Thumbnails>
<rdf:Alt>
<rdf:li
rdf:parseType="Resource">
<xapGImg:format>
JPEG</xapGImg:format>
<xapGImg:width>
256</xapGImg:width>
<xapGImg:height>
256</xapGImg:height>
<xapGImg:image>
/9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA
AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK
DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f
Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgBAAEAAwER
AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA
AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB
UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE
1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ
qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy
obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp
0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo
+DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FXYq7FXYq7FXYq7
FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYqlvmDzFo
3l7TJdT1e5W1tItuTbszHoiKN2Y+AxV4j5g/5ydvTcMnl/SYlgU0Se/LOzDxMcTIF/4M4qk//QzP
nv8A5YNL/wCRVx/2UYq7/oZnz3/ywaX/AMirj/soxV3/AEMz57/5YNL/AORVx/2UYq7/AKGZ89/8
sGl/8irj/soxV3/QzPnv/lg0v/kVcf8AZRirv+hmfPf/ACwaX/yKuP8AsoxV3/QzPnv/AJYNL/5F
XH/ZRirv+hmfPf8AywaX/wAirj/soxV3/QzPnv8A5YNL/wCRVx/2UYq7/oZnz3/ywaX/AMirj/so
xV3/AEMz57/5YNL/AORVx/2UYq7/AKGZ89/8sGl/8irj/soxV3/QzPnv/lg0v/kVcf8AZRirv+hm
fPf/ACwaX/yKuP8AsoxV3/QzPnv/AJYNL/5FXH/ZRirv+hmfPf8AywaX/wAirj/soxV3/QzPnv8A
5YNL/wCRVx/2UYq7/oZnz3/ywaX/AMirj/soxV3/AEMz57/5YNL/AORVx/2UYq7/AKGZ89/8sGl/
8irj/soxV3/QzPnv/lg0v/kVcf8AZRirv+hmfPf/ACwaX/yKuP8AsoxVFad/zk75oS4B1HSbG4t+
6W/qwP8A8E7zj/hcVeyeRfzJ8tec7Vn0yUx3kQBuLCaizJ25AAkMlf2l+mmKsqxV2KuxV2KuxV2K
vm/XDqf5ufmk+j287Q+XtJLqJF3VIY2CSzAHYvM9AvtTwOKvePLfk/y35bs0tdHsYrZVFGlCgyuf
GSQ/Ex+ZxVOK4q6oxVrkMVdyGKu5jFWvUGKu9RffFWvVX3xV3rL74q71l8DirXrp4HFXfWE8DirX
1hPA4q76yngcVd9Zj8D+GKtfWo/A/hirvrcfgfw/rirvrcfgfw/rirX1yLwb8P64q765F4N+H9cV
d9di8G/D+uKtfXovBvw/riqVa/5X8r+abR7TV7GO55CiyMoWZP8AKjkHxKR7HFXzB5n0XXfys8/R
NZXBJgIudOujsJYGJUpIB8ijj+oxV9VeWtfs/MGhWWsWf9xexLKErUoxHxI3up2OKplirsVdirsV
Q+oMy2Fyy/aWJyvzCnFXhP8AziwqvL5nmYcpQLIBz1oxuC2/uVGKvficVaxVrFWicVaJxVrFWsVa
JxVonFWsVaxVrFWicVaxVrFWicVaJxVrFWsVaJxVonFWsVaxVdCSJkp/MP14q8V/5ypRBJ5ZkCjm
wvVZu5CmAgfRyOKsn/5x3vJX8lwWzElQZmSvbjMR/wAbYq9XxV2KuxV2KofUv+Oddf8AGGT/AIic
VeE/84pn/lKP+jD/ALGcVe+nFWsVaJxVonFWsVaxVonFWicVaxVrFWsVaJxVrFWsVaJxVonFWsVa
xVonFWicVaxVrFWicVXQ/wB9H/rD9eKvFv8AnKw/8ov/ANH/AP2LYqn/APzjn/yisHyuP+T4xV6/
irsVdirsVQ+pf8c66/4wyf8AETirwf8A5xRNf8U/9GH/AGM4q9+PXFWicVaJxVrFWsVaJxVonFWs
VaxVrFWicVaxVrFWicVaJxVrFWsVaJxVonFWsVaxVonFWicVXQ/30f8ArD9eKvFf+crjT/C3/R//
ANi2Ksg/5xy/5RS3+Vx/yfGKvYMVdirsVdiqH1L/AI511/xhk/4icVeDf84nmv8Ain/ow/7GcVe/
HrirROKtYq1irROKtE4q1irWKtYq0TirWKtYq0TirROKtYq1irROKtE4q1irWKtE4q0TirWKroP7
+P8A1h+vFXiv/OWBp/hb/o//AOxbFWQf844f8onb/K4/5PjFXsOKuxV2KuxVD6l/xzrr/jDJ/wAR
OKvBP+cTD/ylX/Rh/wBjOKvf2O5xVrFWsVaJxVonFXln5ofnxoPk9pNM05V1XX1qrwK1IYD/AMXO
v7X+Qu/iRmNm1IhsNy7vs7sWef1S9MPtPu/W+fdS81/mp5+uWaS6urm3ZivoQH6vZoaV4mhSKtP5
zXNXn1dbzlT1uDQ6fAPTEX8z+tX8r+Z/Pf5Xa5azXMUo0+evrac8oe3njGz8GQugkWoNRuNq7GhO
m1Q5xNhhrNHh1cDH+Ideo/Y+q/KfnXRfM+nw3umyVinXkgPXbZlPgynqM3UJiQsPAajTzwzMJiiE
+yTS1irROKtE4q1irWKtE4q0TirWKtYq0TirROKtYq1iq6A/v4/9Zf14q8U/5yzP/KK/9H//AGLY
qyH/AJxv/wCUSt/lcf8AJ/FXsWKuxV2KuxVD6l/xzrr/AIwyf8ROKvAv+cSj/wApV/0Yf9jOKvoB
upxVrFWicVaJxV4h+fH50yaCJPK/l2amsSLTUL1DvbI4qET/AItYGtf2R79MPU6jh9I5vSdi9keL
+9yD0dB3/s+95B5J/L5tQC6rrQZ4JgJLe2JPKXlv6krdeJ6qK1br0+1zGu7S8P0w3l937Xryeg5P
W7GwRESONFSNAFjjQBVVR0CqKAD2GaCUpTNyNlxpzA5Jlr3ky01XQTYapDytrj4gw2kikH2HQkfC
wH8QdiRncdk9ncOmqW0pG/c8jqe1JQ1PHjO0dvIvF/L+u6/+Vvm19PvuUmnyMryqlaPGTRLiCtPi
FKHxoVPTaeHMcciO40XoNTpsfaGATjtLp+o/jzfVXlnzJY67psN3bSrKJUEiOvR1P7Q/iOxzbRkC
LDw2XHKEjGQqQTgnCwaJxVrFWsVaJxVonFWsVaxVonFWicVaxVrFWicVXwf38f8ArL+vFXiX/OWp
/wCUV/6P/wDsWxVkX/ONv/KI23yuf+T+KvY8VdirsVdiqH1L/jnXX/GGT/iJxV4D/wA4kGv+K/8A
t3/9jOKvoFvtH54qtJxVonFWMfmT5vXyj5M1LWwA1xDGEs4z0aeUhI6juAzcm9gcryz4YkuZ2fpf
HzRh0PP3PkvyBob+ZPMFzqWpt9aS3YT3Pq0czTzMSvME7glWZutaUPXOY7R1RxQ2+qX4t9GkBECI
2H6HtlraEmp3J3JOcsBbjZMjItDtrU3a+oQWT4lQ9GI7Z1HY/YxmRlyD0dB3/s+/3PM9p9p1cIHf
qe5mUsMV5CSAC1KMh751s5iIsvOAW87/ADA8gadr+mtY3i8WXk1hegVkglI/FTQc16MPAgEeXajX
ZtNq5ZpbwyHcfo946PXdn5/DiBHp073j/kXzlrX5ceZZNB1rktgJfiZakRM2wnjJA5RuPtDw361B
7fQ62MoiUTcJOX2n2fHVw8SH94Pt8i+qNH1i11SzS4gdW5KGPA8lIYVDKR1U9jm5BeHlEg0eaOxQ
1irROKtE4q1irWKtE4q0TirWKtYq0TirROKr4P7+P/XX9eKvEv8AnLc0/wAKf9vD/sWxVkf/ADjX
/wAofbfK5/5P4q9jxV2KuxV2KofUv+Oddf8AGGT/AIicVfP/APziMa/4r/7d/wD2M4q+gm+0fniq
0nFWsVedfn15Y1LzF+Xlzb6chlurOaO8WAbtIsQZWVffi5I+WUamBlDZ2vYupjh1AMuRFPn78qPM
lrYm40e4iIuJpDNCxNAxChWjpTZhxqPHfw35/P2fHUyAMuCvK/1PXdpZp4o+JEcUevf7/c9Xt9Qk
moFURr4Dc/fm30Xs/gwnil65efL5frt43Vdq5cuw9I8v1ptbB6rwryG4I7ZstXq8WngZ5JCMR3/j
d1+PHKZqIssu0fUGZQrn9+o+LwYZwp9pBq8hEPTGPIHr5/s6O1/I+HHfcpndWsN3CSBWv2l/z75b
qtNDUQJq+8fjqxx5DAvKfzN/LO08x2fAkQapbqTp98QeJHUxTUqSh+9TuO6tzej1U+z8vBPfDL8X
7+96HR6wjccuoed/lX+Y+p+TtZPlrzCWtoIpDHE02wt3O5R/GJ67GtB16bj0PSaoUN7ieRYdr9mD
PHxsX1df6X7Q+oLC/hvbdZoj7MvcHwzaPGognFWicVaxVrFWicVaJxVrFWsVaJxVonFWsVX2/wDv
RF/rr+vFXiP/ADlyaf4U/wC3h/2LYqyT/nGr/lDrb5XP/URir2TFXYq7FXYqh9S/4511/wAYZP8A
iJxV8+/84hn/AJSz/t3/APYzir6Dc/Efniq3FWsVWnf5Yq+d/wA+PydeGWTzf5ahKnl6mpWkIPIP
Wvrx07/zU+fXrg6nT/xB6rsTtblhynb+E/o/V8kF+VXnTStfC6bqf7rW0X4BXilyqipZAOjgCrL9
K7VC6HtjtPXYcXFhIqPPaz79/wBSdb2Ljxz4gPQfs8vd3fLuvqaRJGKIoUe2ebavX5tRLiyzMz5/
o7lx44wFRFLlLIwZTRhuCMx4TMSCNiGZF7FP9M1H1BXpIPtr4+4zs+yu0+Mf0hzH6XW6jBXuRd9Z
Q3UJIFVO5p1B8R75s9do4ajGSOR/FtGHKYF41+bP5W/p+3N3Yqkeu2y/umPwrcxiv7pmNArfyMfk
djVdJ2br5aLJ4OX+7PI937O/uei0WsEf6v3Md/Jr81b3S75PLGvM0c0bfV7V56q3JW4/VpeW6sDs
len2fDPQ9LqOh+Dhds9lgjxsXvIH3j9PzfSFtdQ3MCzRGqt94Pgcz3lVTFWsVaJxVonFWsVaxVon
FWicVaxVrFV9uf8ASIv9df14q8Q/5y8P/KJ/9vD/ALFsVZL/AM40f8oba/K5/wCojFXsuKuxV2Ku
xVD6l/xzrr/jDJ/xE4q+fP8AnEE/8pZ/27/+xnFX0G/2j8ziq3FWsVaJxVZIiOjI6hkYEMp3BB6g
4q+Yvzr/ACku/K+of4r8sq8enGQSzRw1DWsla81p+wT93yzXanT16hyex7H7UGWPg5dz0vr5Hz+9
l35Z/mFaeatMEM7LHrVqg+t2/Tmo29aPxUnr/Kdj1Unzbt3sbwScuMfuzzHd+z7vcy1OnOGVfwnk
f0Hz+/5s0IzmGm243eNw6GjL0OW4ssschKPMLIAiiyDTtQWReQ6/7sTw9xnb9l9piYsfEOrz4KVd
R0+K5hLDodwR2PjmV2l2fDPCxy+78dWGDMYF4X+cX5Wzamr61pMBOs261ubeMfFdRrQBkp1kQDYd
WGw3AB13ZHaUsE/y+fl/Cf0e7u7uT0mi1YGx+k/Yu/JL83pLgx6Hq8pa+ReMMjH/AHoRR3J/3ao/
4Ie+eg6fPfpPN0/bPZXhk5cY9HUd37Pue+xTRzRrLGwZGFVYZlvOricVaJxVrFWsVaJxVonFWsVa
xVonFV9v/vRF/rr+vFXiH/OXx/5RP/t4f9i2Ksl/5xn/AOUMtflc/wDURir2bFXYq7FXYqh9S/45
11/xhk/4icVfPX/OH5r/AIt/7d//AGNYq+hH+23zOKrcVaJxVrFWsVUbq2t7u3ktrmNZYJlKSxuK
qynqCMUgkGw+VPzW/LbV/wAvNfj8xeXnkj0ppfUt7iPrbSMT+6bqCjVoK7EfCffVarTAXtcS9r2X
2jHVQ8LL9f8AuvP3/wBoeofl/wCeLHzboy3KFY9QgAS/tQd0c9CK78XpVfu6g55j232OdNLjh/dH
7PL3d32+dObFLFPhPwPf+3vZORmga7XQyyQyB0NCPxHgcvwZ5YpCUeaJREhRZDYXySIGH2T9te4O
d32b2jGcbHLqO51ebCQWtT02OePkvzVvD+zB2r2ZHLGx8D3fsTp85iXz3+cn5aTQyzea9EjMN3A3
ranBF8P2fiN0lKUYUq9Ov2v5iYdi9rSEvy+baY+k9/l+rvek0epBHAd4nl+r8e5lP5L/AJuLrFuN
M1RwupQj96NgJVH+7Y18R+2o+Y8B3eDPxCjzed7W7MOCXHD+7P2fjo9oV1ZQykFWFQR0IOZLpXYq
1irROKtE4q1irWKtE4q1iq+2/wB6Iv8AXX9eKvD/APnMA0/wl/28P+xXFWTf84y/8oXafK5/6iMV
ez4q7FXYq7FUPqX/ABzrr/jDJ/xE4q+eP+cPTX/Fv/bu/wCxrFX0K/22+ZxVaTirWKtYq0TirROK
oPVdLsNV0+fT7+Fbi0uFKSxOAQQfngIvYsoTMSJRNEPlHzr5S8yflN5ui1TSJGbTJWItJ2+JHQ7t
bzgEV6fxBBFc0+r0kSDGQuEnuNFrIa3Fwz+sc/8Aih+PseyeTvOOneaNFi1K0+BvsXNsTVopQAWQ
mgqN9jTcfdnmHa/ZEtLOxvjPI/oP43+biZMRhLhlz+8d/wCOSfBlOaWmFK1vO8EgdOn7Q7EZk6XV
Swz4o/HzYTgJCiyGyvI5Iwa1jbqD2Pvne9n6+M4f0D9jq8uIg+ahqmmCQB02cfYb+BzF7W7L4xxR
+ocj+j9TZp9RWxfNv5qfl1deWb//ABb5YBtIYZBJd28VB9WlJp6kQ6ekxNCnRe3wmi5XYnbByfus
m2aP21+nv+b0mnzxyx8Oe4P2/j8bvTfyh/Naz8xaeLe6ZYb+EAXNvX7J6eqlf91sf+BP3ntsOYTH
m8r2n2dLTz23geR/Q9TrXfLnWNE4q0TirWKtYq0TirWKtYqvtv8AemL/AF1/Xirw7/nMI0/wl/28
f+xXFWUf84x/8oVafK5/6iMVez4q7FXYq7FUPqX/ABzrr/jDJ/xE4q+d/wDnDo/8pd/27v8AsaxV
9CyH42+ZxVbirWKtE4q0TirWKtYqlXmXy5pXmPR7jSdThE1rcLxNeqnsynsR45GURIUW3DmlimJx
NEPlbU9P80flB5zPEG4024+yGNI7q3B6EgfDInZqbHxBIOk1uijOJhMXEvb6fPj12K+U4/Yf1F7Z
5e8yabrulQ6np0hktph0YUdHH2o5F3oy9/vFQQc8x7T7MnpcnCd4nke/9rimBBMZfUPx8k2SfNWY
sTBF2d8YJOQ3U/aXxzK0erlgnY5dQ0ZcPEGSWl1HLGBXlG3Q+Htne6LWRyQA5wLqcuMg+aB1nSI5
43BRXDqVZGAKupFCrA7GozWdrdmSvxMe0xyP469zkabUVsXzJ598j6r+XutxeZfLbOulep9glmNs
7HeCWpq8T9FY7/stvRm2/YnbH5gVL05o8x3+f63ooThqIHHk3v7fP3vbPyu/MnT/ADPpMZDenMlE
mgY7xSU+yT3U/sN/mOwxZRMW8frtFLTz4Ty6HvegE5Y4TWKtYq0TirWKtYq1iq+2P+kxf66/rxV4
d/zmKf8AlEf+3j/2K4qyj/nGL/lCbT5XX/URir2jFXYq7FXYqh9S/wCOddf8YZP+InFXzr/zhwf+
Uv8A+3d/2NYq+hpPtt8ziq3FWicVaJxVrFWsVaJxVonFWP8AnbyZpHm7QptK1JNm+KCcfbikH2WU
5CcBIUXI0upngmJw5vmCxuvMX5T+b59M1SJptOmI+sInSWIfZnhJ25rXpX2PY5oNfoI5YnHMbfjc
PbRnDV4xOG0x9nkfL+17fp2q2V/Zw31jOtxZ3C84Jk6MvTvuCCKEHcHY755rrtDPT5DCXwPeGiO/
MURzCNSf3zBMUGCP0/U2t3od4m+0v8RmZodYcEv6B5/rcXNp+IebKbW6jmjCkhkYfA2d1pdRHJHh
O4PIumyYzE2lXmLQLW+tZ7e4hWaC4Ro54W6SIwoRt3pmk7T7PniyDNi2nHf3/j7XK02or8cnzF5l
8va/+VvmmPVtKLTaJcMVgkapVlO7W1xTo4pVT+0ByG4YL0fY3a8dRDiG0x9Q/HR38hDVYzCfP8bh
9C/l9580zzPpENxby8uXw0enNXHWOQfzD8RvnUwmJCw8ZqtLPBMwl/ay7JuM0TirWKtYq1irROKq
lt/vTF/rr+vFXhn/ADmOf+UQ/wC3j/2K4qyn/nGD/lB7P5XX/UTir2nFXYq7FXYqh9S/4511/wAY
ZP8AiJxV85/84bGv+L/+3d/2NYq+iJP7xvmcVWE4q0TirWKtYq0TirROKtYq1irEPzJ/LzS/Ouhv
Z3AEV9EC1jd03jkp38VPcZXlxiYouZodbPTz4o8uo73zh5W17Vvy68y3Pl7zDG8envJ/pCgEiNzR
VuYtqspAo1Oo9xTOd7R7OjngYT59D3PZkxzwGXFz+/8Aon8be57ZFco6JJG6yRSKHilQhkdGFVZW
GxBG4Oec6nSzwzMJjcMIESFhXSf3zFMUGCaaXqxt34SGsLf8KfHNhoNacJ4ZfQfscPUabiFjmy23
uUnjEbmtRVG8c7fDljljwy+BdJPGYmwx7zZ5asdU0+5sr2AT2lyvG4hP7QrUMpHRlIrUdDnPa3SZ
NNl8fD9Q5+Y/HP8AW52l1HL7HzS6+Yfym83ru1zpF38SOPhS4hU9uoWaLluO1f5WFet7K7TjngJw
+I7vx0dxqMENXjo7SH2fsL6X8n+btO8xaXBdWswlWVOSOOrAdQR2dejDOhjISFh4rNhlikYyFEMg
yTU1irWKtE4q1iqpa/70xf66/rxV4X/zmSaf4Q/7eP8A2K4qyr/nF/8A5Qaz+V1/1E4q9qxV2Kux
V2KofUv+Oddf8YZP+InFXzl/zhoa/wCMP+3d/wBjWKvoiT+8b5n9eKrCcVaxVrFWicVaJxVrFWsV
aJxVonFWAfm1+V1j510gtEFh1u1UmzuSOvcxvTs2U5sQmPN2PZ3aEtPO+cDzDwbyD5vv/K2qyeVv
MnK2s1kKIZtvqkxJJ3/31ITv2B+IftV5rtPs2OojR2mOR/HR6+dSAy4975+Y/WP2e7sPqMjFW2Iz
gM2CWORjIVIMokSFjkqpP75QYoME40fWfQYQzN+6J+Fv5T/TNp2drvDPBL6fucDVaXi3HNmEMyXM
fpuaOPsnxzsYSGaPDLm6KUDA2OTCfzD8nWes6Df2VzErRtG8kZYf3M6IxjmSm/wnw6io6EjNHDSZ
NNqRPH9Mj6h5d7tdFqLIHX8bPA/yY8z3eh+Y59HuGeOK4LERmtY7mHqQOx4g8vGgzuNLOjXe2du6
cTxDIOcfuL6k0fU0v7USbeotA9Ohr0I+ebB5FHYq0TirWKtYqqWv+9UP+uv68VeF/wDOZZp/g/8A
7eP/AGK4qyr/AJxd/wCUFs/ldf8AUTir2vFXYq7FXYqh9S/4511/xhk/4icVfOH/ADhia/4w/wC3
b/2NYq+iZT+8b5n9eKrMVaxVonFWicVaxVrFWicVaJxVrFWsVeWfnR+Ulv5ssG1XTI1j1+1QlSBT
6wij+7b3/lOY+fDxCxzdt2X2kcEuGX92fs83kv5c+e7m1nTyr5hYxGFvQ0+5m2eJwaC2lr+xXZCf
s9Ps048x2p2YM8bG2SP2+RerkBH95DeJ5/8AFD9Pf7+fT+boxVgQymhB6gjOGnjMSQRRDkCpCxyK
qk+VmLEwT/Q9c9Nlt5noP91SE9D4H2zb9na4xIhI+4us1mkv1D4ppqdy+tXUGiwL3EmoTDokSmvH
5tnWwHjECveXCwQGnic0vdEd5/Y+b/zp0N/J/wCa0moWqFLW9dNTtlGwJdv3yV95Fb6DmzPplYc7
QZBqNNwy84l7d+Xmrxy8FR+UMyj02HQq45Ic2gNi3jJwMZGJ5hn5OFi1irWKtYqqWp/0qH/XX9Yx
V4V/zmcaf4P/AO3l/wBiuKsr/wCcXP8AlBLL5XX/AFE4q9sxV2KuxV2KofUv+Oddf8YZP+InFXzf
/wA4Xmv+Mf8At2/9jWKvomX+8f5n9eKrMVaJxVonFWsVaxVonFWicVaxVrFWicVaJxV4t+eP5PLr
UMnmPQYQNWiWt5bIAPrCj9r/AFwPvzFz4OLcc3edk9p+EfDmfQfs/Ywv8tvzA/SSxeXtaYrq0Q9O
xu3/AN3hf90yk9JV/ZY/a6H4qcuU7W7L8YccP7wfb+3u+Xc9IR4J4h/dnn/R8x5d/dz72frG7EhQ
aru3sPE+GcfHHKRoCy5RkEdpunXd7MI7YBiDR5m/uk+n9o/575vdB2OSbn8unxcXU6mGIXL5dT+p
6JoOmWmmWxiiq8kh5Tzt9uRvE/wzstPjjAUHkdZqp5pWeQ5DueX/APOT3lb9I+TbbXYUrcaNMPVY
Df6vcEI3Twk4H78syDZzexM/DkMDyl94Yb+TmvPLpFoC/wC9tHNsxP8Ak0eL8CBmVppXH3ON21g4
M5PSW76DhmWaFJV+y6hh9IzIdSuxVrFWicVVLX/eqH/XX9YxV4V/zmgaf4O/7eX/AGK4qyz/AJxa
/wCUDsvldf8AUScVe2Yq7FXYq7FUPqX/ABzrr/jDJ/xE4q+bf+cLTX/GP/bt/wCxrFX0VL/ev/rH
9eKrCcVaJxVrFWsVaJxVonFWsVaxVonFWicVaxVo74q8F/Or8k5by5fzF5ZhUTSVa/sRRQTSvqJ2
BP7Vdu+YmfT3vF6DsvtcYxwZPp6Hu/Y8z078w/O3lu9S31pJNQiiP+8uoF2ald/Tlrypttuy+2az
Jpo3uKL0UTHJD93Kr6int3kj85vJmuCO09UaTemgW0ueKKT4RyD4G9gaE+GARMXn9XoMsSZH1eb0
yC498thN1UosQ/OLz35a0DyZfWWrD61catby21rpyMBJJzUqXrvwVK15U69N8zcOM5Nujjz1XgET
/iB2fOf5VambLX7jTy443KcomFfikhPJSvzQscGnPDMxL0na4GbTxyx8j8JfgPqjytei50xd907e
zbj8a5nPLJvirROKtYqqWv8AvVD/AK6/rGKvCf8AnNI0/wAHf9vL/sVxVlv/ADix/wAoFY/K6/6i
Tir23FXYq7FXYqh9S/4511/xhk/4icVfNf8AzhWf+Uy/7dv/AGN4q+i5T+9f/WP68VWE4q1irWKt
E4q0TirWKtYq0TirROKtYq1irROKtHFWGeavy30fW0k9S3jkVqt6bAAhj3Unb78jKIPNtw554zcC
QXiHm38h720keTSXIpU/Vpq9P8k7n/iWYs9L/Nd/pe3jyyj4j9SRaL+Yv5leRD9RmZ3tACkdregy
xrtt6T1qvH+UNTxGYksfCdw7GeDBqomUCL7x+kMO1rVNX1/UpdS1C8e/vpz8bSbP2oqoPhCitFVP
uGbXBqMdUPS8V2j2JqcRMj+8j3j9I6fc1peoyWGoWGpLXnbSKJAD8TCMio9gYzx+/MbVR4MgkOrv
/Z/MM+klhPOO3wPL7bfV/wCX+pKzCIMGRxRSOhDfEp/XmWC6GUSDRZ2TihrFWsVVLT/euH/jIv6x
irwj/nNQ/wDKG/8Aby/7FMVZd/ziv/ygNj8rr/qKOKvbsVdirsVdiqH1L/jnXX/GGT/iJxV80/8A
OFBr/jL/ALdv/Y3ir6MmP71/9Y/rxVZirWKtE4q0TirWKtYq0TirROKtYq1irROKtYq1irWKqc0M
MyGOVA6HsRXFWMa/5B0jVIXR4kdXFDHKKinhy6/fXAQDzZwySgbiaLxjzh+QZiZ5tKZrdzUiB94y
dzsf6H6Mxp6UHk7vS9uTjtkHEO/q8r1vy75k0ovb39rII0IZpgvJaLVVJelQKdA2Y8xMCjydxpZ6
aczkx0Jy59D8R+l7H+T2vNNo9i3KsttW2fsAYqGP/hOOZmnlcXnO18PBnPdLf8fF73HIskayL9lw
GX5EVy51jeKtYqqWh/0uH/jIv6xirwf/AJzXNP8ABv8A28v+xTFWX/8AOKv/AJL+x+V3/wBRRxV7
firsVdirsVQ+pf8AHOuv+MMn/ETir5o/5wmNf8Z/9u3/ALG8VfRs396/+sf14qp4q0TirROKtYq1
irROKtE4q1irWKtE4q1irWKtYq0TirWKtYqskRJFKuoZT1UioxVI9V8o6ZfIQEUH+VxyX6O6/Rir
EW8gNpk0k1lEYjI4kbiOalhtUkfF274AAGc8kpVZJpnukpLHYRLIQSBVSO6ncdfnhYIvFWicVVbT
/euD/jIv/Ehirwb/AJzZNP8ABn/by/7FMVZf/wA4qf8AkvrD5Xf/AFFHFXuGKuxV2KuxVD6l/wAc
66/4wyf8ROKvmb/nCQ/8pn/27P8AsbxV9HTf3z/6x/XiqmTirROKtYq1irROKtE4q1irWKtE4q1i
rWKtYq0TirWKtYq1irROKtYq1irWKtE4q1iqrZ/71wf8ZF/4kMVeC/8AObZ/5Qz/ALef/YpirMP+
cUv/ACXth8rv/qKOKvccVdirsVdiqH1L/jnXX/GGT/iJxV8y/wDOER/5TT/t2f8AY3ir6OnP75/9
Y/rxVTJxVrFWsVaJxVonFWsVaxVonFWsVaxVrFWicVaxVrFWsVaJxVrFWsVaxVonFWsVaxVVs/8A
eyD/AIyL/wASGKvBf+c3T/yhf/bz/wCxTFWY/wDOKH/kvLD5Xf8A1FHFXuOKuxV2KuxVD6l/xzrr
/jDJ/wAROKvmP/nB81/xp/27P+xvFX0fOf30n+sf14qp4q1irROKtE4q1irWKtE4q1irWKtYq0Ti
rWKtYq1irROKtYq1irWKtE4q1irWKtYqq2Z/0yD/AIyJ/wASGKvBP+c4DT/Bf/bz/wCxTFWZf84n
/wDku9P+V3/1FHFXuWKuxV2KuxVD6l/xzrr/AIwyf8ROKvmD/nCCRUn86W7njORpzCM7NRDdBtvY
sK4q+kbiomkr/Mf14qp4q0TirROKtYq1irROKtYq1irWKtE4q1irWKtYq0TirWKtYq1irROKtYq1
irWKtE4qrWIJvIABU81P3GuKvAP+c4ZozL5MiDAyIupOydwrG1Cn6eJxVm3/ADieGH5dafUEHjdn
fwN0SMVe5Yq7FXYq7FVskayRtG32XBVvkRTFXxjrN7rf5Efnjca1FbNP5e1ZpDLAtFWW2mcPLGld
g8MlGT2p2JxV9U+U/PHknzvp8d/5f1SG8DrV4UcLcRnussJ+NCPcfLbFU8/R0X8zfhirv0bF/M34
Yq1+jIv52/DFXfoyL+dvwxV36Lh/nb8MVa/RUP8AO34Yq79FQ/zt+H9MVa/RMP8AO34Yq79Ew/zt
+GKu/REH87fh/TFWv0PB/O34f0xV36Hg/nb8P6Yq79DQfzt+H9MVa/QsH87fh/TFXfoWD/fj/h/T
FWv0Jb/78f8AD+mKu/Qdv/vx/wAP6Yq1+g7f/fj/AIf0xV36Ct/9+P8Ah/TFXfoK3/34/wCH9MVa
/QNv/vx/w/pirv0Bbf78f8P6Yqk3mfzh5E8iWEuoa9qcNpxUlIpHDXEngsUK/G5PsPntir4i/MXz
tr35wfmQtxa27Rxy8bTSbImvo2yEtykI2qas7n6OgGKvsf8AJ7y5HoWhW1jAP3NpbpEGIoWJp8R9
24VPzxV6FirsVdirsVdirE/zG/Lfy/560OTTNViUvSsE9KsjjoR3+7FXyP5v/wCcW/Nuk3rpYTLL
ASfTMwYrx9pIw1fpQYqx3/oXzz942v8AwU//AFSxV3/Qvnn7xtf+Cn/6pYq7/oXzz942v/BT/wDV
LFXf9C+efvG1/wCCn/6pYq7/AKF88/eNr/wU/wD1SxV3/Qvnn7xtf+Cn/wCqWKu/6F88/eNr/wAF
P/1SxV3/AEL55+8bX/gp/wDqlirv+hfPP3ja/wDBT/8AVLFXf9C+efvG1/4Kf/qlirv+hfPP3ja/
8FP/ANUsVd/0L55+8bX/AIKf/qlirv8AoXzz942v/BT/APVLFXf9C+efvG1/4Kf/AKpYq7/oXzz9
42v/AAU//VLFXf8AQvnn7xtf+Cn/AOqWKu/6F88/eNr/AMFP/wBUsVd/0L55+8bX/gp/+qWKu/6F
88/eNr/wU/8A1SxV3/Qvnn7xtf8Agp/+qWKu/wChfPP3ja/8FP8A9UsVd/0L55+8bX/gp/8Aqliq
L0z/AJxz85XFwEu54IIu7xiWRv8AgWWP9eKvevys/JPTPLg/0WEz3sgHr3UtC5HWjECiJ/kjr3xV
7vpthHY2qwpuert4se+KorFXYq7FXYq7FXYqtkijlUpIgdD1VgCPxxVCnRtLJ/3mT7sVd+htL/5Z
k/HFXfobS/8AlmT8cVd+htL/AOWZPxxV36G0v/lmT8cVd+htL/5Zk/HFXfobS/8AlmT8cVd+htL/
AOWZPxxV36G0v/lmT8cVd+htL/5Zk/HFXfobS/8AlmT8cVd+htL/AOWZPxxV36G0v/lmT8cVd+ht
L/5Zk/HFXfobS/8AlmT8cVd+htL/AOWZPxxV36G0v/lmT8cVd+htL/5Zk/HFXfobS/8AlmT8cVd+
htL/AOWZPxxV36G0v/lmT8cVd+htL/5Zk/HFXDRtLB/3mT7sVRUcUcShI0CIOiqAB+GKrsVdirsV
f//Z</xapGImg:image>
</rdf:li>
</rdf:Alt>
</xap:Thumbnails>
</rdf:Description>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<xapMM:DocumentID>
uuid:4ee3f24b-6ed2-4a2e-8f7a-50b762c8da8b</xapMM:DocumentID>
</rdf:Description>
<rdf:Description
rdf:about="uuid:cbee75c6-82d1-45ba-8274-b89c6084675c">
<dc:format>
image/svg+xml</dc:format>
<dc:title>
<rdf:Alt>
<rdf:li
xml:lang="x-default">
mime.ai</rdf:li>
</rdf:Alt>
</dc:title>
</rdf:Description>
</rdf:RDF>
</x:xmpmeta>
<?xpacket end='w'?>
</metadata>
<linearGradient
id="XMLID_9_"
gradientUnits="userSpaceOnUse"
x1="128.9995"
y1="11"
x2="128.9995"
y2="245.0005">
<stop
offset="0"
style="stop-color:#494949"
id="stop717" />
<stop
offset="1"
style="stop-color:#000000"
id="stop718" />
<a:midPointStop
offset="0"
style="stop-color:#494949"
id="midPointStop719" />
<a:midPointStop
offset="0.5"
style="stop-color:#494949"
id="midPointStop720" />
<a:midPointStop
offset="1"
style="stop-color:#000000"
id="midPointStop721" />
</linearGradient>
<linearGradient
id="XMLID_10_"
gradientUnits="userSpaceOnUse"
x1="29.0532"
y1="29.0532"
x2="226.9471"
y2="226.9471">
<stop
offset="0"
style="stop-color:#FFFFFF"
id="stop725" />
<stop
offset="1"
style="stop-color:#DADADA"
id="stop726" />
<a:midPointStop
offset="0"
style="stop-color:#FFFFFF"
id="midPointStop727" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFFFFF"
id="midPointStop728" />
<a:midPointStop
offset="1"
style="stop-color:#DADADA"
id="midPointStop729" />
</linearGradient>
<linearGradient
id="XMLID_11_"
gradientUnits="userSpaceOnUse"
x1="-481.7007"
y1="-94.4194"
x2="-360.2456"
y2="-164.2214"
gradientTransform="matrix(0.1991 0.98 -0.98 0.1991 91.6944 573.5653)">
<stop
offset="0"
style="stop-color:#990000"
id="stop736" />
<stop
offset="1"
style="stop-color:#7C0000"
id="stop737" />
<a:midPointStop
offset="0"
style="stop-color:#990000"
id="midPointStop738" />
<a:midPointStop
offset="0.5"
style="stop-color:#990000"
id="midPointStop739" />
<a:midPointStop
offset="1"
style="stop-color:#7C0000"
id="midPointStop740" />
</linearGradient>
<linearGradient
id="XMLID_12_"
gradientUnits="userSpaceOnUse"
x1="-1375.9844"
y1="685.3809"
x2="-1355.0455"
y2="706.3217"
gradientTransform="matrix(-0.999 0.0435 0.0435 0.999 -1277.0056 -496.5172)">
<stop
offset="0"
style="stop-color:#F8F1DC"
id="stop743" />
<stop
offset="1"
style="stop-color:#D6A84A"
id="stop744" />
<a:midPointStop
offset="0"
style="stop-color:#F8F1DC"
id="midPointStop745" />
<a:midPointStop
offset="0.5"
style="stop-color:#F8F1DC"
id="midPointStop746" />
<a:midPointStop
offset="1"
style="stop-color:#D6A84A"
id="midPointStop747" />
</linearGradient>
<linearGradient
id="XMLID_13_"
gradientUnits="userSpaceOnUse"
x1="65.0947"
y1="-0.7954"
x2="137.6021"
y2="160.1823">
<stop
offset="0"
style="stop-color:#FFA700"
id="stop750" />
<stop
offset="0.7753"
style="stop-color:#FFD700"
id="stop751" />
<stop
offset="1"
style="stop-color:#FF794B"
id="stop752" />
<a:midPointStop
offset="0"
style="stop-color:#FFA700"
id="midPointStop753" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFA700"
id="midPointStop754" />
<a:midPointStop
offset="0.7753"
style="stop-color:#FFD700"
id="midPointStop755" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFD700"
id="midPointStop756" />
<a:midPointStop
offset="1"
style="stop-color:#FF794B"
id="midPointStop757" />
</linearGradient>
<linearGradient
id="XMLID_14_"
gradientUnits="userSpaceOnUse"
x1="-1336.4497"
y1="635.7949"
x2="-1325.3219"
y2="622.5333"
gradientTransform="matrix(-0.999 0.0435 0.0435 0.999 -1277.0056 -496.5172)">
<stop
offset="0"
style="stop-color:#FFC957"
id="stop763" />
<stop
offset="1"
style="stop-color:#FF6D00"
id="stop764" />
<a:midPointStop
offset="0"
style="stop-color:#FFC957"
id="midPointStop765" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFC957"
id="midPointStop766" />
<a:midPointStop
offset="1"
style="stop-color:#FF6D00"
id="midPointStop767" />
</linearGradient>
<linearGradient
id="XMLID_15_"
gradientUnits="userSpaceOnUse"
x1="-1401.459"
y1="595.6309"
x2="-1354.6851"
y2="699.4763"
gradientTransform="matrix(-0.999 0.0435 0.0435 0.999 -1277.0056 -496.5172)">
<stop
offset="0"
style="stop-color:#FFA700"
id="stop770" />
<stop
offset="0.7753"
style="stop-color:#FFD700"
id="stop771" />
<stop
offset="1"
style="stop-color:#FF9200"
id="stop772" />
<a:midPointStop
offset="0"
style="stop-color:#FFA700"
id="midPointStop773" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFA700"
id="midPointStop774" />
<a:midPointStop
offset="0.7753"
style="stop-color:#FFD700"
id="midPointStop775" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFD700"
id="midPointStop776" />
<a:midPointStop
offset="1"
style="stop-color:#FF9200"
id="midPointStop777" />
</linearGradient>
<linearGradient
id="XMLID_16_"
gradientUnits="userSpaceOnUse"
x1="67.8452"
y1="115.5361"
x2="144.5898"
y2="115.5361">
<stop
offset="0"
style="stop-color:#7D7D99"
id="stop780" />
<stop
offset="0.1798"
style="stop-color:#B1B1C5"
id="stop781" />
<stop
offset="0.3727"
style="stop-color:#BCBCC8"
id="stop782" />
<stop
offset="0.6825"
style="stop-color:#C8C8CB"
id="stop783" />
<stop
offset="1"
style="stop-color:#CCCCCC"
id="stop784" />
<a:midPointStop
offset="0"
style="stop-color:#7D7D99"
id="midPointStop785" />
<a:midPointStop
offset="0.5"
style="stop-color:#7D7D99"
id="midPointStop786" />
<a:midPointStop
offset="0.1798"
style="stop-color:#B1B1C5"
id="midPointStop787" />
<a:midPointStop
offset="0.2881"
style="stop-color:#B1B1C5"
id="midPointStop788" />
<a:midPointStop
offset="1"
style="stop-color:#CCCCCC"
id="midPointStop789" />
</linearGradient>
</defs>
<sodipodi:namedview
id="base" />
<metadata
id="metadata590">
<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?>
<x:xmpmeta
x:xmptk="XMP toolkit 3.0-29, framework 1.6">
<rdf:RDF>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<pdf:Producer>
Adobe PDF library 5.00</pdf:Producer>
</rdf:Description>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998" />
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998" />
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<xap:CreateDate>
2004-02-04T02:08:51+02:00</xap:CreateDate>
<xap:ModifyDate>
2004-03-29T09:20:16Z</xap:ModifyDate>
<xap:CreatorTool>
Adobe Illustrator 10.0</xap:CreatorTool>
<xap:MetadataDate>
2004-02-29T14:54:28+01:00</xap:MetadataDate>
<xap:Thumbnails>
<rdf:Alt>
<rdf:li
rdf:parseType="Resource">
<xapGImg:format>
JPEG</xapGImg:format>
<xapGImg:width>
256</xapGImg:width>
<xapGImg:height>
256</xapGImg:height>
<xapGImg:image>
/9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA
AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK
DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f
Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgBAAEAAwER
AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA
AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB
UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE
1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ
qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy
obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp
0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo
+DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FXYq7FXYq7FXYq7
FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7F
XYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FX
Yq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXY
q7FXzd+b/wDzlWum3k+h+QxFc3EJMdzrkoEkKuNiLZPsyU/nb4fAEb50vZ/YXEBPLsP5v62meXue
A3v5mfmprl080vmLVriXdjHBcTIi17rFCVRfoXOghocEBQhH5NJmepUf8Tfmj/1dtb/6SLv/AJqy
f5fD/Nj8gjxPN3+JvzR/6u2t/wDSRd/81Y/l8P8ANj8gviebv8Tfmj/1dtb/AOki7/5qx/L4f5sf
kF8Tzd/ib80f+rtrf/SRd/8ANWP5fD/Nj8gviebv8Tfmj/1dtb/6SLv/AJqx/L4f5sfkF8Tzd/ib
80f+rtrf/SRd/wDNWP5fD/Nj8gviebv8Tfmj/wBXbW/+ki7/AOasfy+H+bH5BfE83f4m/NH/AKu2
t/8ASRd/81Y/l8P82PyC+J5u/wATfmj/ANXbW/8ApIu/+asfy+H+bH5BfE83f4m/NH/q7a3/ANJF
3/zVj+Xw/wA2PyC+J5u/xN+aP/V21v8A6SLv/mrH8vh/mx+QXxPN3+JvzR/6u2t/9JF3/wA1Y/l8
P82PyC+J5u/xN+aP/V21v/pIu/8AmrH8vh/mx+QXxPN3+JvzR/6u2t/9JF3/AM1Y/l8P82PyC+J5
u/xN+aP/AFdtb/6SLv8A5qx/L4f5sfkF8Tzd/ib80f8Aq7a3/wBJF3/zVj+Xw/zY/IL4nm7/ABN+
aP8A1dtb/wCki7/5qx/L4f5sfkF8Tzd/ib80f+rtrf8A0kXf/NWP5fD/ADY/IL4nm7/E35o/9XbW
/wDpIu/+asfy+H+bH5BfE82j5t/M+Aes2ta3EI/i9U3N2vGnfly2x/LYT/DH5BePzZ15C/5yh/Mb
y7cxRaxcHzDpQIEsF2f9IC9zHc058v8AX5D9ea/VdiYcg9I4JeXL5NkchD688jeefLvnby/DrmhT
+rayEpLE4CywygAtFKtTxYV+RG4qDnH6nTTwT4JjdyIytkGY6XYq7FXYq7FXYq7FXjX/ADlH+YV1
5W8hppunymHU/MMj2qSqaMltGoNwynxPNE/2WbrsPSDLl4pfTDf49GvJKg+VPy+8lP5ivecqM9rG
4jWFaqZpTvw57cVUULGvcfMdtYFk7Ac3Ua3VHGAI/XLk+jNK/LfSLS0SK4JYqDSGCkUCV3PBVAPX
vtXwzWT7TlfoAA+11f5Xi3mTIo608meV/wBL2lnLbSSLcc/92sB8Kk70IOU5+0s4xSmCPT5NuDRY
pZBEjmyu2/KnydcFgliF4ip5TT/wY5ov5f1f877B+p2/8kaf+b9pVv8AlT3lL/lkT/kdcf1w/wAv
az+d9kf1I/kjTfzftLR/J/yl/wAsif8AI65/rj/L2s/nfZH9S/yRpv5v2lafyg8p/wDLKn/I65/r
h/l3Wfzvsj+pf5J03837S0fyh8p/8sqf8jrn+uP8u6z+d9kf1L/JOm/m/aWj+UXlP/llj/5HXP8A
XH+XdZ/O+yP6l/knTfzftLX/ACqPyn/yzR/8jrn+uH+XNb/O+yP6l/knTd32lr/lUflX/lmj/wCR
1z/XB/Lmt/nfZH9S/wAk6bu+0u/5VD5W/wCWaP8A5HXP9cf5d1n877I/qX+SdN/N+0u/5VB5Y/5Z
ov8Akdc/1x/l3Wfzvsj+pf5J03837S7/AJU/5a/5Zov+R1z/AFx/l3Wfzvsj+pf5J03837S7/lT3
lv8A5Zov+R1z/XB/L2s/nfZH9S/yRpv5v2l3/KnfLv8AyzRf8jrn+uP8vaz+d9kf1L/JGm/m/aXf
8qc8v/8ALNF/yOuf64/y9rP532R/Uv8AJGm/m/aXf8qb0H/lmh/5HXP9cf5f1n877I/qX+SNN/N+
0u/5U1oP/LND/wAjrn+uD+X9Z/O+wfqT/JGn/m/aVk/5P6BDBJM1rEVjUswE1xWg8KnH/RBq/wCd
9g/Uv8kaf+b9pYp5i8oeXLOGBoLQo0j8SRJIe3+Uxza9ldq6jNKQnLkO4Ov1/Z2HGAYj7SkreXdK
IoEZD/Mrmo+Vaj8M3I1eR1fgRee/mD+W8NxE91ZIPrhq0UygL6rbt6ctNubfssevy6XwmJjbYjo5
ml1csUhGRuB+xJP+cfvzGvfJvny1T1T+iNXdLTUbcn4SWNIpPZkduvgTmq7Z0gy4Sf4obj9L0WOV
F93xSJLGsiGqOAyn2O+cK5K7FXYq7FXYq7FXYq+R/wDnM65lbzjoFsT+6i05pEG/2pJ2VvbpGM6/
2cH7uR/pfocfNzb/ACCs7caXZzBAJPQuJS3fn9ZMXL/gNs2uvkRirvl+h0GffUm+kfx972EnNKyU
LXfzNpZ/4y/8QOOo/wAWn8PvbdN/fRei6SPjl/1R+vOWDvyjyMsQsIwoWkYVWEYULSMKFhGSVrFV
wOBVwOBVwOBK4HFVwOBK4HAq4HAlcDgVQ1I/7jrn/jE36siUh5X5uH+j23tL/DN52F9U/c6vtX6Q
x0nOidEgNZodNmBAP2aE9jzG4+jL9P8AWGrL9JfNGuSmDzPqEsICGK9maNRsF4ykgCnhmRKArhel
08iccT5B+iHk+4afQbcsalBx+8Bv+Ns8wdknWKuxV2KuxV2KuxV8hf8AOZn/ACneif8AbLH/AFES
52Hs7/dS/rfoDj5uaO/IUf7gbI/8ulx/1GnNlr/7v/O/Q6DN/jEv6v6nqxOahksshXzJpv8Az0/4
gcjqf8Xn8PvbdL/exei6SPjk/wBUfrzlw9AmBGTYrSMKrCMKFpGFVhGFC0jChYRklaxVcDgVcDgV
cDgSuBxVcDgSuBwKuBwJUdRP+4+5/wCMTfqyJSHlvmwf6Lb+0n8M3XYX1S9zq+1fpDwzzXoX1nzD
eT8a82U1/wBgBm1y6fikS6qGfhFJt5T076lomoJSnOSM/dTMzQYuCTj6rJxh4h5k/wCUi1T/AJjJ
/wDk62bM83fab+6j/VH3P0N8jf8AHBj+Y/5NpnlztGQYq7FXYq7FXYq7FXyF/wA5mf8AKd6J/wBs
sf8AURLnYezv91L+t+gOPm5ph+Q4/wCddsj/AMutx/1Gtmx1/wBH+d+h0Gb/ABiX9X9T1InNUl2n
b+Y9P/56f8QOQ1X+Lz+H3t+l/vYvRtJH7yT/AFR+vOWDv0xIySFhGSQtIwqsIwoWkYVWEYULSMKF
hGSVrFVwOBVwOBVwOBK4HFVwOBK4HAqjf/8AHPuf+MTfqyEkh5j5rH+iQ/65/Uc3XYf1y9zre1Pp
DDpbGzkcu8QZ26k50weeMQoXVvDDZyrEgQNQkD5jLMX1BhMbPmrzN/ykmrf8xlx/ydbMp6XTf3cf
6o+5+hnkb/jgx/Mf8m0zy52bIMVdirsVdirsVdir5C/5zM/5TvRP+2WP+oiXOw9nf7qX9b9AcfNz
TL8iR/zrFif+Xa4/6jWzYa76f879Doc/9/L3fqenE5rEL9KFfMNh85P+IHK9X/cT+H3uRpP72L0f
SR+8k/1f45yzv0xIwqtIwoWEZJC0jCqwjChaRhVYRhQtIwoWEZJWsVXA4FXA4FXA4ErgcVXA4EqV
9/vBc/8AGJv1ZCXJIea+ah/ocfsx/wCInNx2H9cvcHW9qfQGIE507z6HvN7dx8v1jLMfNhPk+Z/N
H/KTav8A8xtx/wAnWzJek0/93H+qPufoX5G/44MfzH/JtM8vdmyDFXYq7FXYq7FXYq+Qv+czP+U7
0T/tlj/qIlzsPZ3+6l/W/QHHzc0z/Isf86nYH/l3uP8AqNbM/W8v879Doc/9/L3fqelk5rkK2j76
/ZfN/wDiBynWf3Evx1cjSf3oej6UP3r/AOr/ABzl3fpliq0jCq0jChYRkkLSMKrCMKFpGFVhGFC0
jChYRklaxVcDgVcDgVcDgSuBxVTvP94rn/jE36shPkyDzjzUP9BX5n/iJzbdifXL4Ou7U+gfFhhO
dS86pXG8TD5frycebGXJ8z+av+Un1j/mNuf+TrZkh6TT/wB3H+qPufoV5G/44MfzH/JtM8vdmyDF
XYq7FXYq7FXYq+Qv+czP+U70T/tlj/qIlzsPZ3+6l/W/QHHzc01/I0f86fp5/wCKLj/qNbM7W8v8
79Dos/8AfH3fqejE5gMEVoe+u2fzf/iByjW/3Evx1cnR/wB4Ho+l/wB4/wAv45y7v0xxV2KrSMKr
SMKFhGSQtIwqsIwoWkYVWEYULSMKFhGSVrFVwOBVwOBVwOBKy6P+h3H/ABib9WQnySHnnmkf6APY
t/xE5texPrPwdf2n9A+LByc6t5xTfcEZIIL5p82f8pTrP/Mdc/8AJ5syRyek0/8Adx9w+5+hPkb/
AI4MfzH/ACbTPL3ZsgxV2KuxV2KuxV2KvkL/AJzM/wCU70T/ALZY/wCoiXOw9nf7qX9b9AcfNzTf
8jx/zpWnH/im4/6jHzO1n6f0Oi1H98fd+p6ETmE1o3y/vrdr82/4gcxtd/cycrR/3gej6b/eP8v4
5y7v0wxV2KuxVaRhVaRhQsIySFpGFVhGFC0jCqwjChaRhQsIyStYquBwKuBwKtuT/olx/wAYm/Vk
J8mUXn/mkf7jj/sv+InNp2L/AHh+Dr+0/oHxYGTnWvONDdgMUPmnzb/yletf8x9z/wAnmzIjyelw
f3cfcH6EeRv+ODH8x/ybTPMHZMgxV2KuxV2KuxV2KvkL/nMz/lO9E/7ZY/6iJc7D2d/upf1v0Bx8
3NOPyRH/ADo2mn/im4/6jHzN1fP4/odHqP70+5n5OYjUmHlzfWrb5t/xA5ia7+5k5Wi/vA9H07+8
f5fxzmHfo/FXYq7FXYqtIwqtIwoWEZJC0jCqwjChaRhVYRhQtIwoWEZJWsVXA4Fan/3luP8AjE36
shk5MosD80D/AHGt8m/4gc2XY394fg4Haf0fN56TnXvNLod5VHz/AFYJclD5p83/APKWa3/zH3X/
ACebMiPIPS4P7uPuD9CPI3/HBj+Y/wCTaZ5g7JkGKuxV2KuxV2KuxV8hf85mf8p3on/bLH/URLnY
ezv91L+t+gOPm5p1+SYp5B0w/wDFVx/1GPmZq/q+P6HR6n+9PuZ0TmM0pr5Y31iD5t/xA5h6/wDu
i5mi/vA9G0/7b/LOYd8jsVdirsVdirsVWkYVWkYULCMkhaRhVYRhQtIwqsIwoWkYULCMkrWKul/3
mn/4xt+rK8nJMebB/NA/3Fyf6r/8QObHsb+8Pw+9we0/o+bzgnOxeZVLXe4QfP8AUcjPkmPN81ec
f+Uu1z/toXX/ACebL4fSHpcH0R9wfoP5G/44MfzH/JtM8xdkyDFXYq7FXYq7FXYq+Qv+czP+U70T
/tlj/qIlzsPZ3+6l/W/QHHzc08/JUf8AIPNLP/Fdx/1GSZl6r6z7/wBDpNT/AHh9zNicocdOPKu+
rQ/M/wDEGzB7Q/ui5uh+sPRbEhXappt3zmXfI3mn8w+/FXeon8w+/FWvUj/mH3jFXepH/MPvGKu9
WP8AnH3jFXepF/Ov3jFVpeP+dfvGG1Wl4/51+8YbQtLJ/Mv3jDa0tJT+ZfvGHiCKWnj/ADL/AMEP
64eILS08f5l/4If1w8QRS0qP5l/4If1w8YWlpUfzL/wS/wBceMIorCn+Uv8AwS/1w8YXhKyai289
WXeNgPiB3I+eRnIEJiGFeZx/uKm/1H/4gc2PY/8AefL73B7S+j5vNCc7N5dWsN7uMfP/AIichl+k
so83zX5z/wCUw13/ALaF1/yffL8f0j3PS4foj7g/QbyN/wAcGP5j/k2meYuyZBirsVdirsVdirsV
fIX/ADmZ/wAp3on/AGyx/wBREudh7O/3Uv636A4+bmnv5Lj/AJBxpZ/yLj/qMkzK1X1n3/odJqv7
w+5mZOVOOmvly5jtrwTyAlIzuFpXdSO9Mw9bjM4cI6uVpJiMrLK/8T2H++5fuX/mrNL/ACdk7x+P
g7b85DuLX+JbD/fcv3L/AM1Y/wAnZO8fj4L+ch3Fr/Elj/vuX7l/5qx/k7J3j8fBfzkO4tf4jsf9
9y/cv/NWP8nZO8fj4L+ch3Fo+YrH/fcv3L/zVj/J2TvH4+C/nIdxW/4hsv5JPuX/AJqx/k7J3j8f
BfzkO4tfp+y/kk+5f+asf5Oyd4/HwX85DuLX6es/5JPuX/mrH+TsnePx8F/OQ7i1+nbP+ST7l/5q
x/k7J3j8fBfzkO4tfpy0/kk+5f64/wAnZO8fj4L+ch3Fr9N2n8kn3L/XH+TsnePx8F/OQ7i0datf
5JPuX+uP8nZO8fj4L+ch3Fb+mLX+R/uH9cf5Oyd4/HwX85DuLX6Xtv5H+4f1x/k7J3j8fBfzkO4t
fpa2/lf7h/XH+TsnePx8F/OQ7i0dVt/5X+4f1x/k7J3j8fBfzkO4tHVLf+V/uH9cf5Oyd4/HwX85
DuKW6/dxz6XcKgYFY5DvT+Q++bDs7TSx5Bdbkfe4etzicNvN5sTnWPOojTN7+If63/ETleb6Cyhz
fNnnX/lMte/7aN3/AMn3y/H9I9z02H6B7g/QXyN/xwY/mP8Ak2meYuxZBirsVdirsVdirsVfIX/O
Zn/Kd6J/2yx/1ES52Hs7/dS/rfoDj5uaf/kyP+QZ6Uf8m4/6jJMytT/eH8dHS6r6z7mXk5W4rSyy
JXgxWvWhIxMQVEiOTjdXH+/X/wCCOPAO5eM9603Vz/v1/wDgjh4I9y8Z71pu7n/fz/8ABHDwR7kc
Z71pu7r/AH8//BH+uHw49y8cu9aby6/39J/wR/rh8OPcEccu9ab27/3/ACf8E39cPhx7gjjl3rTe
3f8Av+T/AINv64fDj3BfEl3rTfXn+/5P+Db+uHw49wR4ku8rTfXv/LRJ/wAG39cPhR7gviS7ytN/
e/8ALRJ/wbf1w+FHuCPEl3ladQvv+WiX/g2/rh8KPcEeJLvK06hff8tMv/Bt/XD4Ue4L4ku8rTqN
/wD8tMv/AAbf1w+FDuCPEl3ladRv/wDlpl/4Nv64fBh3D5L4ku8rTqWof8tUv/Bt/XD4MO4fJHiy
7ytOp6h/y1Tf8jG/rh8GHcPkjxZd5aOp6j/y1Tf8jG/rh8GHcPkviy7ypvqN+6lWuZWVhRlLsQQe
xFcIwwHQfJByS7yhScta0Xo++pQj/W/4icq1H0Fnj+p82+d/+Uz1/wD7aN3/AMn3y7F9I9z02H6B
7g/QTyN/xwY/mP8Ak2meZOxZBirsVdirsVdirsVfIX/OZn/Kd6J/2yx/1ES52Hs7/dS/rfoDj5ub
IfybH/ILtJPtcf8AUZLmTqP70/jo6XVfWWVE5FxFpOFVpOFDCLz82fLtrdz2slteGSCRonKpFQlC
VNKyDbbLRjLLgKgfzh8tf8s17/wEX/VXD4ZXwytP5weWv+Wa9/4CL/qrjwFHhlo/m95b/wCWa8/4
CL/qrh4Cvhlo/m75b/5Zrz/gIv8Aqrh4V8Mrf+Vt+XD/AMe15/wEX/VXCIFHhF3/ACtjy6f+Pa8/
4CL/AKqZMYijwy1/ytXy8f8Aj3u/+Ai/6qZYNPJHhl3/ACtPy+f+Pe7/AOAj/wCqmTGll5I8Mtf8
rQ0A/wDHvd/8BH/1UywaKfkjwy7/AJWboR/497r/AICP/qpkx2fPvCOAtf8AKytDP+6Lr/gI/wDq
pkx2bk7x+PgjgLY/MXRT0guf+Bj/AOa8P8nZO8fj4LwFseftIPSG4/4FP+a8f5Pn3j8fBHAUTY+b
dOvbqO2iimWSQkKXVQNhXejHwyGTSSiLNIMSE4JzGYLCcKFpOFCN0PfVYB/rf8QOU6n+7LZi+oPm
7zx/ymvmD/tpXn/J98uxfQPcHpsX0D3B+gfkb/jgx/Mf8m0zzJ2LIMVdirsVdirsVdir5C/5zM/5
TvRP+2WP+oiXOw9nf7qX9b9AcfNzZF+To/5BVpB9rj/qMlzI1H98fx0dNq/qLJycXDWk4ULScKEq
/IbT7OTVvMty0S/Wm1BoRPQcxHVmKqT0BPXNL25M3EdKd52bEUS9s/RNv/O/3j+maC3Zu/RNv/O/
3j+mNq79E2/87/eP6Y2rv0Tb/wA7/eP6Y2rv0Tb/AM7/AHj+mNq79E2/87/eP6Y2rv0Tb/zv94/p
jau/RNv/ADv94/pjau/RNv8Azv8AeP6Y2rv0Tb/zv94/pjau/RNv/O/3j+mNq80/PXTbMeUJmaMP
LbyQvBKwBZC8gRqEU6qc6L2YyyjqwAdpA38nA7RiDiJ7nzykeekEvOpz5cSmsWx9z/xE5jak+gsZ
cmeE5qWhaThQtJwqj/L2+sW4/wBf/iDZRq/7s/jq2YfqD5v89f8AKb+Yf+2nef8AUQ+W4foHuD02
L6R7n6BeRv8Ajgx/Mf8AJtM8zdiyDFXYq7FXYq7FXYq+Qv8AnMz/AJTvRP8Atlj/AKiJc7D2d/up
f1v0Bx83Nkn5Pj/kEujn/mI/6jJcvz/35/HR02r+osjJyThLScKFhOSQgvyCamo+YR46o3/G2aHt
z6o+533Zv0l7pmhdk7FXYq7FXYq7FXYq7FXYq7FXYq8w/PPfytdr7wf8nRm/9m/8bj7pfc4PaP8A
cn4PntI89IJebTXQUpqlufc/8ROY+c+gsZcmZk5rWhaThVaThQmPlrfW7Yf6/wDybbMfWf3R/HVt
wfWHzh58/wCU58xf9tO8/wCoh8twfRH3B6fH9I9z9AfI3/HBj+Y/5NpnmbsGQYq7FXYq7FXYq7FX
yF/zmZ/yneif9ssf9REudh7O/wB1L+t+gOPm5sm/KEf8gh0Y+9x/1GTZdm/vz+OgdPrOZT8nLHAW
E5JC0nCqX/kO9NT8wf8AbUb/AI2zQ9ufVH3O+7N+kvdPUzQ07Jg/5n+a7ny3o9zq0CGY20cREHMx
hvUnEfUA9OVemZmh03jZRC6u/utpz5eCBl3PIv8AoY3V/wDq1j/pKf8A5ozoR7NxP8f2ftdf/KR/
m/ay/wDLf81dQ826lcW0tsbQWypJyWZpOXJuNKELmu7U7JGliJCXFZ7nJ0ur8UkVVPZvUzR05rvU
xpXepjSu9TGld6mNK71MaV3qY0rzP8625eXrlf8AjB/ydGb32c/xuPul9zg9o/3J+DwdI89FJebT
PRkpqEJ9z+o5RmPpLCXJlJOYLStJwoWE4UJp5V31+1H/ABk/5NtmNrf7o/D727T/AFh84efv+U68
x/8AbUvf+oh8swf3cfcHp8f0j3P0B8jf8cGP5j/k2meaOwZBirsVdirsVdirsVfIX/OZn/Kd6J/2
yx/1ES52Hs7/AHUv636A4+bmyf8AKMf8gc0U/wCVcf8AUZNl2b/GD+OgdPrOZTsnLnXrScKrScKE
s/I1qanr3/bTb/jbND22PVH3O/7N+kvb/UzROyeYfny9fJmoj/iu2/6i0zbdiD/CofH/AHJcTW/3
R+H3vmQDPQ4wefep/kEeOuah/wAYov8Ak5nOe1Eaxw/rH7nZdmfUfc+l/UziXcu9TFXepirvUxV3
qYq71MVd6mKvOPzhblolwPaH/k5m79nv8aj7j9zgdo/3J+DxdI89BJebTDTEpeRH3P6jlOQ7MZck
/JzFaFhOFC0nCqbeUd/MVoP+Mn/Jpsxdf/cy+H3hu031h84/mB/ynnmT/tqXv/UQ+Waf+7j/AFR9
z0+P6R7n6AeRv+ODH8x/ybTPNHYMgxV2KuxV2KuxV2KvkL/nMz/lO9E/7ZY/6iJc7D2d/upf1v0B
x83NlP5TD/kC+iH/AC7n/qMmy3L/AIzL8dA6jWcym5OZDrlpOFC0nChKfyUbjqmue+pN/wAbZpO3
h6of1Xf9m/SXtXqZz9Oyeafnm9fKOoD/AIrt/wDqKXNz2CP8Lh/nf7kuJrv7o/D73zaFz0mMHnre
nfkWeOt33/GKP/k5nMe1kaxQ/rH7nZ9l/Ufc+j/UzhKdy71MaV3qY0rvUxpXepjSu9TGld6mNK8/
/NduWlzL7Rf8nM3XYH+NR+P3OD2l/cn4PJEjzvSXmkbYpS4Q/wCfTKpnZjLkmpOUtC0nCq0nJITj
ybv5lsx/xk/5NPmH2h/cy+H3hv0394Hzl+YP/KfeZf8Atq3v/US+Waf+7j/VH3PTw+kPv/yN/wAc
GP5j/k2meaOwZBirsVdirsVdirsVfIX/ADmZ/wAp3on/AGyx/wBREudh7O/3Uv636A4+bmyv8qB/
yBPRD/xZc/8AUZNlmT/GpfjoHUa1MycynWrScKFhOFUn/JxuOqa1/wBtJv8AjbNR7QD1Q/qu+7M+
kvZfUznKdm83/Ox+XlW/H/Fdv/1Erm69nh/hkP8AO/3JcTXf3J+H3vncLnp8YvOPSvyUHDWL0+Mc
f/E85P2u/uof1j9ztOy/qPufQ3qZwVO6d6mNK71MaV3qY0rvUxpXepjSu9TGlYJ+ZjcrGUe0X/E8
3HYX+Mx+P3OB2l/cn4PNEjzuSXmkVbpSRTlZLGXJFk5FpWk5JC0nChOvJG/miyH/ABl/5MvmF2l/
cS+H3hyNL/eD8dHzn+Yf/Kf+Zv8AtrX3/US+T0391H+qPueoh9Iff3kb/jgx/Mf8m0zzVz2QYq7F
XYq7FXYq7FXyF/zmZ/yneif9ssf9REudh7O/3Uv636A4+bmyz8qv/JHaGf8Aiy5/6jJ8nk/xuXu/
QHUa1MCczHWLCcKrScKEk/KN+Gqaz/20W/42zV+0Y3x/1Xfdl/SXr31gZzVO0Yv520E+YLSSwbms
EyIHkjKhgUk9Tbl8hmXodXLTZRliATG+fmKas2IZImJ6sFH5J2Q/3ddffF/TOh/0W5/5kPt/W4P8
lw7ynvlX8v18vXbz25mkMoVX9QpQBWrtxAzV9pdsZNXERkAOHutyNPpI4iSDzei/WBmnpy3fWBjS
u+sDGld9YGNK76wMaV31gY0rvrAxpWGfmA4kt5B/kx/8Tzbdi/4wPj9zgdpf3J+DAkjztCXmldEp
vkbYy5Licm0LScKFhOFU98ib+a7H/nr/AMmXzB7T/wAXl8PvDkaT+8H46PnT8xf/ACYPmf8A7a19
/wBRL5PTf3Uf6o+56iHIPv3yN/xwY/mP+TaZ5q57IMVdirsVdirsVdir5C/5zMB/x1oh7fosf9RE
udh7O/3Uv636A4+bmyz8qv8AyRuh07S3Ffb/AEyfJz/xuXu/QHUa3kjSczXWLScKFpOFDH/ywfhq
OsH/AJf2/W2a72lG+P8AqO+7L+kvT/rXvnMU7R31r3xpXfWvfGld9a98aV31r3xpXfWvfGld9a98
aV31r3xpXfWvfGld9a98aV31r3xpWM+bpPUiYeyf8Szadj/4wPj9zg9pf3J+DFUjzsCXmVVkpGTg
id2MuSHJy9oWE4VWk4UJ95CqfNljQbD1a/8AIl8wO1P8Xl8PvDkaP+8H46PnX8xf/Jg+Z/8AtrX3
/US+T0v91H+qPuephyD798jf8cGP5j/k2meaueyDFXYq7FXYq7FXYq+b/wDnMvyrcXGj6F5ngQtH
YSSWV6QK8VuOLxMfBQ8bLXxYZ0vs7nAlLGeu4+DTmHVif/OOXm+xvdGvfImoTiO5LvdaSXbZlIDS
RINt0ZfUp1ILeGbPtDGYTGUfF12pxcQZ/fafeWUhjuIytDQPT4W+Ry3FljMWC6acDHmhCcta1hOF
Uo/KW39fzBf2/X1dQYU/4LNf7UHfH/Ud92V9Je4/4U/yPwzkuN2tO/wp/kfhjxrTv8Kf5H4Y8a07
/Cn+R+GPGtO/wp/kfhjxrTv8Kf5H4Y8a07/Cn+R+GPGtO/wp/kfhjxrTv8Kf5H4Y8a07/Cn+R+GP
GtO/wp/kfhjxrTz78wrH6lf/AFelKxI1Pmx/pm27GN5x8fucDtP+5PwYmkedcS8wuuEpbufb+OMD
6mMuSWE5ltK0nChyJJK4jjUu7bKqgkk+wGJIAsqBfJldi1p5F0G982+Yf3BjjMdlZsQsskjbqig/
tvxoB2FSds0Wu1H5iQxY9+8u20OlINl82eV7HUPNvny1WWs1zqF4bm8cDqC5lmb2rvT3zK1mUYMB
PdGh9wd/AWafoD5TtzBo6L2LEj5ABf8AjXPPHLTjFXYq7FXYq7FXYql/mDQdL8waLeaLqsIuNPv4
mhuIj3Vu4PZlO6nsd8sxZZY5CUeYQRb4V/NL8oPNv5a656pEs2kiX1NL1uDko+FqpzZf7qVdtvHd
Sc7vQ9o49TGuUusfxzDjTgQmOjf85K/mRp1klrMbLUymy3F5C5loBQAtDJCG+ZFfE4z7KxSN7j3O
OcUSj/8Aoaf8wf8Aq36T/wAibn/soyH8kYu+X2fqR4Ad/wBDT/mD/wBW/Sf+RNz/ANlGP8kYu+X2
fqXwAoN/zkl5puryK6v9OtRJACIHsXmtXUk9SzvcfgBlObsSEuUiPfv+puxejkjP+hnPMn++bz/u
JS/9U8xv9Dw/n/7H9rd4rv8AoZzzJ/vm8/7iUv8A1Tx/0PD+f/sf2r4rv+hnPMn++bz/ALiUv/VP
H/Q8P5/+x/aviu/6Gc8yf75vP+4lL/1Tx/0PD+f/ALH9q+K7/oZzzJ/vm8/7iUv/AFTx/wBDw/n/
AOx/aviu/wChnPMn++bz/uJS/wDVPH/Q8P5/+x/aviu/6Gc8yf75vP8AuJS/9U8f9Dw/n/7H9q+K
7/oZzzJ/vm8/7iUv/VPH/Q8P5/8Asf2r4rv+hnPMn++bz/uJS/8AVPH/AEPD+f8A7H9q+K7/AKGc
8yf75vP+4lL/ANU8f9Dw/n/7H9q+K7/oZzzJ/vm8/wC4lL/1Tx/0PD+f/sf2r4qEm/5yR8yi8jvr
awikvEBQyahNLdjgRSg4mBh1/mPyy7D2FCJ3kT7hX62vJLjFK3/Q0/5g/wDVv0n/AJE3P/ZRmT/J
GLvl9n6nH8AO/wChp/zB/wCrfpP/ACJuf+yjH+SMXfL7P1L4Ad/0NP8AmD/1b9J/5E3P/ZRj/JGL
vl9n6l8AO/6Gn/MH/q36T/yJuf8Asox/kjF3y+z9S+AGj/zlP+YJH/HP0ke/o3P/AGUY/wAkYu+X
2fqXwQwPXvM/nfz/AKxF9emm1O7qRa2cS0jiDHf040AVR0qx32+I5lxhi08L2iO9tjCtg+ifyJ/J
ubQF+u36q+tXajmRusEXXiD+vxNPAE8f2r2l+YlUfoH2+f6nKhCn0XBCkEKQxiiRgKv0ZqGxfirs
VdirsVdirsVdiqhfWFlf2slpewpcW0o4yQyKGVh7g4QSNwryzXP+cZ/yy1G4a4i0xIGY1McTyQrX
5RMo/wCFzYY+1tTAUJn40fvYHGEp/wChVPy+/wCWAf8ASXdf1yf8tar+f9kf1L4cXf8AQqn5ff8A
LAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/
rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n
/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF
3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff
8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r
+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+
f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4c
Xf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cW1/5xW/L
9WDCwWo33urkj7icT2zqv5/2R/UvhxZl5Z/KLy9oKcLG1t7RduRgT42p4sQN/c5g5tRkym5yMmQA
DNrOytrSL04E4j9o9ST7nKUq+KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2K
uxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Ku
xV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Kux
V2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV//2Q==</xapGImg:image>
</rdf:li>
</rdf:Alt>
</xap:Thumbnails>
</rdf:Description>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<xapMM:DocumentID>
uuid:f3c53255-be8a-4b04-817b-695bf2c54c8b</xapMM:DocumentID>
</rdf:Description>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<dc:format>
image/svg+xml</dc:format>
<dc:title>
<rdf:Alt>
<rdf:li
xml:lang="x-default">
filesave.ai</rdf:li>
</rdf:Alt>
</dc:title>
</rdf:Description>
</rdf:RDF>
</x:xmpmeta>
<?xpacket end='w'?>
</metadata>
<g
id="Layer_1">
<path
style="opacity:0.2;"
d="M9.416,5.208c-2.047,0-3.712,1.693-3.712,3.775V39.15c0,2.082,1.666,3.775,3.712,3.775h29.401 c2.047,0,3.712-1.693,3.712-3.775V8.983c0-2.082-1.665-3.775-3.712-3.775H9.416z"
id="path592" />
<path
style="opacity:0.2;"
d="M9.041,4.833c-2.047,0-3.712,1.693-3.712,3.775v30.167c0,2.082,1.666,3.775,3.712,3.775h29.401 c2.047,0,3.712-1.693,3.712-3.775V8.608c0-2.082-1.665-3.775-3.712-3.775H9.041z"
id="path593" />
<path
style="fill:#00008D;"
d="M8.854,4.646c-2.047,0-3.712,1.693-3.712,3.775v30.167c0,2.082,1.666,3.775,3.712,3.775h29.401 c2.047,0,3.712-1.693,3.712-3.775V8.42c0-2.082-1.665-3.775-3.712-3.775H8.854z"
id="path594" />
<path
style="fill:#00008D;"
d="M8.854,5.021c-1.84,0-3.337,1.525-3.337,3.4v30.167c0,1.875,1.497,3.4,3.337,3.4h29.401 c1.84,0,3.337-1.525,3.337-3.4V8.42c0-1.875-1.497-3.4-3.337-3.4H8.854z"
id="path595" />
<path
id="path166_1_"
style="fill:#FFFFFF;"
d="M40.654,38.588c0,1.36-1.074,2.463-2.399,2.463H8.854c-1.326,0-2.4-1.103-2.4-2.463V8.42 c0-1.36,1.074-2.462,2.4-2.462h29.401c1.325,0,2.399,1.103,2.399,2.462V38.588z" />
<linearGradient
id="path166_2_"
gradientUnits="userSpaceOnUse"
x1="-149.0464"
y1="251.1436"
x2="-149.0464"
y2="436.303"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#B4E2FF"
id="stop598" />
<stop
offset="1"
style="stop-color:#006DFF"
id="stop599" />
<a:midPointStop
offset="0"
style="stop-color:#B4E2FF"
id="midPointStop600" />
<a:midPointStop
offset="0.5"
style="stop-color:#B4E2FF"
id="midPointStop601" />
<a:midPointStop
offset="1"
style="stop-color:#006DFF"
id="midPointStop602" />
</linearGradient>
<path
id="path166"
style="fill:url(#path166_2_);"
d="M40.654,38.588c0,1.36-1.074,2.463-2.399,2.463H8.854c-1.326,0-2.4-1.103-2.4-2.463V8.42 c0-1.36,1.074-2.462,2.4-2.462h29.401c1.325,0,2.399,1.103,2.399,2.462V38.588z" />
<path
style="fill:#FFFFFF;"
d="M8.854,6.521c-1.013,0-1.837,0.852-1.837,1.9v30.167c0,1.048,0.824,1.9,1.837,1.9h29.401 c1.013,0,1.837-0.853,1.837-1.9V8.42c0-1.048-0.824-1.9-1.837-1.9H8.854z"
id="path604" />
<linearGradient
id="XMLID_1_"
gradientUnits="userSpaceOnUse"
x1="7.3057"
y1="7.2559"
x2="50.7728"
y2="50.7231">
<stop
offset="0"
style="stop-color:#94CAFF"
id="stop606" />
<stop
offset="1"
style="stop-color:#006DFF"
id="stop607" />
<a:midPointStop
offset="0"
style="stop-color:#94CAFF"
id="midPointStop608" />
<a:midPointStop
offset="0.5"
style="stop-color:#94CAFF"
id="midPointStop609" />
<a:midPointStop
offset="1"
style="stop-color:#006DFF"
id="midPointStop610" />
</linearGradient>
<path
style="fill:url(#XMLID_1_);"
d="M8.854,6.521c-1.013,0-1.837,0.852-1.837,1.9v30.167c0,1.048,0.824,1.9,1.837,1.9h29.401 c1.013,0,1.837-0.853,1.837-1.9V8.42c0-1.048-0.824-1.9-1.837-1.9H8.854z"
id="path611" />
<linearGradient
id="XMLID_2_"
gradientUnits="userSpaceOnUse"
x1="23.5039"
y1="2.187"
x2="23.5039"
y2="34.4368">
<stop
offset="0"
style="stop-color:#428AFF"
id="stop613" />
<stop
offset="1"
style="stop-color:#C9E6FF"
id="stop614" />
<a:midPointStop
offset="0"
style="stop-color:#428AFF"
id="midPointStop615" />
<a:midPointStop
offset="0.5"
style="stop-color:#428AFF"
id="midPointStop616" />
<a:midPointStop
offset="1"
style="stop-color:#C9E6FF"
id="midPointStop617" />
</linearGradient>
<path
style="fill:url(#XMLID_2_);"
d="M36.626,6.861c0,0-26.184,0-26.914,0c0,0.704,0,16.59,0,17.294c0.721,0,26.864,0,27.583,0 c0-0.704,0-16.59,0-17.294C36.988,6.861,36.626,6.861,36.626,6.861z"
id="path618" />
<polygon
id="path186_1_"
style="fill:#FFFFFF;"
points="35.809,6.486 10.221,6.486 10.221,23.405 36.788,23.405 36.788,6.486 " />
<linearGradient
id="path186_2_"
gradientUnits="userSpaceOnUse"
x1="-104.5933"
y1="411.6699"
x2="-206.815"
y2="309.4482"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#CCCCCC"
id="stop621" />
<stop
offset="1"
style="stop-color:#F0F0F0"
id="stop622" />
<a:midPointStop
offset="0"
style="stop-color:#CCCCCC"
id="midPointStop623" />
<a:midPointStop
offset="0.5"
style="stop-color:#CCCCCC"
id="midPointStop624" />
<a:midPointStop
offset="1"
style="stop-color:#F0F0F0"
id="midPointStop625" />
</linearGradient>
<polygon
id="path186"
style="fill:url(#path186_2_);"
points="35.809,6.486 10.221,6.486 10.221,23.405 36.788,23.405 36.788,6.486 " />
<path
style="fill:#FFFFFF;stroke:#FFFFFF;stroke-width:0.1875;"
d="M11.488,7.019c0,0.698,0,14.542,0,15.239c0.716,0,23.417,0,24.133,0c0-0.698,0-14.541,0-15.239 C34.904,7.019,12.204,7.019,11.488,7.019z"
id="path627" />
<linearGradient
id="XMLID_3_"
gradientUnits="userSpaceOnUse"
x1="34.5967"
y1="3.5967"
x2="18.4087"
y2="19.7847">
<stop
offset="0"
style="stop-color:#FFFFFF"
id="stop629" />
<stop
offset="0.5506"
style="stop-color:#E6EDFF"
id="stop630" />
<stop
offset="1"
style="stop-color:#FFFFFF"
id="stop631" />
<a:midPointStop
offset="0"
style="stop-color:#FFFFFF"
id="midPointStop632" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFFFFF"
id="midPointStop633" />
<a:midPointStop
offset="0.5506"
style="stop-color:#E6EDFF"
id="midPointStop634" />
<a:midPointStop
offset="0.5"
style="stop-color:#E6EDFF"
id="midPointStop635" />
<a:midPointStop
offset="1"
style="stop-color:#FFFFFF"
id="midPointStop636" />
</linearGradient>
<path
style="fill:url(#XMLID_3_);stroke:#FFFFFF;stroke-width:0.1875;"
d="M11.488,7.019c0,0.698,0,14.542,0,15.239c0.716,0,23.417,0,24.133,0c0-0.698,0-14.541,0-15.239 C34.904,7.019,12.204,7.019,11.488,7.019z"
id="path637" />
<linearGradient
id="path205_1_"
gradientUnits="userSpaceOnUse"
x1="-174.4409"
y1="300.0908"
x2="-108.8787"
y2="210.2074"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#003399"
id="stop639" />
<stop
offset="0.2697"
style="stop-color:#0035ED"
id="stop640" />
<stop
offset="1"
style="stop-color:#57ADFF"
id="stop641" />
<a:midPointStop
offset="0"
style="stop-color:#003399"
id="midPointStop642" />
<a:midPointStop
offset="0.5"
style="stop-color:#003399"
id="midPointStop643" />
<a:midPointStop
offset="0.2697"
style="stop-color:#0035ED"
id="midPointStop644" />
<a:midPointStop
offset="0.5"
style="stop-color:#0035ED"
id="midPointStop645" />
<a:midPointStop
offset="1"
style="stop-color:#57ADFF"
id="midPointStop646" />
</linearGradient>
<rect
id="path205"
x="12.154"
y="26.479"
style="fill:url(#path205_1_);"
width="22.007"
height="13.978" />
<linearGradient
id="XMLID_4_"
gradientUnits="userSpaceOnUse"
x1="21.8687"
y1="25.1875"
x2="21.8687"
y2="44.6251">
<stop
offset="0"
style="stop-color:#DFDFDF"
id="stop649" />
<stop
offset="1"
style="stop-color:#7D7D99"
id="stop650" />
<a:midPointStop
offset="0"
style="stop-color:#DFDFDF"
id="midPointStop651" />
<a:midPointStop
offset="0.5"
style="stop-color:#DFDFDF"
id="midPointStop652" />
<a:midPointStop
offset="1"
style="stop-color:#7D7D99"
id="midPointStop653" />
</linearGradient>
<path
style="fill:url(#XMLID_4_);"
d="M13.244,27.021c-0.311,0-0.563,0.252-0.563,0.563v13.104c0,0.312,0.252,0.563,0.563,0.563h17.249 c0.311,0,0.563-0.251,0.563-0.563V27.583c0-0.311-0.252-0.563-0.563-0.563H13.244z M18.85,30.697c0,0.871,0,5.078,0,5.949 c-0.683,0-2.075,0-2.759,0c0-0.871,0-5.078,0-5.949C16.775,30.697,18.167,30.697,18.85,30.697z"
id="path654" />
<linearGradient
id="XMLID_5_"
gradientUnits="userSpaceOnUse"
x1="-158.0337"
y1="288.0684"
x2="-158.0337"
y2="231.3219"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#F0F0F0"
id="stop656" />
<stop
offset="0.6348"
style="stop-color:#CECEDB"
id="stop657" />
<stop
offset="0.8595"
style="stop-color:#B1B1C5"
id="stop658" />
<stop
offset="1"
style="stop-color:#FFFFFF"
id="stop659" />
<a:midPointStop
offset="0"
style="stop-color:#F0F0F0"
id="midPointStop660" />
<a:midPointStop
offset="0.5"
style="stop-color:#F0F0F0"
id="midPointStop661" />
<a:midPointStop
offset="0.6348"
style="stop-color:#CECEDB"
id="midPointStop662" />
<a:midPointStop
offset="0.5"
style="stop-color:#CECEDB"
id="midPointStop663" />
<a:midPointStop
offset="0.8595"
style="stop-color:#B1B1C5"
id="midPointStop664" />
<a:midPointStop
offset="0.5"
style="stop-color:#B1B1C5"
id="midPointStop665" />
<a:midPointStop
offset="1"
style="stop-color:#FFFFFF"
id="midPointStop666" />
</linearGradient>
<path
style="fill:url(#XMLID_5_);"
d="M13.244,27.583v13.104h17.249V27.583H13.244z M19.413,37.209h-3.884v-7.074h3.884V37.209z"
id="path667" />
<linearGradient
id="path228_1_"
gradientUnits="userSpaceOnUse"
x1="-68.1494"
y1="388.4561"
x2="-68.1494"
y2="404.6693"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#3399FF"
id="stop669" />
<stop
offset="1"
style="stop-color:#000000"
id="stop670" />
<a:midPointStop
offset="0"
style="stop-color:#3399FF"
id="midPointStop671" />
<a:midPointStop
offset="0.5"
style="stop-color:#3399FF"
id="midPointStop672" />
<a:midPointStop
offset="1"
style="stop-color:#000000"
id="midPointStop673" />
</linearGradient>
<rect
id="path228"
x="37.83"
y="9.031"
style="fill:url(#path228_1_);"
width="1.784"
height="1.785" />
<polyline
id="_x3C_Slice_x3E_"
style="fill:none;"
points="0,48 0,0 48,0 48,48 " />
</g>
<rect
id="rect810"
fill="none"
width="256"
height="256"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:none;" />
<g
id="g979"
transform="matrix(0.207200,1.691268,-1.691268,0.207200,86.28419,53.75496)">
<path
opacity="0.2"
d="M191.924,195.984c-11.613-36.127-13.717-42.67-14.859-44.064c0.119,0.076,0.289,0.178,0.289,0.178 l-78.55-87.455c-4.195-4.65-14.005,0.356-21.355,6.976c-7.283,6.542-13.32,15.773-9.37,20.564l78.944,87.543l0.533,0.094 l37.768,17.602l7.688,2.365L191.924,195.984z"
id="path731"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;opacity:0.2;" />
<path
opacity="0.2"
d="M193.557,193.516c-11.611-36.125-13.713-42.67-14.855-44.064c0.117,0.072,0.287,0.178,0.287,0.178 l-78.545-87.455c-4.199-4.651-14.015,0.355-21.361,6.975c-7.281,6.545-13.32,15.773-9.368,20.566l78.945,87.539l0.533,0.1 l37.77,17.598l7.682,2.367L193.557,193.516z"
id="path732"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;opacity:0.2;" />
<path
opacity="0.2"
d="M186.773,191.049c-11.613-36.127-13.713-42.672-14.863-44.068c0.121,0.074,0.295,0.18,0.295,0.18 L93.653,59.704c-4.192-4.65-14.009,0.359-21.354,6.978c-7.283,6.542-13.321,15.771-9.369,20.565l78.942,87.541l0.535,0.096 l37.768,17.598l7.686,2.367L186.773,191.049z"
id="path733"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;opacity:0.2;" />
<path
fill="#FFFFFF"
d="M186.43,189.355c-11.613-36.125-13.713-42.666-14.863-44.061c0.123,0.072,0.293,0.18,0.293,0.18 L93.314,58.016c-4.199-4.651-14.015,0.357-21.359,6.977c-7.283,6.543-13.322,15.774-9.37,20.566l78.941,87.541l0.535,0.098 l37.771,17.598l7.686,2.363L186.43,189.355z"
id="path734"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:#ffffff;" />
<path
fill="url(#XMLID_11_)"
d="M186.43,189.355c-11.613-36.125-13.713-42.666-14.863-44.061c0.123,0.072,0.293,0.18,0.293,0.18 L93.314,58.016c-4.199-4.651-14.015,0.357-21.359,6.977c-7.283,6.543-13.322,15.774-9.37,20.566l78.941,87.541l0.535,0.098 l37.771,17.598l7.686,2.363L186.43,189.355z"
id="path741"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:url(#XMLID_11_);" />
<path
fill="url(#XMLID_12_)"
d="M166.969,147.762l13.723,38.129l-36.371-17.902l0.168-0.152c-0.25-0.08-0.496-0.178-0.701-0.316 l-0.125,0.121l-75.303-83.57l0.123-0.104c-2.246-2.49,1.032-9.094,7.308-14.752c6.28-5.652,13.18-8.219,15.425-5.733 l75.292,83.565L166.969,147.762z"
id="path748"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:url(#XMLID_12_);" />
<path
fill="url(#XMLID_13_)"
d="M148.652,170.121c2.076-0.369,4.635-1.479,7.252-3.139c1.617-1.018,3.279-2.283,4.898-3.744 c1.455-1.303,2.736-2.666,3.84-4.01c2.076-2.531,3.322-5.213,3.781-7.424l-1.455-4.043l-0.463-0.715L91.707,64.028 c0.608,2.24-0.962,5.938-4.063,9.74c-1.134,1.389-2.441,2.789-3.945,4.141c-1.574,1.419-3.195,2.652-4.767,3.654 c-4.493,2.871-8.628,3.928-10.548,2.486l-0.025,0.021l75.303,83.57l0.125-0.121c0.205,0.139,0.451,0.236,0.701,0.316 l-0.168,0.152L148.652,170.121z"
id="path758"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:url(#XMLID_13_);" />
<path
fill="#FFFFFF"
d="M68.083,83.41c1.732,1.772,5.994,0.776,10.643-2.194c1.541-0.982,3.132-2.193,4.677-3.586 c1.476-1.325,2.759-2.701,3.872-4.063c3.578-4.388,5.091-8.642,3.477-10.584l0.023-0.024l75.817,84.119 c0.635,2.262-0.588,6.498-3.754,10.357c-1.082,1.318-2.34,2.656-3.77,3.934c-1.588,1.434-3.219,2.676-4.807,3.676 c-4.74,3.006-9.303,4.199-11.016,2.301c-0.393-0.439-2.098-2.336-2.145-2.406L67.845,83.626L68.083,83.41z"
id="path759"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:#ffffff;" />
<path
fill="#FFFFFF"
d="M75.79,69.215c6.28-5.652,13.18-8.219,15.425-5.733l16.961,18.828l1.152,26.49l-17.973,0.784 L68.359,84.071l0.123-0.104C66.236,81.477,69.514,74.874,75.79,69.215z"
id="path760"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:#ffffff;" />
<path
fill="#FFFFFF"
d="M68.083,83.41c1.732,1.772,5.994,0.776,10.643-2.194c1.541-0.982,3.132-2.193,4.677-3.586 c1.476-1.325,2.759-2.701,3.872-4.063c3.578-4.388,5.091-8.642,3.477-10.584l0.023-0.024l75.817,84.119 c0.635,2.262-0.588,6.498-3.754,10.357c-1.082,1.318-2.34,2.656-3.77,3.934c-1.588,1.434-3.219,2.676-4.807,3.676 c-4.74,3.006-9.303,4.199-11.016,2.301c-0.393-0.439-2.098-2.336-2.145-2.406L67.845,83.626L68.083,83.41z"
id="path761"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:#ffffff;" />
<path
fill="url(#XMLID_14_)"
d="M75.79,69.215c6.28-5.652,13.18-8.219,15.425-5.733l16.961,18.828l1.152,26.49l-17.973,0.784 L68.359,84.071l0.123-0.104C66.236,81.477,69.514,74.874,75.79,69.215z"
id="path768"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:url(#XMLID_14_);" />
<path
fill="url(#XMLID_15_)"
d="M68.083,83.41c1.732,1.772,5.994,0.776,10.643-2.194c1.541-0.982,3.132-2.193,4.677-3.586 c1.476-1.325,2.759-2.701,3.872-4.063c3.578-4.388,5.091-8.642,3.477-10.584l0.023-0.024l75.817,84.119 c0.635,2.262-0.588,6.498-3.754,10.357c-1.082,1.318-2.34,2.656-3.77,3.934c-1.588,1.434-3.219,2.676-4.807,3.676 c-4.74,3.006-9.303,4.199-11.016,2.301c-0.393-0.439-2.098-2.336-2.145-2.406L67.845,83.626L68.083,83.41z"
id="path778"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:url(#XMLID_15_);" />
<path
fill="url(#XMLID_16_)"
d="M74.357,90.713c0,0,6.036-0.212,10.685-3.182c1.542-0.983,3.132-2.193,4.677-3.586 c1.477-1.326,2.76-2.701,3.873-4.064c2.928-3.589,4.469-7.088,4.049-9.307l-6.865-7.617l-0.023,0.024 c1.614,1.942,0.102,6.196-3.477,10.584c-1.113,1.362-2.396,2.738-3.872,4.063c-1.545,1.393-3.136,2.604-4.677,3.586 c-4.648,2.971-8.91,3.967-10.643,2.194l-0.238,0.217l73.256,81.311c0.047,0.07,1.752,1.967,2.145,2.406 c0.342,0.377,0.799,0.627,1.344,0.771L74.357,90.713z"
id="path790"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:url(#XMLID_16_);" />
<path
fill="#003333"
d="M172.035,175.354c-1.635,1.477-3.307,2.764-4.949,3.84l13.605,6.697l-5.096-14.156 C174.537,172.953,173.352,174.176,172.035,175.354z"
id="path791"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;fill:#003333;" />
<path
opacity="0.5"
fill="#FFFFFF"
d="M163.121,157.053L86.968,73.93c0.1-0.12,0.213-0.242,0.307-0.364 c1.428-1.752,2.52-3.49,3.225-5.058l75.768,82.707C165.715,153.039,164.668,155.082,163.121,157.053z"
id="path792"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;opacity:0.5;fill:#ffffff;" />
<path
opacity="0.5"
fill="#FFFFFF"
d="M87.275,73.566c0.634-0.774,1.189-1.548,1.694-2.3l76.015,82.974 c-0.578,1.063-1.283,2.146-2.146,3.193c-0.744,0.896-1.566,1.805-2.465,2.697L84.152,76.932 C85.316,75.824,86.361,74.692,87.275,73.566z"
id="path793"
transform="matrix(0.125000,0.000000,0.000000,0.125000,-41.51768,12.75884)"
style="font-size:12;opacity:0.5;fill:#ffffff;" />
</g>
</svg>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
<svg
width="48pt"
height="48pt"
viewBox="0 0 48 48"
style="overflow:visible;enable-background:new 0 0 48 48"
xml:space="preserve"
xmlns="http://www.w3.org/2000/svg"
xmlns:xap="http://ns.adobe.com/xap/1.0/"
xmlns:xapGImg="http://ns.adobe.com/xap/1.0/g/img/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:xapMM="http://ns.adobe.com/xap/1.0/mm/"
xmlns:pdf="http://ns.adobe.com/pdf/1.3/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
xmlns:x="adobe:ns:meta/"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:xlink="http://www.w3.org/1999/xlink"
id="svg589"
sodipodi:version="0.32"
sodipodi:docname="/home/david/Desktop/temp/devices/gnome-dev-floppy.svg"
sodipodi:docbase="/home/david/Desktop/temp/devices/">
<defs
id="defs677" />
<sodipodi:namedview
id="base" />
<metadata
id="metadata590">
<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?>
<x:xmpmeta
x:xmptk="XMP toolkit 3.0-29, framework 1.6">
<rdf:RDF>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<pdf:Producer>
Adobe PDF library 5.00</pdf:Producer>
</rdf:Description>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998" />
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998" />
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<xap:CreateDate>
2004-02-04T02:08:51+02:00</xap:CreateDate>
<xap:ModifyDate>
2004-03-29T09:20:16Z</xap:ModifyDate>
<xap:CreatorTool>
Adobe Illustrator 10.0</xap:CreatorTool>
<xap:MetadataDate>
2004-02-29T14:54:28+01:00</xap:MetadataDate>
<xap:Thumbnails>
<rdf:Alt>
<rdf:li
rdf:parseType="Resource">
<xapGImg:format>
JPEG</xapGImg:format>
<xapGImg:width>
256</xapGImg:width>
<xapGImg:height>
256</xapGImg:height>
<xapGImg:image>
/9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA
AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK
DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f
Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgBAAEAAwER
AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA
AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB
UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE
1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ
qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy
obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp
0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo
+DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FXYq7FXYq7FXYq7
FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7F
XYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FX
Yq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXY
q7FXzd+b/wDzlWum3k+h+QxFc3EJMdzrkoEkKuNiLZPsyU/nb4fAEb50vZ/YXEBPLsP5v62meXue
A3v5mfmprl080vmLVriXdjHBcTIi17rFCVRfoXOghocEBQhH5NJmepUf8Tfmj/1dtb/6SLv/AJqy
f5fD/Nj8gjxPN3+JvzR/6u2t/wDSRd/81Y/l8P8ANj8gviebv8Tfmj/1dtb/AOki7/5qx/L4f5sf
kF8Tzd/ib80f+rtrf/SRd/8ANWP5fD/Nj8gviebv8Tfmj/1dtb/6SLv/AJqx/L4f5sfkF8Tzd/ib
80f+rtrf/SRd/wDNWP5fD/Nj8gviebv8Tfmj/wBXbW/+ki7/AOasfy+H+bH5BfE83f4m/NH/AKu2
t/8ASRd/81Y/l8P82PyC+J5u/wATfmj/ANXbW/8ApIu/+asfy+H+bH5BfE83f4m/NH/q7a3/ANJF
3/zVj+Xw/wA2PyC+J5u/xN+aP/V21v8A6SLv/mrH8vh/mx+QXxPN3+JvzR/6u2t/9JF3/wA1Y/l8
P82PyC+J5u/xN+aP/V21v/pIu/8AmrH8vh/mx+QXxPN3+JvzR/6u2t/9JF3/AM1Y/l8P82PyC+J5
u/xN+aP/AFdtb/6SLv8A5qx/L4f5sfkF8Tzd/ib80f8Aq7a3/wBJF3/zVj+Xw/zY/IL4nm7/ABN+
aP8A1dtb/wCki7/5qx/L4f5sfkF8Tzd/ib80f+rtrf8A0kXf/NWP5fD/ADY/IL4nm7/E35o/9XbW
/wDpIu/+asfy+H+bH5BfE82j5t/M+Aes2ta3EI/i9U3N2vGnfly2x/LYT/DH5BePzZ15C/5yh/Mb
y7cxRaxcHzDpQIEsF2f9IC9zHc058v8AX5D9ea/VdiYcg9I4JeXL5NkchD688jeefLvnby/DrmhT
+rayEpLE4CywygAtFKtTxYV+RG4qDnH6nTTwT4JjdyIytkGY6XYq7FXYq7FXYq7FXjX/ADlH+YV1
5W8hppunymHU/MMj2qSqaMltGoNwynxPNE/2WbrsPSDLl4pfTDf49GvJKg+VPy+8lP5ivecqM9rG
4jWFaqZpTvw57cVUULGvcfMdtYFk7Ac3Ua3VHGAI/XLk+jNK/LfSLS0SK4JYqDSGCkUCV3PBVAPX
vtXwzWT7TlfoAA+11f5Xi3mTIo608meV/wBL2lnLbSSLcc/92sB8Kk70IOU5+0s4xSmCPT5NuDRY
pZBEjmyu2/KnydcFgliF4ip5TT/wY5ov5f1f877B+p2/8kaf+b9pVv8AlT3lL/lkT/kdcf1w/wAv
az+d9kf1I/kjTfzftLR/J/yl/wAsif8AI65/rj/L2s/nfZH9S/yRpv5v2lafyg8p/wDLKn/I65/r
h/l3Wfzvsj+pf5J03837S0fyh8p/8sqf8jrn+uP8u6z+d9kf1L/JOm/m/aWj+UXlP/llj/5HXP8A
XH+XdZ/O+yP6l/knTfzftLX/ACqPyn/yzR/8jrn+uH+XNb/O+yP6l/knTd32lr/lUflX/lmj/wCR
1z/XB/Lmt/nfZH9S/wAk6bu+0u/5VD5W/wCWaP8A5HXP9cf5d1n877I/qX+SdN/N+0u/5VB5Y/5Z
ov8Akdc/1x/l3Wfzvsj+pf5J03837S7/AJU/5a/5Zov+R1z/AFx/l3Wfzvsj+pf5J03837S7/lT3
lv8A5Zov+R1z/XB/L2s/nfZH9S/yRpv5v2l3/KnfLv8AyzRf8jrn+uP8vaz+d9kf1L/JGm/m/aXf
8qc8v/8ALNF/yOuf64/y9rP532R/Uv8AJGm/m/aXf8qb0H/lmh/5HXP9cf5f1n877I/qX+SNN/N+
0u/5U1oP/LND/wAjrn+uD+X9Z/O+wfqT/JGn/m/aVk/5P6BDBJM1rEVjUswE1xWg8KnH/RBq/wCd
9g/Uv8kaf+b9pYp5i8oeXLOGBoLQo0j8SRJIe3+Uxza9ldq6jNKQnLkO4Ov1/Z2HGAYj7SkreXdK
IoEZD/Mrmo+Vaj8M3I1eR1fgRee/mD+W8NxE91ZIPrhq0UygL6rbt6ctNubfssevy6XwmJjbYjo5
ml1csUhGRuB+xJP+cfvzGvfJvny1T1T+iNXdLTUbcn4SWNIpPZkduvgTmq7Z0gy4Sf4obj9L0WOV
F93xSJLGsiGqOAyn2O+cK5K7FXYq7FXYq7FXYq+R/wDnM65lbzjoFsT+6i05pEG/2pJ2VvbpGM6/
2cH7uR/pfocfNzb/ACCs7caXZzBAJPQuJS3fn9ZMXL/gNs2uvkRirvl+h0GffUm+kfx972EnNKyU
LXfzNpZ/4y/8QOOo/wAWn8PvbdN/fRei6SPjl/1R+vOWDvyjyMsQsIwoWkYVWEYULSMKFhGSVrFV
wOBVwOBVwOBK4HFVwOBK4HAq4HAlcDgVQ1I/7jrn/jE36siUh5X5uH+j23tL/DN52F9U/c6vtX6Q
x0nOidEgNZodNmBAP2aE9jzG4+jL9P8AWGrL9JfNGuSmDzPqEsICGK9maNRsF4ykgCnhmRKArhel
08iccT5B+iHk+4afQbcsalBx+8Bv+Ns8wdknWKuxV2KuxV2KuxV8hf8AOZn/ACneif8AbLH/AFES
52Hs7/dS/rfoDj5uaO/IUf7gbI/8ulx/1GnNlr/7v/O/Q6DN/jEv6v6nqxOahksshXzJpv8Az0/4
gcjqf8Xn8PvbdL/exei6SPjk/wBUfrzlw9AmBGTYrSMKrCMKFpGFVhGFC0jChYRklaxVcDgVcDgV
cDgSuBxVcDgSuBwKuBwJUdRP+4+5/wCMTfqyJSHlvmwf6Lb+0n8M3XYX1S9zq+1fpDwzzXoX1nzD
eT8a82U1/wBgBm1y6fikS6qGfhFJt5T076lomoJSnOSM/dTMzQYuCTj6rJxh4h5k/wCUi1T/AJjJ
/wDk62bM83fab+6j/VH3P0N8jf8AHBj+Y/5NpnlztGQYq7FXYq7FXYq7FXyF/wA5mf8AKd6J/wBs
sf8AURLnYezv91L+t+gOPm5ph+Q4/wCddsj/AMutx/1Gtmx1/wBH+d+h0Gb/ABiX9X9T1InNUl2n
b+Y9P/56f8QOQ1X+Lz+H3t+l/vYvRtJH7yT/AFR+vOWDv0xIySFhGSQtIwqsIwoWkYVWEYULSMKF
hGSVrFVwOBVwOBVwOBK4HFVwOBK4HAqjf/8AHPuf+MTfqyEkh5j5rH+iQ/65/Uc3XYf1y9zre1Pp
DDpbGzkcu8QZ26k50weeMQoXVvDDZyrEgQNQkD5jLMX1BhMbPmrzN/ykmrf8xlx/ydbMp6XTf3cf
6o+5+hnkb/jgx/Mf8m0zy52bIMVdirsVdirsVdir5C/5zM/5TvRP+2WP+oiXOw9nf7qX9b9AcfNz
TL8iR/zrFif+Xa4/6jWzYa76f879Doc/9/L3fqenE5rEL9KFfMNh85P+IHK9X/cT+H3uRpP72L0f
SR+8k/1f45yzv0xIwqtIwoWEZJC0jCqwjChaRhVYRhQtIwoWEZJWsVXA4FXA4FXA4ErgcVXA4EqV
9/vBc/8AGJv1ZCXJIea+ah/ocfsx/wCInNx2H9cvcHW9qfQGIE507z6HvN7dx8v1jLMfNhPk+Z/N
H/KTav8A8xtx/wAnWzJek0/93H+qPufoX5G/44MfzH/JtM8vdmyDFXYq7FXYq7FXYq+Qv+czP+U7
0T/tlj/qIlzsPZ3+6l/W/QHHzc0z/Isf86nYH/l3uP8AqNbM/W8v879Doc/9/L3fqelk5rkK2j76
/ZfN/wDiBynWf3Evx1cjSf3oej6UP3r/AOr/ABzl3fpliq0jCq0jChYRkkLSMKrCMKFpGFVhGFC0
jChYRklaxVcDgVcDgVcDgSuBxVTvP94rn/jE36shPkyDzjzUP9BX5n/iJzbdifXL4Ou7U+gfFhhO
dS86pXG8TD5frycebGXJ8z+av+Un1j/mNuf+TrZkh6TT/wB3H+qPufoV5G/44MfzH/JtM8vdmyDF
XYq7FXYq7FXYq+Qv+czP+U70T/tlj/qIlzsPZ3+6l/W/QHHzc01/I0f86fp5/wCKLj/qNbM7W8v8
79Dos/8AfH3fqejE5gMEVoe+u2fzf/iByjW/3Evx1cnR/wB4Ho+l/wB4/wAv45y7v0xxV2KrSMKr
SMKFhGSQtIwqsIwoWkYVWEYULSMKFhGSVrFVwOBVwOBVwOBKy6P+h3H/ABib9WQnySHnnmkf6APY
t/xE5texPrPwdf2n9A+LByc6t5xTfcEZIIL5p82f8pTrP/Mdc/8AJ5syRyek0/8Adx9w+5+hPkb/
AI4MfzH/ACbTPL3ZsgxV2KuxV2KuxV2KvkL/AJzM/wCU70T/ALZY/wCoiXOw9nf7qX9b9AcfNzTf
8jx/zpWnH/im4/6jHzO1n6f0Oi1H98fd+p6ETmE1o3y/vrdr82/4gcxtd/cycrR/3gej6b/eP8v4
5y7v0wxV2KuxVaRhVaRhQsIySFpGFVhGFC0jCqwjChaRhQsIyStYquBwKuBwKtuT/olx/wAYm/Vk
J8mUXn/mkf7jj/sv+InNp2L/AHh+Dr+0/oHxYGTnWvONDdgMUPmnzb/yletf8x9z/wAnmzIjyelw
f3cfcH6EeRv+ODH8x/ybTPMHZMgxV2KuxV2KuxV2KvkL/nMz/lO9E/7ZY/6iJc7D2d/upf1v0Bx8
3NOPyRH/ADo2mn/im4/6jHzN1fP4/odHqP70+5n5OYjUmHlzfWrb5t/xA5ia7+5k5Wi/vA9H07+8
f5fxzmHfo/FXYq7FXYqtIwqtIwoWEZJC0jCqwjChaRhVYRhQtIwoWEZJWsVXA4Fan/3luP8AjE36
shk5MosD80D/AHGt8m/4gc2XY394fg4Haf0fN56TnXvNLod5VHz/AFYJclD5p83/APKWa3/zH3X/
ACebMiPIPS4P7uPuD9CPI3/HBj+Y/wCTaZ5g7JkGKuxV2KuxV2KuxV8hf85mf8p3on/bLH/URLnY
ezv91L+t+gOPm5p1+SYp5B0w/wDFVx/1GPmZq/q+P6HR6n+9PuZ0TmM0pr5Y31iD5t/xA5h6/wDu
i5mi/vA9G0/7b/LOYd8jsVdirsVdirsVWkYVWkYULCMkhaRhVYRhQtIwqsIwoWkYULCMkrWKul/3
mn/4xt+rK8nJMebB/NA/3Fyf6r/8QObHsb+8Pw+9we0/o+bzgnOxeZVLXe4QfP8AUcjPkmPN81ec
f+Uu1z/toXX/ACebL4fSHpcH0R9wfoP5G/44MfzH/JtM8xdkyDFXYq7FXYq7FXYq+Qv+czP+U70T
/tlj/qIlzsPZ3+6l/W/QHHzc08/JUf8AIPNLP/Fdx/1GSZl6r6z7/wBDpNT/AHh9zNicocdOPKu+
rQ/M/wDEGzB7Q/ui5uh+sPRbEhXappt3zmXfI3mn8w+/FXeon8w+/FWvUj/mH3jFXepH/MPvGKu9
WP8AnH3jFXepF/Ov3jFVpeP+dfvGG1Wl4/51+8YbQtLJ/Mv3jDa0tJT+ZfvGHiCKWnj/ADL/AMEP
64eILS08f5l/4If1w8QRS0qP5l/4If1w8YWlpUfzL/wS/wBceMIorCn+Uv8AwS/1w8YXhKyai289
WXeNgPiB3I+eRnIEJiGFeZx/uKm/1H/4gc2PY/8AefL73B7S+j5vNCc7N5dWsN7uMfP/AIichl+k
so83zX5z/wCUw13/ALaF1/yffL8f0j3PS4foj7g/QbyN/wAcGP5j/k2meYuyZBirsVdirsVdirsV
fIX/ADmZ/wAp3on/AGyx/wBREudh7O/3Uv636A4+bmnv5Lj/AJBxpZ/yLj/qMkzK1X1n3/odJqv7
w+5mZOVOOmvly5jtrwTyAlIzuFpXdSO9Mw9bjM4cI6uVpJiMrLK/8T2H++5fuX/mrNL/ACdk7x+P
g7b85DuLX+JbD/fcv3L/AM1Y/wAnZO8fj4L+ch3Fr/Elj/vuX7l/5qx/k7J3j8fBfzkO4tf4jsf9
9y/cv/NWP8nZO8fj4L+ch3Fo+YrH/fcv3L/zVj/J2TvH4+C/nIdxW/4hsv5JPuX/AJqx/k7J3j8f
BfzkO4tfp+y/kk+5f+asf5Oyd4/HwX85DuLX6es/5JPuX/mrH+TsnePx8F/OQ7i1+nbP+ST7l/5q
x/k7J3j8fBfzkO4tfpy0/kk+5f64/wAnZO8fj4L+ch3Fr9N2n8kn3L/XH+TsnePx8F/OQ7i0datf
5JPuX+uP8nZO8fj4L+ch3Fb+mLX+R/uH9cf5Oyd4/HwX85DuLX6Xtv5H+4f1x/k7J3j8fBfzkO4t
fpa2/lf7h/XH+TsnePx8F/OQ7i0dVt/5X+4f1x/k7J3j8fBfzkO4tHVLf+V/uH9cf5Oyd4/HwX85
DuKW6/dxz6XcKgYFY5DvT+Q++bDs7TSx5Bdbkfe4etzicNvN5sTnWPOojTN7+If63/ETleb6Cyhz
fNnnX/lMte/7aN3/AMn3y/H9I9z02H6B7g/QXyN/xwY/mP8Ak2meYuxZBirsVdirsVdirsVfIX/O
Zn/Kd6J/2yx/1ES52Hs7/dS/rfoDj5uaf/kyP+QZ6Uf8m4/6jJMytT/eH8dHS6r6z7mXk5W4rSyy
JXgxWvWhIxMQVEiOTjdXH+/X/wCCOPAO5eM9603Vz/v1/wDgjh4I9y8Z71pu7n/fz/8ABHDwR7kc
Z71pu7r/AH8//BH+uHw49y8cu9aby6/39J/wR/rh8OPcEccu9ab27/3/ACf8E39cPhx7gjjl3rTe
3f8Av+T/AINv64fDj3BfEl3rTfXn+/5P+Db+uHw49wR4ku8rTfXv/LRJ/wAG39cPhR7gviS7ytN/
e/8ALRJ/wbf1w+FHuCPEl3ladQvv+WiX/g2/rh8KPcEeJLvK06hff8tMv/Bt/XD4Ue4L4ku8rTqN
/wD8tMv/AAbf1w+FDuCPEl3ladRv/wDlpl/4Nv64fBh3D5L4ku8rTqWof8tUv/Bt/XD4MO4fJHiy
7ytOp6h/y1Tf8jG/rh8GHcPkjxZd5aOp6j/y1Tf8jG/rh8GHcPkviy7ypvqN+6lWuZWVhRlLsQQe
xFcIwwHQfJByS7yhScta0Xo++pQj/W/4icq1H0Fnj+p82+d/+Uz1/wD7aN3/AMn3y7F9I9z02H6B
7g/QTyN/xwY/mP8Ak2meZOxZBirsVdirsVdirsVfIX/OZn/Kd6J/2yx/1ES52Hs7/dS/rfoDj5ub
IfybH/ILtJPtcf8AUZLmTqP70/jo6XVfWWVE5FxFpOFVpOFDCLz82fLtrdz2slteGSCRonKpFQlC
VNKyDbbLRjLLgKgfzh8tf8s17/wEX/VXD4ZXwytP5weWv+Wa9/4CL/qrjwFHhlo/m95b/wCWa8/4
CL/qrh4Cvhlo/m75b/5Zrz/gIv8Aqrh4V8Mrf+Vt+XD/AMe15/wEX/VXCIFHhF3/ACtjy6f+Pa8/
4CL/AKqZMYijwy1/ytXy8f8Aj3u/+Ai/6qZYNPJHhl3/ACtPy+f+Pe7/AOAj/wCqmTGll5I8Mtf8
rQ0A/wDHvd/8BH/1UywaKfkjwy7/AJWboR/497r/AICP/qpkx2fPvCOAtf8AKytDP+6Lr/gI/wDq
pkx2bk7x+PgjgLY/MXRT0guf+Bj/AOa8P8nZO8fj4LwFseftIPSG4/4FP+a8f5Pn3j8fBHAUTY+b
dOvbqO2iimWSQkKXVQNhXejHwyGTSSiLNIMSE4JzGYLCcKFpOFCN0PfVYB/rf8QOU6n+7LZi+oPm
7zx/ymvmD/tpXn/J98uxfQPcHpsX0D3B+gfkb/jgx/Mf8m0zzJ2LIMVdirsVdirsVdir5C/5zM/5
TvRP+2WP+oiXOw9nf7qX9b9AcfNzZF+To/5BVpB9rj/qMlzI1H98fx0dNq/qLJycXDWk4ULScKEq
/IbT7OTVvMty0S/Wm1BoRPQcxHVmKqT0BPXNL25M3EdKd52bEUS9s/RNv/O/3j+maC3Zu/RNv/O/
3j+mNq79E2/87/eP6Y2rv0Tb/wA7/eP6Y2rv0Tb/AM7/AHj+mNq79E2/87/eP6Y2rv0Tb/zv94/p
jau/RNv/ADv94/pjau/RNv8Azv8AeP6Y2rv0Tb/zv94/pjau/RNv/O/3j+mNq80/PXTbMeUJmaMP
LbyQvBKwBZC8gRqEU6qc6L2YyyjqwAdpA38nA7RiDiJ7nzykeekEvOpz5cSmsWx9z/xE5jak+gsZ
cmeE5qWhaThQtJwqj/L2+sW4/wBf/iDZRq/7s/jq2YfqD5v89f8AKb+Yf+2nef8AUQ+W4foHuD02
L6R7n6BeRv8Ajgx/Mf8AJtM8zdiyDFXYq7FXYq7FXYq+Qv8AnMz/AJTvRP8Atlj/AKiJc7D2d/up
f1v0Bx83Nkn5Pj/kEujn/mI/6jJcvz/35/HR02r+osjJyThLScKFhOSQgvyCamo+YR46o3/G2aHt
z6o+533Zv0l7pmhdk7FXYq7FXYq7FXYq7FXYq7FXYq8w/PPfytdr7wf8nRm/9m/8bj7pfc4PaP8A
cn4PntI89IJebTXQUpqlufc/8ROY+c+gsZcmZk5rWhaThVaThQmPlrfW7Yf6/wDybbMfWf3R/HVt
wfWHzh58/wCU58xf9tO8/wCoh8twfRH3B6fH9I9z9AfI3/HBj+Y/5NpnmbsGQYq7FXYq7FXYq7FX
yF/zmZ/yneif9ssf9REudh7O/wB1L+t+gOPm5sm/KEf8gh0Y+9x/1GTZdm/vz+OgdPrOZT8nLHAW
E5JC0nCqX/kO9NT8wf8AbUb/AI2zQ9ufVH3O+7N+kvdPUzQ07Jg/5n+a7ny3o9zq0CGY20cREHMx
hvUnEfUA9OVemZmh03jZRC6u/utpz5eCBl3PIv8AoY3V/wDq1j/pKf8A5ozoR7NxP8f2ftdf/KR/
m/ay/wDLf81dQ826lcW0tsbQWypJyWZpOXJuNKELmu7U7JGliJCXFZ7nJ0ur8UkVVPZvUzR05rvU
xpXepjSu9TGld6mNK71MaV3qY0rzP8625eXrlf8AjB/ydGb32c/xuPul9zg9o/3J+DwdI89FJebT
PRkpqEJ9z+o5RmPpLCXJlJOYLStJwoWE4UJp5V31+1H/ABk/5NtmNrf7o/D727T/AFh84efv+U68
x/8AbUvf+oh8swf3cfcHp8f0j3P0B8jf8cGP5j/k2meaOwZBirsVdirsVdirsVfIX/OZn/Kd6J/2
yx/1ES52Hs7/AHUv636A4+bmyf8AKMf8gc0U/wCVcf8AUZNl2b/GD+OgdPrOZTsnLnXrScKrScKE
s/I1qanr3/bTb/jbND22PVH3O/7N+kvb/UzROyeYfny9fJmoj/iu2/6i0zbdiD/CofH/AHJcTW/3
R+H3vmQDPQ4wefep/kEeOuah/wAYov8Ak5nOe1Eaxw/rH7nZdmfUfc+l/UziXcu9TFXepirvUxV3
qYq71MVd6mKvOPzhblolwPaH/k5m79nv8aj7j9zgdo/3J+DxdI89BJebTDTEpeRH3P6jlOQ7MZck
/JzFaFhOFC0nCqbeUd/MVoP+Mn/Jpsxdf/cy+H3hu031h84/mB/ynnmT/tqXv/UQ+Waf+7j/AFR9
z0+P6R7n6AeRv+ODH8x/ybTPNHYMgxV2KuxV2KuxV2KvkL/nMz/lO9E/7ZY/6iJc7D2d/upf1v0B
x83NlP5TD/kC+iH/AC7n/qMmy3L/AIzL8dA6jWcym5OZDrlpOFC0nChKfyUbjqmue+pN/wAbZpO3
h6of1Xf9m/SXtXqZz9Oyeafnm9fKOoD/AIrt/wDqKXNz2CP8Lh/nf7kuJrv7o/D73zaFz0mMHnre
nfkWeOt33/GKP/k5nMe1kaxQ/rH7nZ9l/Ufc+j/UzhKdy71MaV3qY0rvUxpXepjSu9TGld6mNK8/
/NduWlzL7Rf8nM3XYH+NR+P3OD2l/cn4PJEjzvSXmkbYpS4Q/wCfTKpnZjLkmpOUtC0nCq0nJITj
ybv5lsx/xk/5NPmH2h/cy+H3hv0394Hzl+YP/KfeZf8Atq3v/US+Waf+7j/VH3PTw+kPv/yN/wAc
GP5j/k2meaOwZBirsVdirsVdirsVfIX/ADmZ/wAp3on/AGyx/wBREudh7O/3Uv636A4+bmyv8qB/
yBPRD/xZc/8AUZNlmT/GpfjoHUa1MycynWrScKFhOFUn/JxuOqa1/wBtJv8AjbNR7QD1Q/qu+7M+
kvZfUznKdm83/Ox+XlW/H/Fdv/1Erm69nh/hkP8AO/3JcTXf3J+H3vncLnp8YvOPSvyUHDWL0+Mc
f/E85P2u/uof1j9ztOy/qPufQ3qZwVO6d6mNK71MaV3qY0rvUxpXepjSu9TGlYJ+ZjcrGUe0X/E8
3HYX+Mx+P3OB2l/cn4PNEjzuSXmkVbpSRTlZLGXJFk5FpWk5JC0nChOvJG/miyH/ABl/5MvmF2l/
cS+H3hyNL/eD8dHzn+Yf/Kf+Zv8AtrX3/US+T0391H+qPueoh9Iff3kb/jgx/Mf8m0zzVz2QYq7F
XYq7FXYq7FXyF/zmZ/yneif9ssf9REudh7O/3Uv636A4+bmyz8qv/JHaGf8Aiy5/6jJ8nk/xuXu/
QHUa1MCczHWLCcKrScKEk/KN+Gqaz/20W/42zV+0Y3x/1Xfdl/SXr31gZzVO0Yv520E+YLSSwbms
EyIHkjKhgUk9Tbl8hmXodXLTZRliATG+fmKas2IZImJ6sFH5J2Q/3ddffF/TOh/0W5/5kPt/W4P8
lw7ynvlX8v18vXbz25mkMoVX9QpQBWrtxAzV9pdsZNXERkAOHutyNPpI4iSDzei/WBmnpy3fWBjS
u+sDGld9YGNK76wMaV31gY0rvrAxpWGfmA4kt5B/kx/8Tzbdi/4wPj9zgdpf3J+DAkjztCXmldEp
vkbYy5Licm0LScKFhOFU98ib+a7H/nr/AMmXzB7T/wAXl8PvDkaT+8H46PnT8xf/ACYPmf8A7a19
/wBRL5PTf3Uf6o+56iHIPv3yN/xwY/mP+TaZ5q57IMVdirsVdirsVdir5C/5zMB/x1oh7fosf9RE
udh7O/3Uv636A4+bmyz8qv8AyRuh07S3Ffb/AEyfJz/xuXu/QHUa3kjSczXWLScKFpOFDH/ywfhq
OsH/AJf2/W2a72lG+P8AqO+7L+kvT/rXvnMU7R31r3xpXfWvfGld9a98aV31r3xpXfWvfGld9a98
aV31r3xpXfWvfGld9a98aV31r3xpWM+bpPUiYeyf8Szadj/4wPj9zg9pf3J+DFUjzsCXmVVkpGTg
id2MuSHJy9oWE4VWk4UJ95CqfNljQbD1a/8AIl8wO1P8Xl8PvDkaP+8H46PnX8xf/Jg+Z/8AtrX3
/US+T0v91H+qPuephyD798jf8cGP5j/k2meaueyDFXYq7FXYq7FXYq+b/wDnMvyrcXGj6F5ngQtH
YSSWV6QK8VuOLxMfBQ8bLXxYZ0vs7nAlLGeu4+DTmHVif/OOXm+xvdGvfImoTiO5LvdaSXbZlIDS
RINt0ZfUp1ILeGbPtDGYTGUfF12pxcQZ/fafeWUhjuIytDQPT4W+Ry3FljMWC6acDHmhCcta1hOF
Uo/KW39fzBf2/X1dQYU/4LNf7UHfH/Ud92V9Je4/4U/yPwzkuN2tO/wp/kfhjxrTv8Kf5H4Y8a07
/Cn+R+GPGtO/wp/kfhjxrTv8Kf5H4Y8a07/Cn+R+GPGtO/wp/kfhjxrTv8Kf5H4Y8a07/Cn+R+GP
GtO/wp/kfhjxrTz78wrH6lf/AFelKxI1Pmx/pm27GN5x8fucDtP+5PwYmkedcS8wuuEpbufb+OMD
6mMuSWE5ltK0nChyJJK4jjUu7bKqgkk+wGJIAsqBfJldi1p5F0G982+Yf3BjjMdlZsQsskjbqig/
tvxoB2FSds0Wu1H5iQxY9+8u20OlINl82eV7HUPNvny1WWs1zqF4bm8cDqC5lmb2rvT3zK1mUYMB
PdGh9wd/AWafoD5TtzBo6L2LEj5ABf8AjXPPHLTjFXYq7FXYq7FXYql/mDQdL8waLeaLqsIuNPv4
mhuIj3Vu4PZlO6nsd8sxZZY5CUeYQRb4V/NL8oPNv5a656pEs2kiX1NL1uDko+FqpzZf7qVdtvHd
Sc7vQ9o49TGuUusfxzDjTgQmOjf85K/mRp1klrMbLUymy3F5C5loBQAtDJCG+ZFfE4z7KxSN7j3O
OcUSj/8Aoaf8wf8Aq36T/wAibn/soyH8kYu+X2fqR4Ad/wBDT/mD/wBW/Sf+RNz/ANlGP8kYu+X2
fqXwAoN/zkl5puryK6v9OtRJACIHsXmtXUk9SzvcfgBlObsSEuUiPfv+puxejkjP+hnPMn++bz/u
JS/9U8xv9Dw/n/7H9rd4rv8AoZzzJ/vm8/7iUv8A1Tx/0PD+f/sf2r4rv+hnPMn++bz/ALiUv/VP
H/Q8P5/+x/aviu/6Gc8yf75vP+4lL/1Tx/0PD+f/ALH9q+K7/oZzzJ/vm8/7iUv/AFTx/wBDw/n/
AOx/aviu/wChnPMn++bz/uJS/wDVPH/Q8P5/+x/aviu/6Gc8yf75vP8AuJS/9U8f9Dw/n/7H9q+K
7/oZzzJ/vm8/7iUv/VPH/Q8P5/8Asf2r4rv+hnPMn++bz/uJS/8AVPH/AEPD+f8A7H9q+K7/AKGc
8yf75vP+4lL/ANU8f9Dw/n/7H9q+K7/oZzzJ/vm8/wC4lL/1Tx/0PD+f/sf2r4qEm/5yR8yi8jvr
awikvEBQyahNLdjgRSg4mBh1/mPyy7D2FCJ3kT7hX62vJLjFK3/Q0/5g/wDVv0n/AJE3P/ZRmT/J
GLvl9n6nH8AO/wChp/zB/wCrfpP/ACJuf+yjH+SMXfL7P1L4Ad/0NP8AmD/1b9J/5E3P/ZRj/JGL
vl9n6l8AO/6Gn/MH/q36T/yJuf8Asox/kjF3y+z9S+AGj/zlP+YJH/HP0ke/o3P/AGUY/wAkYu+X
2fqXwQwPXvM/nfz/AKxF9emm1O7qRa2cS0jiDHf040AVR0qx32+I5lxhi08L2iO9tjCtg+ifyJ/J
ubQF+u36q+tXajmRusEXXiD+vxNPAE8f2r2l+YlUfoH2+f6nKhCn0XBCkEKQxiiRgKv0ZqGxfirs
VdirsVdirsVdiqhfWFlf2slpewpcW0o4yQyKGVh7g4QSNwryzXP+cZ/yy1G4a4i0xIGY1McTyQrX
5RMo/wCFzYY+1tTAUJn40fvYHGEp/wChVPy+/wCWAf8ASXdf1yf8tar+f9kf1L4cXf8AQqn5ff8A
LAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/
rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n
/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF
3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff
8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r
+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+
f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4c
Xf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cW1/5xW/L
9WDCwWo33urkj7icT2zqv5/2R/UvhxZl5Z/KLy9oKcLG1t7RduRgT42p4sQN/c5g5tRkym5yMmQA
DNrOytrSL04E4j9o9ST7nKUq+KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2K
uxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Ku
xV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Kux
V2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV//2Q==</xapGImg:image>
</rdf:li>
</rdf:Alt>
</xap:Thumbnails>
</rdf:Description>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<xapMM:DocumentID>
uuid:f3c53255-be8a-4b04-817b-695bf2c54c8b</xapMM:DocumentID>
</rdf:Description>
<rdf:Description
rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
<dc:format>
image/svg+xml</dc:format>
<dc:title>
<rdf:Alt>
<rdf:li
xml:lang="x-default">
filesave.ai</rdf:li>
</rdf:Alt>
</dc:title>
</rdf:Description>
</rdf:RDF>
</x:xmpmeta>
<?xpacket end='w'?>
</metadata>
<g
id="Layer_1">
<path
style="opacity:0.2;"
d="M9.416,5.208c-2.047,0-3.712,1.693-3.712,3.775V39.15c0,2.082,1.666,3.775,3.712,3.775h29.401 c2.047,0,3.712-1.693,3.712-3.775V8.983c0-2.082-1.665-3.775-3.712-3.775H9.416z"
id="path592" />
<path
style="opacity:0.2;"
d="M9.041,4.833c-2.047,0-3.712,1.693-3.712,3.775v30.167c0,2.082,1.666,3.775,3.712,3.775h29.401 c2.047,0,3.712-1.693,3.712-3.775V8.608c0-2.082-1.665-3.775-3.712-3.775H9.041z"
id="path593" />
<path
style="fill:#00008D;"
d="M8.854,4.646c-2.047,0-3.712,1.693-3.712,3.775v30.167c0,2.082,1.666,3.775,3.712,3.775h29.401 c2.047,0,3.712-1.693,3.712-3.775V8.42c0-2.082-1.665-3.775-3.712-3.775H8.854z"
id="path594" />
<path
style="fill:#00008D;"
d="M8.854,5.021c-1.84,0-3.337,1.525-3.337,3.4v30.167c0,1.875,1.497,3.4,3.337,3.4h29.401 c1.84,0,3.337-1.525,3.337-3.4V8.42c0-1.875-1.497-3.4-3.337-3.4H8.854z"
id="path595" />
<path
id="path166_1_"
style="fill:#FFFFFF;"
d="M40.654,38.588c0,1.36-1.074,2.463-2.399,2.463H8.854c-1.326,0-2.4-1.103-2.4-2.463V8.42 c0-1.36,1.074-2.462,2.4-2.462h29.401c1.325,0,2.399,1.103,2.399,2.462V38.588z" />
<linearGradient
id="path166_2_"
gradientUnits="userSpaceOnUse"
x1="-149.0464"
y1="251.1436"
x2="-149.0464"
y2="436.303"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#B4E2FF"
id="stop598" />
<stop
offset="1"
style="stop-color:#006DFF"
id="stop599" />
<a:midPointStop
offset="0"
style="stop-color:#B4E2FF"
id="midPointStop600" />
<a:midPointStop
offset="0.5"
style="stop-color:#B4E2FF"
id="midPointStop601" />
<a:midPointStop
offset="1"
style="stop-color:#006DFF"
id="midPointStop602" />
</linearGradient>
<path
id="path166"
style="fill:url(#path166_2_);"
d="M40.654,38.588c0,1.36-1.074,2.463-2.399,2.463H8.854c-1.326,0-2.4-1.103-2.4-2.463V8.42 c0-1.36,1.074-2.462,2.4-2.462h29.401c1.325,0,2.399,1.103,2.399,2.462V38.588z" />
<path
style="fill:#FFFFFF;"
d="M8.854,6.521c-1.013,0-1.837,0.852-1.837,1.9v30.167c0,1.048,0.824,1.9,1.837,1.9h29.401 c1.013,0,1.837-0.853,1.837-1.9V8.42c0-1.048-0.824-1.9-1.837-1.9H8.854z"
id="path604" />
<linearGradient
id="XMLID_1_"
gradientUnits="userSpaceOnUse"
x1="7.3057"
y1="7.2559"
x2="50.7728"
y2="50.7231">
<stop
offset="0"
style="stop-color:#94CAFF"
id="stop606" />
<stop
offset="1"
style="stop-color:#006DFF"
id="stop607" />
<a:midPointStop
offset="0"
style="stop-color:#94CAFF"
id="midPointStop608" />
<a:midPointStop
offset="0.5"
style="stop-color:#94CAFF"
id="midPointStop609" />
<a:midPointStop
offset="1"
style="stop-color:#006DFF"
id="midPointStop610" />
</linearGradient>
<path
style="fill:url(#XMLID_1_);"
d="M8.854,6.521c-1.013,0-1.837,0.852-1.837,1.9v30.167c0,1.048,0.824,1.9,1.837,1.9h29.401 c1.013,0,1.837-0.853,1.837-1.9V8.42c0-1.048-0.824-1.9-1.837-1.9H8.854z"
id="path611" />
<linearGradient
id="XMLID_2_"
gradientUnits="userSpaceOnUse"
x1="23.5039"
y1="2.187"
x2="23.5039"
y2="34.4368">
<stop
offset="0"
style="stop-color:#428AFF"
id="stop613" />
<stop
offset="1"
style="stop-color:#C9E6FF"
id="stop614" />
<a:midPointStop
offset="0"
style="stop-color:#428AFF"
id="midPointStop615" />
<a:midPointStop
offset="0.5"
style="stop-color:#428AFF"
id="midPointStop616" />
<a:midPointStop
offset="1"
style="stop-color:#C9E6FF"
id="midPointStop617" />
</linearGradient>
<path
style="fill:url(#XMLID_2_);"
d="M36.626,6.861c0,0-26.184,0-26.914,0c0,0.704,0,16.59,0,17.294c0.721,0,26.864,0,27.583,0 c0-0.704,0-16.59,0-17.294C36.988,6.861,36.626,6.861,36.626,6.861z"
id="path618" />
<polygon
id="path186_1_"
style="fill:#FFFFFF;"
points="35.809,6.486 10.221,6.486 10.221,23.405 36.788,23.405 36.788,6.486 " />
<linearGradient
id="path186_2_"
gradientUnits="userSpaceOnUse"
x1="-104.5933"
y1="411.6699"
x2="-206.815"
y2="309.4482"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#CCCCCC"
id="stop621" />
<stop
offset="1"
style="stop-color:#F0F0F0"
id="stop622" />
<a:midPointStop
offset="0"
style="stop-color:#CCCCCC"
id="midPointStop623" />
<a:midPointStop
offset="0.5"
style="stop-color:#CCCCCC"
id="midPointStop624" />
<a:midPointStop
offset="1"
style="stop-color:#F0F0F0"
id="midPointStop625" />
</linearGradient>
<polygon
id="path186"
style="fill:url(#path186_2_);"
points="35.809,6.486 10.221,6.486 10.221,23.405 36.788,23.405 36.788,6.486 " />
<path
style="fill:#FFFFFF;stroke:#FFFFFF;stroke-width:0.1875;"
d="M11.488,7.019c0,0.698,0,14.542,0,15.239c0.716,0,23.417,0,24.133,0c0-0.698,0-14.541,0-15.239 C34.904,7.019,12.204,7.019,11.488,7.019z"
id="path627" />
<linearGradient
id="XMLID_3_"
gradientUnits="userSpaceOnUse"
x1="34.5967"
y1="3.5967"
x2="18.4087"
y2="19.7847">
<stop
offset="0"
style="stop-color:#FFFFFF"
id="stop629" />
<stop
offset="0.5506"
style="stop-color:#E6EDFF"
id="stop630" />
<stop
offset="1"
style="stop-color:#FFFFFF"
id="stop631" />
<a:midPointStop
offset="0"
style="stop-color:#FFFFFF"
id="midPointStop632" />
<a:midPointStop
offset="0.5"
style="stop-color:#FFFFFF"
id="midPointStop633" />
<a:midPointStop
offset="0.5506"
style="stop-color:#E6EDFF"
id="midPointStop634" />
<a:midPointStop
offset="0.5"
style="stop-color:#E6EDFF"
id="midPointStop635" />
<a:midPointStop
offset="1"
style="stop-color:#FFFFFF"
id="midPointStop636" />
</linearGradient>
<path
style="fill:url(#XMLID_3_);stroke:#FFFFFF;stroke-width:0.1875;"
d="M11.488,7.019c0,0.698,0,14.542,0,15.239c0.716,0,23.417,0,24.133,0c0-0.698,0-14.541,0-15.239 C34.904,7.019,12.204,7.019,11.488,7.019z"
id="path637" />
<linearGradient
id="path205_1_"
gradientUnits="userSpaceOnUse"
x1="-174.4409"
y1="300.0908"
x2="-108.8787"
y2="210.2074"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#003399"
id="stop639" />
<stop
offset="0.2697"
style="stop-color:#0035ED"
id="stop640" />
<stop
offset="1"
style="stop-color:#57ADFF"
id="stop641" />
<a:midPointStop
offset="0"
style="stop-color:#003399"
id="midPointStop642" />
<a:midPointStop
offset="0.5"
style="stop-color:#003399"
id="midPointStop643" />
<a:midPointStop
offset="0.2697"
style="stop-color:#0035ED"
id="midPointStop644" />
<a:midPointStop
offset="0.5"
style="stop-color:#0035ED"
id="midPointStop645" />
<a:midPointStop
offset="1"
style="stop-color:#57ADFF"
id="midPointStop646" />
</linearGradient>
<rect
id="path205"
x="12.154"
y="26.479"
style="fill:url(#path205_1_);"
width="22.007"
height="13.978" />
<linearGradient
id="XMLID_4_"
gradientUnits="userSpaceOnUse"
x1="21.8687"
y1="25.1875"
x2="21.8687"
y2="44.6251">
<stop
offset="0"
style="stop-color:#DFDFDF"
id="stop649" />
<stop
offset="1"
style="stop-color:#7D7D99"
id="stop650" />
<a:midPointStop
offset="0"
style="stop-color:#DFDFDF"
id="midPointStop651" />
<a:midPointStop
offset="0.5"
style="stop-color:#DFDFDF"
id="midPointStop652" />
<a:midPointStop
offset="1"
style="stop-color:#7D7D99"
id="midPointStop653" />
</linearGradient>
<path
style="fill:url(#XMLID_4_);"
d="M13.244,27.021c-0.311,0-0.563,0.252-0.563,0.563v13.104c0,0.312,0.252,0.563,0.563,0.563h17.249 c0.311,0,0.563-0.251,0.563-0.563V27.583c0-0.311-0.252-0.563-0.563-0.563H13.244z M18.85,30.697c0,0.871,0,5.078,0,5.949 c-0.683,0-2.075,0-2.759,0c0-0.871,0-5.078,0-5.949C16.775,30.697,18.167,30.697,18.85,30.697z"
id="path654" />
<linearGradient
id="XMLID_5_"
gradientUnits="userSpaceOnUse"
x1="-158.0337"
y1="288.0684"
x2="-158.0337"
y2="231.3219"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#F0F0F0"
id="stop656" />
<stop
offset="0.6348"
style="stop-color:#CECEDB"
id="stop657" />
<stop
offset="0.8595"
style="stop-color:#B1B1C5"
id="stop658" />
<stop
offset="1"
style="stop-color:#FFFFFF"
id="stop659" />
<a:midPointStop
offset="0"
style="stop-color:#F0F0F0"
id="midPointStop660" />
<a:midPointStop
offset="0.5"
style="stop-color:#F0F0F0"
id="midPointStop661" />
<a:midPointStop
offset="0.6348"
style="stop-color:#CECEDB"
id="midPointStop662" />
<a:midPointStop
offset="0.5"
style="stop-color:#CECEDB"
id="midPointStop663" />
<a:midPointStop
offset="0.8595"
style="stop-color:#B1B1C5"
id="midPointStop664" />
<a:midPointStop
offset="0.5"
style="stop-color:#B1B1C5"
id="midPointStop665" />
<a:midPointStop
offset="1"
style="stop-color:#FFFFFF"
id="midPointStop666" />
</linearGradient>
<path
style="fill:url(#XMLID_5_);"
d="M13.244,27.583v13.104h17.249V27.583H13.244z M19.413,37.209h-3.884v-7.074h3.884V37.209z"
id="path667" />
<linearGradient
id="path228_1_"
gradientUnits="userSpaceOnUse"
x1="-68.1494"
y1="388.4561"
x2="-68.1494"
y2="404.6693"
gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
<stop
offset="0"
style="stop-color:#3399FF"
id="stop669" />
<stop
offset="1"
style="stop-color:#000000"
id="stop670" />
<a:midPointStop
offset="0"
style="stop-color:#3399FF"
id="midPointStop671" />
<a:midPointStop
offset="0.5"
style="stop-color:#3399FF"
id="midPointStop672" />
<a:midPointStop
offset="1"
style="stop-color:#000000"
id="midPointStop673" />
</linearGradient>
<rect
id="path228"
x="37.83"
y="9.031"
style="fill:url(#path228_1_);"
width="1.784"
height="1.785" />
<polyline
id="_x3C_Slice_x3E_"
style="fill:none;"
points="0,48 0,0 48,0 48,48 " />
</g>
</svg>
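The two SVG documents above are icon resources of the kind the labeling tool bundles for its toolbar. As a minimal sketch of how such a file can be wired to a PyQt5 action; the path `resources/icons/save.svg` is a hypothetical example, not a file name confirmed by this repository:

```python
# Minimal sketch: attaching an SVG icon to a PyQt5 toolbar action.
# 'resources/icons/save.svg' is a hypothetical example path.
import sys

from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QApplication, QMainWindow

app = QApplication(sys.argv)
window = QMainWindow()

# QIcon renders the SVG through Qt's SVG icon plugin.
save_action = QAction(QIcon('resources/icons/save.svg'), 'Save', window)
toolbar = window.addToolBar('File')
toolbar.addAction(save_action)

window.show()
sys.exit(app.exec_())
```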
saveAsDetail=将标签保存到其他文件
changeSaveDir=改变存放目录
openFile=打开文件
shapeLineColorDetail=更改线条颜色
resetAll=重置界面与保存地址
crtBox=矩形标注
crtBoxDetail=创建一个新的区块
dupBoxDetail=复制区块
verifyImg=验证图像
zoominDetail=放大
verifyImgDetail=验证图像
saveDetail=保存标签文件
openFileDetail=打开图像文件
fitWidthDetail=调整宽度适应到窗口宽度
tutorial=PaddleOCR地址
editLabel=编辑标签
openAnnotationDetail=打开标签文件
quit=退出
shapeFillColorDetail=更改填充颜色
closeCurDetail=关闭当前文件
closeCur=关闭文件
deleteImg=删除图像
deleteImgDetail=删除当前图像
fitWin=调整到窗口大小
delBox=删除选择的区块
boxLineColorDetail=选择线框颜色
originalsize=原始大小
resetAllDetail=重置所有设定
zoomoutDetail=缩小画面
save=保存
saveAs=另存为
fitWinDetail=缩放到当前窗口大小
openDir=打开目录
copyPrevBounding=复制当前图像中的上一个边界框
showHide=显示/隐藏标签
changeSaveFormat=更改存储格式
shapeFillColor=填充颜色
quitApp=退出程序
dupBox=复制区块
delBoxDetail=删除区块
zoomin=放大画面
info=信息
openAnnotation=开启标签
prevImgDetail=上一个图像
fitWidth=缩放到跟当前画面一样宽
zoomout=缩小画面
changeSavedAnnotationDir=更改保存标签文件的预设目录
nextImgDetail=下一个图像
originalsizeDetail=放大到原始大小
prevImg=上一张
tutorialDetail=显示示范内容
shapeLineColor=形状线条颜色
boxLineColor=区块线条颜色
editLabelDetail=修改当前所选区块的标签
nextImg=下一张
useDefaultLabel=使用预设标签
useDifficult=有难度的
boxLabelText=区块的标签
labels=标签
autoSaveMode=自动保存模式
singleClsMode=单一类别模式
displayLabel=显示类别
fileList=文件列表
files=文件
advancedMode=专家模式
advancedModeDetail=切换到专家模式
showAllBoxDetail=显示所有区块
hideAllBoxDetail=隐藏所有区块
annoPanel=标注面板
anno=标注
addNewBbox=新框
reLabel=重标注
choosemodel=选择模型
tipchoosemodel=选择OCR模型
ImageResize=图片缩放
IR=图片缩放
autoRecognition=自动标注
reRecognition=重新识别
mfile=文件
medit=编辑
mview=视图
mhelp=帮助
iconList=缩略图
detectionBoxposition=检测框位置
recognitionResult=识别结果
creatPolygon=四点标注
drawSquares=正方形标注
saveRec=保存识别结果
tempLabel=待识别
steps=操作步骤
choseModelLg=选择模型语言
cancel=取消
ok=确认
autolabeling=自动标注中
hideBox=隐藏所有标注
showBox=显示所有标注
saveAsDetail=將標籤保存到其他文件
changeSaveDir=改變存放目錄
openFile=開啟檔案
shapeLineColorDetail=更改線條顏色
resetAll=重置
crtBox=創建區塊
crtBoxDetail=畫一個區塊
dupBoxDetail=複製區塊
verifyImg=驗證圖像
zoominDetail=放大
verifyImgDetail=驗證圖像
saveDetail=將標籤儲存到檔案
openFileDetail=打開圖像
fitWidthDetail=調整到窗口寬度
tutorial=PaddleOCR網址
editLabel=編輯標籤
openAnnotationDetail=打開標籤文件
quit=結束
shapeFillColorDetail=更改填充顏色
closeCurDetail=關閉目前檔案
closeCur=關閉
deleteImg=刪除圖像
deleteImgDetail=刪除目前圖像
fitWin=調整到跟窗口一樣大小
delBox=刪除選取區塊
boxLineColorDetail=選擇框線顏色
originalsize=原始大小
resetAllDetail=重設所有設定
zoomoutDetail=畫面縮小
save=儲存
saveAs=另存為
fitWinDetail=縮放到窗口一樣
openDir=開啟目錄
copyPrevBounding=複製當前圖像中的上一個邊界框
showHide=顯示/隱藏標籤
changeSaveFormat=更改儲存格式
shapeFillColor=填充顏色
quitApp=離開本程式
dupBox=複製區塊
delBoxDetail=刪除區塊
zoomin=放大畫面
info=資訊
openAnnotation=開啟標籤
prevImgDetail=上一個圖像
fitWidth=縮放到跟畫面一樣寬
zoomout=縮小畫面
changeSavedAnnotationDir=更改預設標籤存的目錄
nextImgDetail=下一個圖像
originalsizeDetail=放大到原始大小
prevImg=上一個圖像
tutorialDetail=顯示示範內容
shapeLineColor=形狀線條顏色
boxLineColor=區塊線條顏色
editLabelDetail=修改所選區塊的標籤
nextImg=下一張圖片
useDefaultLabel=使用預設標籤
useDifficult=有難度的
boxLabelText=區塊的標籤
labels=標籤
autoSaveMode=自動儲存模式
singleClsMode=單一類別模式
displayLabel=顯示類別
fileList=檔案清單
files=檔案
iconList=縮圖清單
icon=縮圖
advancedMode=進階模式
advancedModeDetail=切到進階模式
showAllBoxDetail=顯示所有區塊
hideAllBoxDetail=隱藏所有區塊
openFile=Open
openFileDetail=Open image or label file
quit=Quit
quitApp=Quit application
openDir=Open Dir
copyPrevBounding=Copy previous Bounding Boxes in the current image
changeSavedAnnotationDir=Change default saved Annotation dir
openAnnotation=Open Annotation
openAnnotationDetail=Open an annotation file
changeSaveDir=Change Save Dir
nextImg=Next Image
nextImgDetail=Open the next Image
prevImg=Prev Image
prevImgDetail=Open the previous Image
verifyImg=Verify Image
verifyImgDetail=Verify Image
save=Save
saveDetail=Save the labels to a file
changeSaveFormat=Change save format
saveAs=Save As
saveAsDetail=Save the labels to a different file
closeCur=Close
closeCurDetail=Close the current file
deleteImg=Delete current image
deleteImgDetail=Delete the current image
resetAll=Reset Interface and Save Dir
resetAllDetail=Reset All
boxLineColor=Box Line Color
boxLineColorDetail=Choose Box line color
crtBox=Create RectBox
crtBoxDetail=Draw a new box
delBox=Delete RectBox
delBoxDetail=Remove the box
dupBox=Duplicate RectBox
dupBoxDetail=Create a duplicate of the selected box
tutorial=PaddleOCR url
tutorialDetail=Show demo
info=Information
zoomin=Zoom In
zoominDetail=Increase zoom level
zoomout=Zoom Out
zoomoutDetail=Decrease zoom level
originalsize=Original size
originalsizeDetail=Zoom to original size
fitWin=Fit Window
fitWinDetail=Zoom follows window size
fitWidth=Fit Width
fitWidthDetail=Zoom follows window width
editLabel=Edit Label
editLabelDetail=Modify the label of the selected Box
shapeLineColor=Shape Line Color
shapeLineColorDetail=Change the line color for this specific shape
shapeFillColor=Shape Fill Color
shapeFillColorDetail=Change the fill color for this specific shape
showHide=Show/Hide Label Panel
useDefaultLabel=Use default label
useDifficult=Difficult
boxLabelText=Box Labels
labels=Labels
autoSaveMode=Auto Save mode
singleClsMode=Single Class Mode
displayLabel=Display Labels
fileList=File List
files=Files
advancedMode=Advanced Mode
advancedModeDetail=Switch to advanced mode
showAllBoxDetail=Show all bounding boxes
hideAllBoxDetail=Hide all bounding boxes
annoPanel=Annotation Panel
anno=Annotation
addNewBbox=New Bounding Box
reLabel=Re-label
choosemodel=Choose OCR model
tipchoosemodel=Choose OCR model from dir
ImageResize=Image Resize
IR=Image Resize
autoRecognition=Auto Recognition
reRecognition=Re-recognition
mfile=File
medit=Edit
mview=View
mhelp=Help
iconList=Icon List
detectionBoxposition=Detection box position
recognitionResult=Recognition result
creatPolygon=Create Quadrilateral
drawSquares=Draw Squares
saveRec=Save Recognition Result
tempLabel=TEMPORARY
steps=Steps
choseModelLg=Choose Model Language
cancel=Cancel
ok=OK
autolabeling=Automatic Labeling
hideBox=Hide All Boxes
showBox=Show All Boxes
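The three blocks above are `key=value` string bundles (Simplified Chinese, Traditional Chinese, English) that supply the tool's localized UI text. As a minimal, self-contained sketch of how such a file can be parsed; it assumes only the plain `key=value` layout shown above, not the project's actual StringBundle loader, and `strings-zh-CN.properties` is a hypothetical file name:

```python
# Minimal sketch of parsing a key=value string bundle like the ones above.
def load_string_bundle(path):
    """Return a dict mapping UI string ids to localized text."""
    strings = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue  # skip blanks and comments
            key, _, value = line.partition('=')
            strings[key.strip()] = value.strip()
    return strings

bundle = load_string_bundle('strings-zh-CN.properties')
print(bundle.get('openFile'))  # -> '打开文件'
```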
[bumpversion]
commit = True
tag = True
[bumpversion:file:setup.py]
[bdist_wheel]
universal = 1
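A note on the config above: the `[bumpversion]` block configures the bumpversion release helper so that, with `commit = True` and `tag = True`, a run such as `bumpversion patch` (the exact part name is an assumption about typical usage, not a command documented in this repository) rewrites the version string in `setup.py`, commits the change, and creates a matching git tag in one step. `[bdist_wheel] universal = 1` marks built wheels as compatible with both Python 2 and 3.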
# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages, Command
from sys import platform as _platform
from shutil import rmtree
import sys
import os
here = os.path.abspath(os.path.dirname(__file__))
NAME = 'labelImg'
REQUIRES_PYTHON = '>=3.0.0'
REQUIRED_DEP = ['pyqt5', 'lxml']
about = {}
with open(os.path.join(here, 'libs', '__init__.py')) as f:
exec(f.read(), about)
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
# OS specific settings
SET_REQUIRES = []
if _platform == "linux" or _platform == "linux2":
# linux
print('linux')
elif _platform == "darwin":
# MAC OS X
SET_REQUIRES.append('py2app')
required_packages = find_packages()
required_packages.append('labelImg')
APP = [NAME + '.py']
OPTIONS = {
'argv_emulation': True,
'iconfile': 'resources/icons/app.icns'
}
class UploadCommand(Command):
"""Support setup.py upload."""
    description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
            self.status('Failed to remove previous builds…')
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system(
'{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag -d v{0}'.format(about['__version__']))
os.system('git tag v{0}'.format(about['__version__']))
# os.system('git push --tags')
sys.exit()
setup(
app=APP,
name=NAME,
version=about['__version__'],
description="LabelImg is a graphical image annotation tool and label object bounding boxes in images",
long_description=readme + '\n\n' + history,
author="TzuTa Lin",
author_email='tzu.ta.lin@gmail.com',
url='https://github.com/tzutalin/labelImg',
python_requires=REQUIRES_PYTHON,
package_dir={'labelImg': '.'},
packages=required_packages,
entry_points={
'console_scripts': [
'labelImg=labelImg.labelImg:main'
]
},
include_package_data=True,
install_requires=REQUIRED_DEP,
license="MIT license",
zip_safe=False,
keywords='labelImg labelTool development annotation deeplearning',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
package_data={'data/predefined_classes.txt': ['data/predefined_classes.txt']},
options={'py2app': OPTIONS},
setup_requires=SET_REQUIRES,
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
}
)
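The `UploadCommand` above is registered under the name `upload` through `cmdclass`, so the whole release flow (clean `dist/`, build sdist and universal wheel, upload via twine, retag) is driven by a single setup.py invocation. A hedged usage sketch, assuming `twine` is installed and PyPI credentials are configured:

```python
# Equivalent to running "python setup.py upload" from the project root;
# this simply invokes the custom command registered in cmdclass above.
import subprocess
import sys

subprocess.run([sys.executable, 'setup.py', 'upload'], check=True)
```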
English | [简体中文](README_ch.md)
## Introduction
PaddleOCR aims to create multilingual, leading, and practical OCR tools that help users train better models and apply them in practice.
**Recent updates**
- 2020.11.25 Add a new data annotation tool, [PPOCRLabel](./PPOCRLabel/README_en.md), which helps improve labeling efficiency. Moreover, the labeling results can be used directly to train the PP-OCR system.
- 2020.9.22 Update the PP-OCR technical article, https://arxiv.org/abs/2009.09941
- 2020.9.19 Update the ultra lightweight compressed ppocr_mobile_slim series models, the overall model size is 3.5M (see [PP-OCR Pipeline](#PP-OCR-Pipeline)), suitable for mobile deployment. [Model Downloads](#Supported-Chinese-model-list)
- 2020.9.17 Update the ultra lightweight ppocr_mobile series and general ppocr_server series Chinese and English OCR models, which are comparable to commercial systems. [Model Downloads](#Supported-Chinese-model-list)
- 2020.9.17 Update the [English recognition model](./doc/doc_en/models_list_en.md#english-recognition-model) and [Multilingual recognition model](doc/doc_en/models_list_en.md#english-recognition-model); `English`, `Chinese`, `German`, `French`, `Japanese` and `Korean` are now supported. Models for more languages will continue to be updated.
- 2020.8.24 Support using PaddleOCR through whl package installation; please refer to [PaddleOCR Package](./doc/doc_en/whl_en.md)
- 2020.8.21 Update the replay and PPT of lesson 2 of the August 18 live lesson at Bilibili: an easy-to-learn-and-use OCR toolkit spree. [Get Address](https://aistudio.baidu.com/aistudio/education/group/info/1519)
- [more](./doc/doc_en/update_en.md)
| Chinese and English ultra-lightweight OCR model (8.1M) | ch_ppocr_mobile_v1.1_xx | Mobile & server | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_train.tar) | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_train.tar) | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/rec/ch_ppocr_mobile_v1.1_rec_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/rec/ch_ppocr_mobile_v1.1_rec_pre.tar) |
| Chinese and English general OCR model (155.1M) | ch_ppocr_server_v1.1_xx | Server | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/server/det/ch_ppocr_server_v1.1_det_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/server/det/ch_ppocr_server_v1.1_det_train.tar) | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_train.tar) | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/server/rec/ch_ppocr_server_v1.1_rec_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/server/rec/ch_ppocr_server_v1.1_rec_pre.tar) |
| Chinese and English ultra-lightweight compressed OCR model (3.5M) | ch_ppocr_mobile_slim_v1.1_xx | Mobile | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile-slim/det/ch_ppocr_mobile_v1.1_det_prune_infer.tar) / [slim model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_det_prune_opt.nb) | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_quant_infer.tar) / [slim model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_cls_quant_opt.nb) | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile-slim/rec/ch_ppocr_mobile_v1.1_rec_quant_infer.tar) / [slim model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_rec_quant_opt.nb) |
| French ultra-lightweight OCR model (4.6M) | french_ppocr_mobile_v1.1_xx | Mobile & server | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_train.tar) | - | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/fr/french_ppocr_mobile_v1.1_rec_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/fr/french_ppocr_mobile_v1.1_rec_train.tar) |
| German ultra-lightweight OCR model (4.6M) | german_ppocr_mobile_v1.1_xx | Mobile & server | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_train.tar) | - | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/ge/german_ppocr_mobile_v1.1_rec_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/ge/german_ppocr_mobile_v1.1_rec_train.tar) |
| Korean ultra-lightweight OCR model (5.9M) | korean_ppocr_mobile_v1.1_xx | Mobile & server | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_train.tar) | - | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/kr/korean_ppocr_mobile_v1.1_rec_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/kr/korean_ppocr_mobile_v1.1_rec_train.tar) |
| Japanese ultra-lightweight OCR model (6.2M) | japan_ppocr_mobile_v1.1_xx | Mobile & server | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_train.tar) | - | [inference model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/jp/japan_ppocr_mobile_v1.1_rec_infer.tar) / [pre-trained model](https://paddleocr.bj.bcebos.com/20-09-22/mobile/jp/japan_ppocr_mobile_v1.1_rec_train.tar) |
For more model downloads (including multiple languages), please refer to [PP-OCR v1.1 series model downloads](./doc/doc_en/models_list_en.md).
For a new language request, please refer to the [guideline for new language requests](#language_requests).
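To make the downloads above concrete: once the whl package is installed, these models can be exercised in a few lines. A minimal sketch, assuming the pip-era `PaddleOCR` interface and a hypothetical image path `doc/imgs_en/img_12.jpg`:

```python
# Minimal sketch of running inference with the pip-installed package.
# The constructor flags follow the PaddleOCR whl interface of this
# period; models are downloaded automatically on first use.
from paddleocr import PaddleOCR

ocr = PaddleOCR(use_angle_cls=True, lang='en')
result = ocr.ocr('doc/imgs_en/img_12.jpg', cls=True)
for box, (text, confidence) in result:
    print(box, text, confidence)  # detected quadrilateral, text, score
```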
## Tutorials
- [Installation](./doc/doc_en/installation_en.md)
- [Quick Start](./doc/doc_en/quickstart_en.md)
- [Code Structure](./doc/doc_en/tree_en.md)
- Algorithm Introduction
    - [Text Detection Algorithm](./doc/doc_en/algorithm_overview_en.md)
    - [Text Recognition Algorithm](./doc/doc_en/algorithm_overview_en.md)
    - [PP-OCR Pipeline](#PP-OCR-Pipeline)
- Model Training/Evaluation
    - [Text Detection](./doc/doc_en/detection_en.md)
    - [Text Recognition](./doc/doc_en/recognition_en.md)
    - [Direction Classification](./doc/doc_en/angle_class_en.md)
    - [Yml Configuration](./doc/doc_en/config_en.md)
- Inference and Deployment
    - [Quick Inference Based on PIP](./doc/doc_en/whl_en.md)
    - [Python Inference](./doc/doc_en/inference_en.md)
    - [C++ Inference](./deploy/cpp_infer/readme_en.md)
    - [Serving](./deploy/hubserving/readme_en.md)
    - [Mobile](./deploy/lite/readme_en.md)
    - [Model Quantization](./deploy/slim/quantization/README_en.md)
    - [Model Compression](./deploy/slim/prune/README_en.md)
    - [Benchmark](./doc/doc_en/benchmark_en.md)
- Data Annotation and Synthesis
    - [Semi-automatic Annotation Tool](./PPOCRLabel/README_en.md)
    - [Data Annotation Tools](./doc/doc_en/data_annotation_en.md)
    - [Data Synthesis Tools](./doc/doc_en/data_synthesis_en.md)
- Datasets
    - [General OCR Datasets(Chinese/English)](./doc/doc_en/datasets_en.md)
    - [HandWritten_OCR_Datasets(Chinese)](./doc/doc_en/handwritten_datasets_en.md)
    - [Various OCR Datasets(multilingual)](./doc/doc_en/vertical_and_multilingual_datasets_en.md)
- [Visualization](#Visualization)
- [New language requests](#language_requests)
- [FAQ](./doc/doc_en/FAQ_en.md)
- [Community](#Community)
- [References](./doc/doc_en/reference_en.md)
</div>
<a name="language_requests"></a>
## Guideline for new language requests
If you want to request support for a new language, a PR with the following 2 files is needed:
1. In the folder [ppocr/utils/dict](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/ppocr/utils/dict),
submit a dict text file to this path, named `{language}_dict.txt`, that contains a list of all characters. Please follow the format of the other files in that folder.
2. In the folder [ppocr/utils/corpus](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/ppocr/utils/corpus),
submit a corpus file to this path, named `{language}_corpus.txt`, that contains a list of words in your language.
At least 50000 words per language are recommended; of course, the more, the better.
If your language has unique elements, please let us know in advance in any convenient way, for example via useful links or Wikipedia articles.
For more details, please refer to the [Multilingual OCR Development Plan](https://github.com/PaddlePaddle/PaddleOCR/issues/1048).
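As a rough illustration of the expected dict format (one character per line, no duplicates), a submission could be sanity-checked with a small script like the following; the helper and the file name are hypothetical, not part of the repo:

```python
# Hypothetical sanity check for a {language}_dict.txt submission.
# Assumes one character (or one multi-codepoint cluster) per line.
import io

def check_dict(path):
    seen = set()
    with io.open(path, "r", encoding="utf-8") as f:
        for lineno, line in enumerate(f, 1):
            ch = line.rstrip("\n")
            if not ch:
                raise ValueError("empty entry at line %d" % lineno)
            if ch in seen:
                raise ValueError("duplicate entry %r at line %d" % (ch, lineno))
            seen.add(ch)
    return len(seen)

print(check_dict("occitan_dict.txt"))  # number of distinct characters
```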
<a name="LICENSE"></a> <a name="LICENSE"></a>
## License
@@ -152,3 +178,7 @@ We welcome all the contributions to PaddleOCR and appreciate for your feedback v
- Thanks [authorfu](https://github.com/authorfu) for contributing the Android demo and [xiadeye](https://github.com/xiadeye) for contributing the iOS demo, respectively.
- Thanks [BeyondYourself](https://github.com/BeyondYourself) for contributing many great suggestions and simplifying part of the code style.
- Thanks [tangmq](https://gitee.com/tangmq) for contributing Dockerized deployment services to PaddleOCR and supporting the rapid release of callable RESTful API services.
- Thanks [lijinhan](https://github.com/lijinhan) for contributing a new approach, a Java SpringBoot client, for calling the Hubserving deployment.
- Thanks [Mejans](https://github.com/Mejans) for contributing the Occitan corpus and character set.
- Thanks [LKKlein](https://github.com/LKKlein) for contributing a new deployment package in the Go programming language.
- Thanks [Evezerest](https://github.com/Evezerest), [ninetailskim](https://github.com/ninetailskim), [edencfc](https://github.com/edencfc), [BeyondYourself](https://github.com/BeyondYourself) and [1084667371](https://github.com/1084667371) for contributing a new data annotation tool, PPOCRLabel.
@@ -4,8 +4,8 @@
PaddleOCR aims to create a rich, leading, and practical OCR tool library that helps users train better models and apply them in practice.

**Recent updates**
- 2020.11.30 [FAQ](./doc/doc_ch/FAQ.md) added 5 new high-frequency questions, for a total of 119 questions and answers; updates are planned every Monday, so stay tuned.
- 2020.11.25 Added the semi-automatic annotation tool [PPOCRLabel](./PPOCRLabel/README.md), which helps developers complete annotation tasks efficiently and whose output format plugs directly into PP-OCR training.
- 2020.9.22 Updated the PP-OCR technical article, https://arxiv.org/abs/2009.09941
- 2020.9.19 Released the ultra-lightweight compressed ppocr_mobile_slim series models, 3.5M overall (see [PP-OCR Pipeline](#PP-OCR)), suitable for mobile deployment. [Model downloads](#模型下载)
- 2020.9.17 Released the ultra-lightweight ppocr_mobile series and the general ppocr_server series Chinese/English OCR models, with accuracy comparable to commercial products. [Model downloads](#模型下载)
@@ -14,7 +14,6 @@ PaddleOCR aims to create a rich, leading, and practical OCR tool library that helps
- 2020.8.21 Updated the replay and PPT of the August 18 Bilibili live course, Lesson 2: an easy-to-use OCR toolkit. [Get them here](https://aistudio.baidu.com/aistudio/education/group/info/1519)
- [More](./doc/doc_ch/update.md)

## Features
- High-quality pretrained models of the PPOCR series with accurate recognition results
@@ -55,7 +54,7 @@ PaddleOCR aims to create a rich, leading, and practical OCR tool library that helps
<img src="./doc/ocr-android-easyedge.png" width = "200" height = "200" />
</div>
- Code experience: [Quick Start with the Chinese OCR Model](./doc/doc_ch/quickstart.md)
<a name="模型下载"></a>
## PP-OCR 1.1 Series Model List (updated September 17)
@@ -85,22 +84,24 @@ PaddleOCR aims to create a rich, leading, and practical OCR tool library that helps
    - [Quick inference with the pip-installed whl package](./doc/doc_ch/whl.md)
    - [Inference with the Python prediction engine](./doc/doc_ch/inference.md)
    - [Inference with the C++ prediction engine](./deploy/cpp_infer/readme.md)
    - [Serving deployment](./deploy/hubserving/readme.md)
    - [On-device deployment](./deploy/lite/readme.md)
    - [Model quantization](./deploy/slim/quantization/README.md)
    - [Model pruning](./deploy/slim/prune/README.md)
    - [Benchmark](./doc/doc_ch/benchmark.md)
- Data annotation and synthesis
    - [Semi-automatic annotation tool PPOCRLabel](./PPOCRLabel/README.md)
    - [Common data annotation tools](./doc/doc_ch/data_annotation.md)
    - [Common data synthesis tools](./doc/doc_ch/data_synthesis.md)
- Datasets
    - [General Chinese/English OCR datasets](./doc/doc_ch/datasets.md)
    - [Handwritten Chinese OCR datasets](./doc/doc_ch/handwritten_datasets.md)
    - [Vertical and multilingual OCR datasets](./doc/doc_ch/vertical_and_multilingual_datasets.md)
- [Visualization](#效果展示)
- FAQ
    - [[Featured] 10 selected OCR questions](./doc/doc_ch/FAQ.md)
    - [[Theory] 29 general OCR questions](./doc/doc_ch/FAQ.md)
    - [[Practice] 80 PaddleOCR practical questions](./doc/doc_ch/FAQ.md)
- [Technical discussion group](#欢迎加入PaddleOCR技术交流群)
- [References](./doc/doc_ch/reference.md)
- [License](#许可证书)
@@ -152,3 +153,6 @@ PP-OCR is a practical ultra-lightweight OCR system, mainly composed of DB text detection, detection box
- Many thanks to [authorfu](https://github.com/authorfu) for contributing the Android demo and [xiadeye](https://github.com/xiadeye) for contributing the iOS demo.
- Many thanks to [BeyondYourself](https://github.com/BeyondYourself) for the many great suggestions to PaddleOCR and for simplifying part of the PaddleOCR code style.
- Many thanks to [tangmq](https://gitee.com/tangmq) for adding Dockerized deployment services to PaddleOCR, supporting the rapid release of callable RESTful API services.
- Many thanks to [lijinhan](https://github.com/lijinhan) for adding a Java SpringBoot client that calls the OCR Hubserving interface to use the OCR service deployment.
- Many thanks to [Mejans](https://github.com/Mejans) for adding the dictionary and corpus of the new language Occitan to PaddleOCR.
- Many thanks to [Evezerest](https://github.com/Evezerest), [ninetailskim](https://github.com/ninetailskim), [edencfc](https://github.com/edencfc), [BeyondYourself](https://github.com/BeyondYourself) and [1084667371](https://github.com/1084667371) for contributing the complete code of PPOCRLabel.
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: ctc
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
  pretrain_weights:
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: ctc
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
  pretrain_weights:
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: attention
  tps: true
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: ctc
  tps: true
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: ctc
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
  pretrain_weights:
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: ctc
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
  pretrain_weights:
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: attention
  tps: true
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: ctc
  tps: true
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
......
@@ -12,6 +12,7 @@ Global:
  image_shape: [1, 64, 256]
  max_text_length: 25
  character_type: en
  character_dict_path:
  loss_type: srn
  num_heads: 8
  average_window: 0.15
......
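These English recognition configs gain an explicit but empty `character_dict_path`. A rough sketch of the convention this relies on, assuming PaddleOCR's usual behavior for `character_type: en` (an empty path falls back to a built-in lowercase-alphanumeric charset; the helper below is illustrative, not repo code):

```python
# Hypothetical sketch of charset resolution when character_dict_path is empty.
# Assumes the common convention: 'en' falls back to digits + lowercase letters.
def resolve_charset(character_type, character_dict_path=None):
    if character_dict_path:  # an explicit dictionary file wins
        with open(character_dict_path, "r", encoding="utf-8") as f:
            return [line.rstrip("\n") for line in f if line.rstrip("\n")]
    if character_type == "en":
        return list("0123456789abcdefghijklmnopqrstuvwxyz")
    raise ValueError("character_dict_path is required for type %r" % character_type)

print(len(resolve_charset("en")))  # 36
```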
@@ -57,7 +57,7 @@ def archives = [
    ],
    [
        'src' : 'https://paddleocr.bj.bcebos.com/deploy/lite/ocr_v1_for_cpu.tar.gz',
        'dest' : 'src/main/assets/models'
    ]
]
......
@@ -53,11 +53,18 @@ public class OCRPredictorNative {
    }

    public void destory() {
        if (nativePointer > 0) {
            release(nativePointer);
            nativePointer = 0;
        }
    }

    protected native long init(String detModelPath, String recModelPath, String clsModelPath, int threadNum, String cpuMode);

    protected native float[] forward(long pointer, float[] buf, float[] ddims, Bitmap originalImage);

    protected native void release(long pointer);

    private ArrayList<OcrResultModel> postprocess(float[] raw) {
        ArrayList<OcrResultModel> results = new ArrayList<OcrResultModel>();
......
@@ -135,7 +135,7 @@ public class Predictor {
    public void releaseModel() {
        if (paddlePredictor != null) {
            paddlePredictor.destory();
            paddlePredictor = null;
        }
        isLoaded = false;
......
@@ -88,7 +88,7 @@ void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
  else if (resize_w / 32 < 1 + 1e-5)
    resize_w = 32;
  else
    resize_w = resize_w / 32 * 32;
  cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
......
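The change above replaces `(resize_w / 32 - 1) * 32`, which always dropped one extra multiple of 32, with a plain floor to the nearest multiple of 32. A quick check of the two behaviors, as a sketch assuming integer `resize_w` as in the C++ code:

```python
# Compare the old and new width rounding from ResizeImgType0::Run.
def old_round(w):  # previous behavior: drops one extra multiple of 32
    return (w // 32 - 1) * 32

def new_round(w):  # fixed behavior: floor to the nearest multiple of 32
    return w // 32 * 32

for w in (64, 100, 959):
    print(w, old_round(w), new_round(w))
# 64 -> old 32, new 64; 100 -> old 64, new 96; 959 -> old 896, new 928
```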
@@ -20,7 +20,7 @@ git clone https://github.com/PaddlePaddle/PaddleOCR.git
```
b. Go to the Dockerfile directory (note: distinguish between the cpu and gpu versions; the following takes cpu as an example, and for the gpu version simply replace the keyword)
```
cd deploy/docker/hubserving/cpu
```
c. Build the image
```
......
@@ -20,7 +20,7 @@ git clone https://github.com/PaddlePaddle/PaddleOCR.git
```
b. Switch to the Dockerfile directory (note: distinguish between the cpu and gpu versions; the following takes cpu as an example, and for the gpu version simply replace the keyword)
```
cd deploy/docker/hubserving/cpu
```
c. Build the image
```
......
@@ -8,21 +8,25 @@ RUN python3.7 -m pip install paddlepaddle==1.7.2 -i https://pypi.tuna.tsinghua.e
RUN pip3.7 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
RUN git clone https://gitee.com/paddlepaddle/PaddleOCR.git /PaddleOCR
WORKDIR /PaddleOCR
RUN pip3.7 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
RUN mkdir -p /PaddleOCR/inference/
# Download the OCR detection model (light version). To use the general version instead, change ch_ppocr_mobile_v1.1_det_infer to ch_ppocr_server_v1.1_det_infer, and remember to change det_model_dir in deploy/hubserving/ocr_system/params.py
ADD https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/ch_ppocr_mobile_v1.1_det_infer.tar -C /PaddleOCR/inference/
# Download the text direction classifier (light version). If you change the model, remember to change cls_model_dir in deploy/hubserving/ocr_system/params.py
ADD https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_infer.tar /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/ch_ppocr_mobile_v1.1_cls_infer.tar -C /PaddleOCR/inference/
# Download the OCR recognition model (light version). To use the general version instead, change ch_ppocr_mobile_v1.1_rec_infer to ch_ppocr_server_v1.1_rec_infer, and remember to change rec_model_dir in deploy/hubserving/ocr_system/params.py
ADD https://paddleocr.bj.bcebos.com/20-09-22/mobile/rec/ch_ppocr_mobile_v1.1_rec_infer.tar /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/ch_ppocr_mobile_v1.1_rec_infer.tar -C /PaddleOCR/inference/
EXPOSE 8866
CMD ["/bin/bash","-c","hub install deploy/hubserving/ocr_system/ && hub serving start -m ocr_system"]
\ No newline at end of file
@@ -8,21 +8,25 @@ RUN python3.7 -m pip install paddlepaddle-gpu==1.7.2.post107 -i https://pypi.tun
RUN pip3.7 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
RUN git clone https://gitee.com/paddlepaddle/PaddleOCR.git /PaddleOCR
WORKDIR /PaddleOCR
RUN pip3.7 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
RUN mkdir -p /PaddleOCR/inference/
# Download the OCR detection model (light version). To use the general version instead, change ch_ppocr_mobile_v1.1_det_infer to ch_ppocr_server_v1.1_det_infer, and remember to change det_model_dir in deploy/hubserving/ocr_system/params.py
ADD https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/ch_ppocr_mobile_v1.1_det_infer.tar -C /PaddleOCR/inference/
# Download the text direction classifier (light version). If you change the model, remember to change cls_model_dir in deploy/hubserving/ocr_system/params.py
ADD https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_infer.tar /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/ch_ppocr_mobile_v1.1_cls_infer.tar -C /PaddleOCR/inference/
# Download the OCR recognition model (light version). To use the general version instead, change ch_ppocr_mobile_v1.1_rec_infer to ch_ppocr_server_v1.1_rec_infer, and remember to change rec_model_dir in deploy/hubserving/ocr_system/params.py
ADD https://paddleocr.bj.bcebos.com/20-09-22/mobile/rec/ch_ppocr_mobile_v1.1_rec_infer.tar /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/ch_ppocr_mobile_v1.1_rec_infer.tar -C /PaddleOCR/inference/
EXPOSE 8866
CMD ["/bin/bash","-c","hub install deploy/hubserving/ocr_system/ && hub serving start -m ocr_system"]
\ No newline at end of file
@@ -2,7 +2,7 @@
PaddleOCR provides 2 service deployment methods:
- Deployment based on PaddleHub Serving: the code is under "`./deploy/hubserving`"; follow this tutorial.
- Deployment based on PaddleServing: the code is under "`./deploy/pdserving`"; see the [tutorial](../../deploy/pdserving/readme.md) for usage.

# Service deployment based on PaddleHub Serving
......
@@ -2,7 +2,7 @@ English | [简体中文](readme.md)
PaddleOCR provides 2 service deployment methods:
- Based on **PaddleHub Serving**: Code path is "`./deploy/hubserving`". Please follow this tutorial.
- Based on **PaddleServing**: Code path is "`./deploy/pdserving`". Please refer to the [tutorial](../../deploy/pdserving/readme.md) for usage.

# Service deployment based on PaddleHub Serving
......
# Tutorial of PaddleOCR Mobile deployment # Tutorial of PaddleOCR Mobile deployment
This tutorial will introduce how to use [paddle-lite](https://github.com/PaddlePaddle/Paddle-Lite) to deploy PaddleOCR ultra-lightweight Chinese and English detection models on mobile phones.
paddle-lite is a lightweight inference engine for PaddlePaddle.
It provides efficient inference capabilities for mobile phones and IoT devices,
and extensively integrates cross-platform hardware to provide lightweight
deployment solutions for on-device deployment issues.
......
@@ -107,16 +107,18 @@ class OCRService(WebService):
            if ".lod" in x:
                self.tmp_args[x] = fetch_map[x]
        _, rec_res = self.text_classifier.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(rec_res)):
            res.append({
                "direction": rec_res[i][0],
                "confidence": float(rec_res[i][1])
            })
        return res

if __name__ == "__main__":
    ocr_service = OCRService(name="ocr")
    ocr_service.load_model_config(global_args.cls_server_dir)
    ocr_service.init_rec()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
@@ -113,16 +113,18 @@ class OCRService(WebService):
            if ".lod" in x:
                self.tmp_args[x] = fetch_map[x]
        _, rec_res = self.text_classifier.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(rec_res)):
            res.append({
                "direction": rec_res[i][0],
                "confidence": float(rec_res[i][1])
            })
        return res

if __name__ == "__main__":
    ocr_service = OCRService(name="ocr")
    ocr_service.load_model_config(global_args.cls_server_dir)
    ocr_service.init_rec()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
@@ -90,13 +90,15 @@ class DetService(WebService):
    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        outputs = [fetch_map[x] for x in fetch]
        det_res = self.text_detector.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(det_res)):
            res.append({"text_region": det_res[i].tolist()})
        return res

if __name__ == "__main__":
    ocr_service = DetService(name="ocr")
    ocr_service.load_model_config(global_args.det_server_dir)
    ocr_service.init_det()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
@@ -89,13 +89,15 @@ class DetService(WebService):
    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        outputs = [fetch_map[x] for x in fetch]
        det_res = self.text_detector.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(det_res)):
            res.append({"text_region": det_res[i].tolist()})
        return res

if __name__ == "__main__":
    ocr_service = DetService(name="ocr")
    ocr_service.load_model_config(global_args.det_server_dir)
    ocr_service.init_det()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import requests
import json
import cv2
import base64
import os, sys
import time
def cv2_to_base64(image):
    # the bytes read from disk are already an encoded image, so base64 them directly
    return base64.b64encode(image).decode('utf8')

headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:9292/ocr/prediction"
test_img_dir = "../../doc/imgs/"
for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data1 = file.read()
    image = cv2_to_base64(image_data1)
    data = {"feed": [{"image": image}], "fetch": ["res"]}
    r = requests.post(url=url, headers=headers, data=json.dumps(data))
    rjson = r.json()
    print(rjson)
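With the postprocess changes above, the services return a list of per-item dicts instead of parallel arrays. A response printed by this client would therefore look roughly like the following; the values are made up, but the field names come from the diffs above:

```python
# Illustrative shape of r.json() for the chained ocr service (values are made up).
example = {
    "result": [
        {
            "text_region": [[10, 20], [200, 20], [200, 60], [10, 60]],
            "text": "hello",
            "confidence": 0.98
        }
    ]
}
```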
@@ -11,29 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from paddle_serving_client.io import inference_model_to_serving

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str)
    parser.add_argument("--server_dir", type=str, default="serving_server_dir")
    parser.add_argument("--client_dir", type=str, default="serving_client_dir")
    return parser.parse_args()

args = parse_args()
inference_model_dir = args.model_dir
serving_server_dir = os.path.join(args.model_dir, args.server_dir)
serving_client_dir = os.path.join(args.model_dir, args.client_dir)
feed_var_names, fetch_var_names = inference_model_to_serving(
    inference_model_dir, serving_server_dir, serving_client_dir, model_filename="model", params_filename="params")
print("success!")
@@ -44,11 +44,11 @@ class TextSystemHelper(TextSystem):
        if self.use_angle_cls:
            self.clas_client = Debugger()
            self.clas_client.load_model_config(
                global_args.cls_server_dir, gpu=True, profile=False)
            self.text_classifier = TextClassifierHelper(args)
        self.det_client = Debugger()
        self.det_client.load_model_config(
            global_args.det_server_dir, gpu=True, profile=False)
        self.fetch = ["save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"]
    def preprocess(self, img):
@@ -60,6 +60,7 @@ class TextSystemHelper(TextSystem):
            return None, None
        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        self.dt_boxes = dt_boxes
        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(img, tmp_box)
@@ -100,16 +101,20 @@ class OCRService(WebService):
            if ".lod" in x:
                self.tmp_args[x] = fetch_map[x]
        rec_res = self.text_system.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(rec_res)):
            tmp_res = {
                "text_region": self.text_system.dt_boxes[i].tolist(),
                "text": rec_res[i][0],
                "confidence": float(rec_res[i][1])
            }
            res.append(tmp_res)
        return res

if __name__ == "__main__":
    ocr_service = OCRService(name="ocr")
    ocr_service.load_model_config(global_args.rec_server_dir)
    ocr_service.init_rec()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
@@ -42,12 +42,14 @@ class TextSystemHelper(TextSystem):
        if self.use_angle_cls:
            self.clas_client = Client()
            self.clas_client.load_client_config(
                os.path.join(args.cls_client_dir, "serving_client_conf.prototxt")
            )
            self.clas_client.connect(["127.0.0.1:9294"])
            self.text_classifier = TextClassifierHelper(args)
        self.det_client = Client()
        self.det_client.load_client_config(
            os.path.join(args.det_client_dir, "serving_client_conf.prototxt")
        )
        self.det_client.connect(["127.0.0.1:9293"])
        self.fetch = ["save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"]
@@ -56,11 +58,11 @@ class TextSystemHelper(TextSystem):
        fetch_map = self.det_client.predict(feed, fetch)
        outputs = [fetch_map[x] for x in fetch]
        dt_boxes = self.text_detector.postprocess(outputs, self.tmp_args)
        if dt_boxes is None:
            return None, None
        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        self.dt_boxes = dt_boxes
        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(img, tmp_box)
@@ -69,7 +71,6 @@ class TextSystemHelper(TextSystem):
        feed, fetch, self.tmp_args = self.text_classifier.preprocess(
            img_crop_list)
        fetch_map = self.clas_client.predict(feed, fetch)
        outputs = [fetch_map[x] for x in self.text_classifier.fetch]
        for x in fetch_map.keys():
            if ".lod" in x:
@@ -102,16 +103,20 @@ class OCRService(WebService):
            if ".lod" in x:
                self.tmp_args[x] = fetch_map[x]
        rec_res = self.text_system.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(rec_res)):
            tmp_res = {
                "text_region": self.text_system.dt_boxes[i].tolist(),
                "text": rec_res[i][0],
                "confidence": float(rec_res[i][1])
            }
            res.append(tmp_res)
        return res

if __name__ == "__main__":
    ocr_service = OCRService(name="ocr")
    ocr_service.load_model_config(global_args.rec_server_dir)
    ocr_service.init_rec()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import requests
import json
import cv2
import base64
import os, sys
import time
def cv2_to_base64(image):
    # the bytes read from disk are already an encoded image, so base64 them directly
    return base64.b64encode(image).decode('utf8')

headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:9292/ocr/prediction"
test_img_dir = "../../doc/imgs/"
for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data1 = file.read()
    image = cv2_to_base64(image_data1)
    data = {"feed": [{"image": image}], "fetch": ["res"]}
    r = requests.post(url=url, headers=headers, data=json.dumps(data))
    rjson = r.json()
    print(rjson)
@@ -14,7 +14,8 @@ def read_params():
    #params for text detector
    cfg.det_algorithm = "DB"
    cfg.det_server_dir = "../../inference/ch_ppocr_mobile_v1.1_det_infer/serving_server_dir"
    cfg.det_client_dir = "../../inference/ch_ppocr_mobile_v1.1_det_infer/serving_client_dir"
    cfg.det_max_side_len = 960

    #DB params
@@ -29,19 +30,21 @@ def read_params():
    #params for text recognizer
    cfg.rec_algorithm = "CRNN"
    cfg.rec_server_dir = "../../inference/ch_ppocr_mobile_v1.1_rec_infer/serving_server_dir"
    cfg.rec_client_dir = "../../inference/ch_ppocr_mobile_v1.1_rec_infer/serving_client_dir"
    cfg.rec_image_shape = "3, 32, 320"
    cfg.rec_char_type = 'ch'
    cfg.rec_batch_num = 30
    cfg.max_text_length = 25
    cfg.rec_char_dict_path = "../../ppocr/utils/ppocr_keys_v1.txt"
    cfg.use_space_char = True

    #params for text classifier
    cfg.use_angle_cls = True
    cfg.cls_server_dir = "../../inference/ch_ppocr_mobile_v1.1_cls_infer/serving_server_dir"
    cfg.cls_client_dir = "../../inference/ch_ppocr_mobile_v1.1_cls_infer/serving_client_dir"
    cfg.cls_image_shape = "3, 48, 192"
    cfg.label_list = ['0', '180']
    cfg.cls_batch_num = 30
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../../')))
from ppocr.utils.utility import initial_logger
logger = initial_logger()
import cv2
import numpy as np
import time
from PIL import Image
from ppocr.utils.utility import get_image_file_list
from tools.infer.utility import draw_ocr, draw_boxes
import requests
import json
import base64
def cv2_to_base64(image):
    return base64.b64encode(image).decode('utf8')

def draw_server_result(image_file, res):
    img = cv2.imread(image_file)
    image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    if len(res) == 0:
        return np.array(image)
    keys = res[0].keys()
    if 'text_region' not in keys:  # for rec or clas, the draw function is invalid
        logger.info("draw function is invalid for rec or clas!")
        return None
    elif 'text' not in keys:  # for ocr_det
        logger.info("draw text boxes only!")
        boxes = []
        for dno in range(len(res)):
            boxes.append(res[dno]['text_region'])
        boxes = np.array(boxes)
        draw_img = draw_boxes(image, boxes)
        return draw_img
    else:  # for ocr_system
        logger.info("draw boxes and texts!")
        boxes = []
        texts = []
        scores = []
        for dno in range(len(res)):
            boxes.append(res[dno]['text_region'])
            texts.append(res[dno]['text'])
            scores.append(res[dno]['confidence'])
        boxes = np.array(boxes)
        scores = np.array(scores)
        draw_img = draw_ocr(image, boxes, texts, scores, drop_score=0.5, font_path="../../doc/simfang.ttf")
        return draw_img

def main(image_path):
    image_file_list = get_image_file_list(image_path)
    is_visualize = True
    headers = {"Content-type": "application/json"}
    url = "http://127.0.0.1:9292/ocr/prediction"
    cnt = 0
    total_time = 0
    for image_file in image_file_list:
        img = open(image_file, 'rb').read()
        if img is None:
            logger.info("error in loading image:{}".format(image_file))
            continue

        # send the HTTP request
        starttime = time.time()
        data = {"feed": [{"image": cv2_to_base64(img)}], "fetch": ["res"]}
        r = requests.post(url=url, headers=headers, data=json.dumps(data))
        elapse = time.time() - starttime
        total_time += elapse
        logger.info("Predict time of %s: %.3fs" % (image_file, elapse))
        res = r.json()['result']
        logger.info(res)

        if is_visualize:
            draw_img = draw_server_result(image_file, res)
            if draw_img is not None:
                draw_img_save = "./server_results/"
                if not os.path.exists(draw_img_save):
                    os.makedirs(draw_img_save)
                cv2.imwrite(
                    os.path.join(draw_img_save, os.path.basename(image_file)),
                    draw_img[:, :, ::-1])
                logger.info("The visualized image saved in {}".format(
                    os.path.join(draw_img_save, os.path.basename(image_file))))
        cnt += 1
        if cnt % 100 == 0:
            logger.info("{} processed".format(cnt))
    logger.info("avg time cost: {}".format(float(total_time) / cnt))

if __name__ == '__main__':
    if len(sys.argv) != 2:
        logger.info("Usage: %s image_path" % sys.argv[0])
    else:
        image_path = sys.argv[1]
        main(image_path)
[English](readme_en.md) | 简体中文
PaddleOCR provides 2 service deployment methods:
- Deployment based on PaddleHub Serving: the code is under "`./deploy/hubserving`"; see the [tutorial](../hubserving/readme.md) for usage
- Deployment based on PaddleServing: the code is under "`./deploy/pdserving`"; follow this tutorial.

# Paddle Serving Service Deployment

This tutorial introduces the detailed steps for deploying the PaddleOCR online prediction service based on [Paddle Serving](https://github.com/PaddlePaddle/Serving).

- [Quick Start](#快速启动服务)
    - [1. Prepare the Environment](#准备环境)
    - [2. Convert the Model](#转换模型)
    - [3. Start the Service](#启动服务)
- [Send Prediction Requests](#发送预测请求)

The pdserving deployment directory provides four kinds of service tools: `detection`, `direction classification`, `recognition`, and the `chained pipeline`; choose the one that fits your needs. The directory structure is as follows:
```
deploy/pdserving/
└─  det_local_server.py     fast version     detection server
└─  det_rpc_server.py       standard version detection server
└─  clas_local_server.py    fast version     direction classifier server
└─  clas_rpc_server.py      standard version direction classifier server
└─  rec_local_server.py     fast version     recognition server
└─  rec_rpc_server.py       standard version recognition server
└─  ocr_local_server.py     fast version     chained server
└─  ocr_rpc_server.py       standard version chained server
└─  pdserving_client.py     client
└─  params.py               configuration file
```
<a name="快速启动服务"></a>
## Quick Start
<a name="准备环境"></a>
### 1. Prepare the Environment
Environment requirements:

- **CUDA version: 9.X/10.X**
- **cuDNN version: 7.X**
- **Operating system: Linux/Windows**
- **Python version: 2.7/3.5/3.6/3.7**

**Python installation guide:**

Some of Serving's OCR features are still under test, so here we point to the [Serving latest packages](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
Choose the whl packages that match your environment; for example, for Python 3.5, run the following commands:
```
# Install the server side; choose one of CPU/GPU
# GPU server
# CUDA 9
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py3-none-any.whl
# CUDA 10
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py3-none-any.whl
# CPU server
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.0.0-py3-none-any.whl

# Install the client and App packages (shared by CPU and GPU)
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.0.0-cp35-none-any.whl https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.0.0-py3-none-any.whl

# Install other dependencies
pip3.5 install func-timeout
```
<a name="转换模型"></a>
## 2. Convert the Model
Paddle Serving cannot directly deploy a training checkpoint or an inference model. A Serving model consists of two folders that hold the client-side and server-side configuration. This section explains how to convert an inference model into a model that Paddle Serving can deploy.

**The text detection model `ch_ppocr_mobile_v1.1_det_infer` is used as the example below; the text recognition model and the direction classifier are converted in the same way.**

First download the inference model:
```shell
wget -P ./inference/ https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar && tar xf ./inference/ch_ppocr_mobile_v1.1_det_infer.tar -C ./inference/
```
Then run the following python script to convert it; the `model_dir` parameter specifies the path of the inference model to be converted:
```
python deploy/pdserving/inference_to_serving.py --model_dir ./inference/ch_ppocr_mobile_v1.1_det_infer
```
This generates the client and server model configurations under the `ch_ppocr_mobile_v1.1_det_infer` directory, with the following structure:
```
/ch_ppocr_mobile_v1.1_det_infer/
├── serving_client_dir # client configuration folder
└── serving_server_dir # server configuration folder
```
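Since the recognition model and the direction classifier are converted the same way, the three conversions can also be scripted in one loop. A sketch, assuming the three mobile models have been downloaded and extracted to `./inference/` as above; the call signature is the one used by `inference_to_serving.py` in this diff:

```python
# Convert all three inference models to Serving format in one go (sketch).
import os
from paddle_serving_client.io import inference_model_to_serving

for name in ("ch_ppocr_mobile_v1.1_det_infer",
             "ch_ppocr_mobile_v1.1_cls_infer",
             "ch_ppocr_mobile_v1.1_rec_infer"):
    model_dir = os.path.join("./inference", name)
    inference_model_to_serving(
        model_dir,
        os.path.join(model_dir, "serving_server_dir"),
        os.path.join(model_dir, "serving_client_dir"),
        model_filename="model", params_filename="params")
```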
<a name="启动服务"></a>
## 3. Start the Service
Depending on your needs, you can start either the `standard version` or the `fast version` of the service; the two are compared in the table below:

|Version|Characteristics|Suitable scenarios|
|-|-|-|
|Standard|High stability, distributed deployment|High-throughput, cross-datacenter deployment; Linux only|
|Fast|Easy to deploy, fast prediction|Scenarios demanding fast prediction and quick iteration; supports Linux/Windows|

**Step 1. Set environment variables**

```
# the order of the following two steps must not be swapped
export PYTHONPATH=$PWD:$PYTHONPATH
cd deploy/pdserving
```

**Step 2. Modify the configuration parameters**

The configuration parameters live in `params.py`, shown below. Modify them as needed, for example to change model paths or post-processing parameters.
```
def read_params():
    cfg = Config()

    # use gpu
    cfg.use_gpu = False  # whether to use GPU; False means run on CPU
    cfg.use_pdserving = True  # must be True when deploying with Paddle Serving

    # params for text detector
    cfg.det_algorithm = "DB"
    cfg.det_server_dir = "../../inference/ch_ppocr_mobile_v1.1_det_infer/serving_server_dir"
    cfg.det_client_dir = "../../inference/ch_ppocr_mobile_v1.1_det_infer/serving_client_dir"
    cfg.det_max_side_len = 960

    # DB params
    cfg.det_db_thresh = 0.3
    cfg.det_db_box_thresh = 0.5
    cfg.det_db_unclip_ratio = 2.0

    # EAST params
    cfg.det_east_score_thresh = 0.8
    cfg.det_east_cover_thresh = 0.1
    cfg.det_east_nms_thresh = 0.2

    # params for text recognizer
    cfg.rec_algorithm = "CRNN"
    cfg.rec_server_dir = "../../inference/ch_ppocr_mobile_v1.1_rec_infer/serving_server_dir"
    cfg.rec_client_dir = "../../inference/ch_ppocr_mobile_v1.1_rec_infer/serving_client_dir"

    cfg.rec_image_shape = "3, 32, 320"
    cfg.rec_char_type = 'ch'
    cfg.rec_batch_num = 30
    cfg.max_text_length = 25

    cfg.rec_char_dict_path = "../../ppocr/utils/ppocr_keys_v1.txt"
    cfg.use_space_char = True

    # params for text classifier
    cfg.use_angle_cls = True
    cfg.cls_server_dir = "../../inference/ch_ppocr_mobile_v1.1_cls_infer/serving_server_dir"
    cfg.cls_client_dir = "../../inference/ch_ppocr_mobile_v1.1_cls_infer/serving_client_dir"
    cfg.cls_image_shape = "3, 48, 192"
    cfg.label_list = ['0', '180']
    cfg.cls_batch_num = 30
    cfg.cls_thresh = 0.9

    return cfg
```
**Step 3_1. Start a standalone detection or recognition service**

If you only need a detection service or a recognition service, a single command is enough; detection is shown below, and recognition works the same way. For the chained detection + recognition service, skip straight to step 3_2.
```
# Start the text detection service; choose the standard or fast version
python det_rpc_server.py    # standard version, Linux users
python det_local_server.py  # fast version, Windows/Linux users
```
**Step 3_2. Start the chained detection + recognition service**

To build the chained detection + recognition service, the fast version starts the same way as the standalone services in step 3_1, but the standard version differs slightly, as follows:
```
# Standard version, Linux users
# GPU users
# start the detection service
python -m paddle_serving_server_gpu.serve --model ../../inference/ch_ppocr_mobile_v1.1_det_infer/serving_server_dir/ --port 9293 --gpu_id 0
# start the direction classifier service
python -m paddle_serving_server_gpu.serve --model ../../inference/ch_ppocr_mobile_v1.1_cls_infer/serving_server_dir/ --port 9294 --gpu_id 0
# start the chained service
python ocr_rpc_server.py

# CPU users
# start the detection service
python -m paddle_serving_server.serve --model ../../inference/ch_ppocr_mobile_v1.1_det_infer/serving_server_dir/ --port 9293
# start the direction classifier service
python -m paddle_serving_server.serve --model ../../inference/ch_ppocr_mobile_v1.1_cls_infer/serving_server_dir/ --port 9294
# start the chained service
python ocr_rpc_server.py

# Fast version, Windows/Linux users
python ocr_local_server.py
```
<a name="发送预测请求"></a>
## Send Prediction Requests

All of the standalone or chained services above can be accessed with the following client:
```
python pdserving_client.py image_path
```
@@ -153,16 +153,18 @@ class OCRService(WebService):
            if ".lod" in x:
                self.tmp_args[x] = fetch_map[x]
        rec_res = self.text_recognizer.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(rec_res)):
            res.append({
                "text": rec_res[i][0],
                "confidence": float(rec_res[i][1])
            })
        return res

if __name__ == "__main__":
    ocr_service = OCRService(name="ocr")
    ocr_service.load_model_config(global_args.rec_server_dir)
    ocr_service.init_rec()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
@@ -158,16 +158,18 @@ class OCRService(WebService):
            if ".lod" in x:
                self.tmp_args[x] = fetch_map[x]
        rec_res = self.text_recognizer.postprocess(outputs, self.tmp_args)
        res = []
        for i in range(len(rec_res)):
            res.append({
                "text": rec_res[i][0],
                "confidence": float(rec_res[i][1])
            })
        return res

if __name__ == "__main__":
    ocr_service = OCRService(name="ocr")
    ocr_service.load_model_config(global_args.rec_server_dir)
    ocr_service.init_rec()
    if global_args.use_gpu:
        ocr_service.prepare_server(
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import requests
import json
import cv2
import base64
import os, sys
import time
def cv2_to_base64(image):
    # the bytes read from disk are already an encoded image, so base64 them directly
    return base64.b64encode(image).decode('utf8')

headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:9292/ocr/prediction"
test_img_dir = "../../doc/imgs_words/ch/"
for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data1 = file.read()
    image = cv2_to_base64(image_data1)
    data = {"feed": [{"image": image}], "fetch": ["res"]}
    r = requests.post(url=url, headers=headers, data=json.dumps(data))
    print(r.json())
@@ -30,7 +30,7 @@ python setup.py install
```
### 2. Get a pretrained model
Model pruning requires loading a pretrained model. PaddleOCR also provides a series of [models](../../../doc/doc_ch/models_list.md); developers can choose one as needed or use their own model.
### 3. Sensitivity analysis training
......
@@ -24,6 +24,7 @@ sys.path.append(os.path.join(__dir__, '..', '..', '..'))
sys.path.append(os.path.join(__dir__, '..', '..', '..', 'tools'))

import program
import paddle
from paddle import fluid
from ppocr.utils.utility import initial_logger
logger = initial_logger()
@@ -32,6 +33,12 @@ from paddleslim.prune import load_model

def main():
    # Run code with static graph mode.
    try:
        paddle.enable_static()
    except:
        pass

    startup_prog, eval_program, place, config, _ = program.preprocess()
    feeded_var_names, target_vars, fetches_var_name = program.build_export(
......
@@ -19,6 +19,7 @@ from __future__ import print_function
import os
import sys
import numpy as np
import paddle

__dir__ = os.path.dirname(__file__)
sys.path.append(__dir__)
sys.path.append(os.path.join(__dir__, '..', '..', '..'))
@@ -49,6 +50,12 @@ skip_list = [

def main():
    # Run code with static graph mode.
    try:
        paddle.enable_static()
    except:
        pass

    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
......
@@ -25,6 +25,7 @@ sys.path.append(os.path.join(__dir__, '..', '..', '..', 'tools'))
import json
import cv2
import paddle
from paddle import fluid
import paddleslim as slim
from copy import deepcopy
@@ -60,6 +61,12 @@ def eval_function(eval_args, mode='eval'):

def main():
    # Run code with static graph mode.
    try:
        paddle.enable_static()
    except:
        pass

    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
......
@@ -39,6 +39,7 @@ set_paddle_flags(
)

import program
import paddle
from paddle import fluid
from ppocr.utils.utility import initial_logger
logger = initial_logger()
@@ -76,6 +77,11 @@ def main():
        # The decay coefficient of moving average, default is 0.9
        'moving_rate': 0.9,
    }
    # Run code with static graph mode.
    try:
        paddle.enable_static()
    except:
        pass

    startup_prog, eval_program, place, config, alg_type = program.preprocess()
......
@@ -85,6 +85,12 @@ def get_optimizer():

def main():
    # Run code with static graph mode.
    try:
        paddle.enable_static()
    except:
        pass

    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
......
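Note on the recurring guard above: Paddle 2.0 defaults to dynamic graph mode, so each of these entry points calls `paddle.enable_static()` to switch back to static graph execution, and the surrounding try/except keeps the scripts working on Paddle 1.x, where that function does not exist.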
@@ -9,48 +9,42 @@
## PaddleOCR FAQ Collection (continuously updated)

* [Recent Updates (2020.11.30)](#近期更新)
* [[Featured] 10 Selected OCR Questions](#OCR精选10个问题)
* [[Theory] 29 General OCR Questions](#OCR通用问题)
    * [Basics: 7 questions](#基础知识)
    * [Datasets: 7 questions](#数据集2)
    * [Model training/tuning: 7 questions](#模型训练调优2)
    * [Prediction/deployment: 8 questions](#预测部署2)
* [[Practice] 80 PaddleOCR Practical Questions](#PaddleOCR实战问题)
    * [Usage: 20 questions](#使用咨询)
    * [Datasets: 17 questions](#数据集3)
    * [Model training/tuning: 21 questions](#模型训练调优3)
    * [Prediction/deployment: 22 questions](#预测部署3)

<a name="近期更新"></a>
## Recent Updates (2020.11.30)
#### Q3.2.15: What is special about the text annotation tool PPOCRLabel?

**A**: PPOCRLabel is a semi-automatic text annotation tool. It uses PPOCR-based Chinese/English OCR models to pre-predict text detection and recognition results, so users only need to verify and correct them, which greatly improves annotation efficiency. The exported annotations also directly match the data format required for PPOCR training.

#### Q3.2.16: Can the text annotation tool PPOCRLabel use a different model?

**A**: The OCR part of PPOCRLabel is deployed through quick inference with the pip-installed whl package. You can change the model path as described in the whl documentation to adapt the annotation to a specific task: https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md.

#### Q3.2.17: Which operating environments does the text annotation tool PPOCRLabel support?

**A**: PPOCRLabel runs on Linux, Windows, macOS, and other systems. The steps are documented at https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/README.md

#### Q2.2.6: How can I get more data when the amount of training data is small?

**A**: When training data is scarce, you can try the following three ways to obtain more: (1) manually collect more training data, the most direct and effective way; (2) apply basic image processing or transformations with PIL and OpenCV, for example using the ImageFont, Image, and ImageDraw modules of PIL to write text onto backgrounds, or OpenCV rotation/affine transforms and Gaussian filtering; (3) synthesize data with generation algorithms such as pix2pix.

#### Q2.2.7: What characterizes SRNet, the text synthesis method from the paper "Editing Text in the Wild"?

**A**: SRNet synthesizes text data by borrowing the image-to-image translation and style-transfer ideas of GANs. Unlike generic GAN methods that use a single branch, SRNet decomposes text synthesis into three simple sub-modules to improve the quality of the synthesized data: a background-free text style transfer module, a background extraction module, and a fusion module. PaddleOCR plans to open-source a practical SRNet-based model in mid-December 2020.
<a name="OCR精选10个问题"></a> <a name="OCR精选10个问题"></a>
...@@ -142,6 +136,8 @@ ...@@ -142,6 +136,8 @@
<a name="OCR通用问题"></a> <a name="OCR通用问题"></a>
## 【理论篇】OCR通用问题 ## 【理论篇】OCR通用问题
<a name="基础知识"></a>
### 基础知识 ### 基础知识
#### Q2.1.1:CRNN能否识别两行的文字?还是说必须一行? #### Q2.1.1:CRNN能否识别两行的文字?还是说必须一行?
@@ -163,6 +159,14 @@

#### Q2.1.5: The multilingual dictionaries mix several languages. Is there a rationale for this? How much accuracy is lost by merging them into one dictionary?

**A**: Merging everything into one dictionary makes the final FC layer very large and increases the model size. If you have special needs, you can merge the dictionaries of the languages you need and train a model; after merging, introducing too many visually similar characters may cost accuracy, and character balance may also need attention. For now, PaddleOCR keeps the language dictionaries separate.
#### Q2.1.6: In preprocessing, why are the image height and width resized to multiples of 32?

**A**: Taking the ResNet backbone used in detection as an example, the input image goes through five 2x downsamplings, 32x in total, so the input size is recommended to be a multiple of 32.
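For illustration, a minimal sketch (assuming an OpenCV-style HxWxC numpy image) that pads an image so both sides become multiples of 32:

```python
import numpy as np

def pad_to_multiple_of_32(img):
    # Zero-pad the bottom/right edges so height and width are both divisible
    # by 32, matching the backbone's five 2x downsamplings.
    h, w = img.shape[:2]
    pad_h = int(np.ceil(h / 32.0) * 32)
    pad_w = int(np.ceil(w / 32.0) * 32)
    padded = np.zeros((pad_h, pad_w, img.shape[2]), dtype=img.dtype)
    padded[:h, :w] = img
    return padded
```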
#### Q2.1.7: In minority languages such as Thai, some characters occupy two or even three code points. How should the dictionary be built?

**A**: Treat each multi-code-point character as a single character; each line of the dictionary holds one character.
<a name="数据集2"></a>
### Datasets

#### Q2.2.1: For models that support spaces, do spaces need to be annotated? Should every space in the middle be annotated?
@@ -183,9 +187,17 @@

#### Q2.2.5: How can tightly packed text lines be detected accurately?

**A**: When using a segmentation-based method such as DB to detect dense text lines, it is best to collect some data for training and, during training, to reduce the shrink_ratio parameter used when generating the binary map.
#### Q2.2.6: How can I get more data when training data is scarce?

**A**: When training data is scarce, you can try three ways to get more: (1) collect more real training data, the most direct and effective way; (2) apply basic image processing or transformations with PIL and OpenCV, e.g., render text onto backgrounds with PIL's ImageFont, Image, and ImageDraw modules, or use OpenCV's rotation and affine transforms, Gaussian blur, and so on; (3) synthesize data with generation algorithms such as pix2pix.
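A rough sketch of option (2), assuming a local background image and font file (both are placeholders):

```python
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

bg = Image.open('background.jpg')               # placeholder background image
draw = ImageDraw.Draw(bg)
font = ImageFont.truetype('some_font.ttf', 32)  # placeholder font file
draw.text((10, 10), 'sample text', font=font, fill=(0, 0, 0))

img = cv2.cvtColor(np.array(bg), cv2.COLOR_RGB2BGR)
h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), 5, 1.0)  # small rotation
img = cv2.warpAffine(img, M, (w, h))
img = cv2.GaussianBlur(img, (3, 3), 0)               # mild Gaussian blur
```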
#### Q2.2.7: What characterizes SRNet, the text synthesis method from the paper "Editing Text in the Wild"?

**A**: SRNet borrows the image-to-image translation and style-transfer ideas of GANs to synthesize text data. Unlike generic GAN methods that use only one branch, SRNet decomposes text synthesis into three simple sub-modules to improve the quality of the synthesized data: a background-free text style-transfer module, a background extraction module, and a fusion module. PaddleOCR plans to open-source a practical SRNet-based model in mid-December 2020.
<a name="模型训练调优2"></a>
### Model training and tuning

#### Q2.3.1: How do I swap the backbone for text detection/recognition?
@@ -219,6 +231,15 @@

**A**: Chinese recognition model training does not simply rescale training samples to [3, 32, 320]. Instead, each image is scaled proportionally so its height is 32; if the resulting width is under 320, it is zero-padded, and samples with an aspect ratio above 10 are discarded. At prediction time, a single image is scaled the same way, without the 320-width cap. With multiple images, prediction runs in batches, and each batch's width is chosen dynamically as the longest width within that batch.
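A simplified sketch of that resize-and-pad logic (not the exact PaddleOCR implementation):

```python
import math
import cv2
import numpy as np

def resize_norm_img(img, image_shape=(3, 32, 320)):
    # Scale proportionally to height 32 and zero-pad the width up to 320;
    # samples with aspect ratio > 10 are assumed to be dropped beforehand.
    imgC, imgH, imgW = image_shape
    h, w = img.shape[:2]
    resized_w = min(imgW, int(math.ceil(imgH * w / float(h))))
    resized = cv2.resize(img, (resized_w, imgH))
    padded = np.zeros((imgH, imgW, imgC), dtype=np.float32)
    padded[:, :resized_w, :] = resized
    return padded
```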
#### Q2.3.7: During recognition training, training-set accuracy has reached 90 but validation accuracy stays around 70. What can I do?

**A**: A training accuracy of 90 with a test accuracy around 70 points to overfitting. Two things to try:
(1) Add more augmentation methods or increase the augmentation [probability](https://github.com/PaddlePaddle/PaddleOCR/blob/a501603d54ff5513fc4fc760319472e59da25424/ppocr/data/rec/img_tools.py#L307), which defaults to 0.4.
(2) Increase the [l2 decay value](https://github.com/PaddlePaddle/PaddleOCR/blob/a501603d54ff5513fc4fc760319472e59da25424/configs/rec/ch_ppocr_v1.1/rec_chinese_lite_train_v1.1.yml#L47) in the config.
<a name="预测部署2"></a>
### Inference and deployment

#### Q2.4.1: Is there a good way to handle densely packed text in images?
@@ -262,9 +283,11 @@

**A**: There are not yet many mature academic solutions for tables; you can try segmentation-based approaches from the literature.
<a name="PaddleOCR实战问题"></a> <a name="PaddleOCR实战问题"></a>
## 【实战篇】PaddleOCR实战问题 ## 【实战篇】PaddleOCR实战问题
<a name="使用咨询"></a>
### Usage

#### Q3.1.1: OSError: [WinError 126] The specified module could not be found. shapely import problem on Mac Pro with Python 3.4
@@ -309,7 +332,7 @@

#### Q3.1.11: How does PaddleOCR support horizontal and vertical text at the same time?

**A**: A batch of vertical text was synthesized, rotated 90 degrees counter-clockwise, and added to the training set alongside the horizontal text. At prediction time, the image's aspect ratio determines whether it is vertical; if so, the cropped text is rotated 90 degrees counter-clockwise before being fed into the recognition network.
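A minimal sketch of that rotation step (the aspect-ratio threshold here is illustrative):

```python
import cv2

crop = cv2.imread('vertical_crop.jpg')  # placeholder cropped text image
if crop.shape[0] / float(crop.shape[1]) >= 1.5:  # illustrative "is vertical" check
    # rotate the vertical crop 90 degrees counter-clockwise before recognition
    crop = cv2.rotate(crop, cv2.ROTATE_90_COUNTERCLOCKWISE)
```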
#### Q3.1.12: How do I get the coordinates of the detected text boxes?
@@ -345,7 +368,7 @@

|8.6M ultra-lightweight Chinese OCR model|MobileNetV3+MobileNetV3|det_mv3_db.yml|rec_chinese_lite_train.yml|
|General Chinese OCR model|Resnet50_vd+Resnet34_vd|det_r50_vd_db.yml|rec_chinese_common_train.yml|
#### Q3.1.18: How do I add my own detection algorithm?

**A**: 1. Choose the backbone and head in the corresponding directories under ppocr/modeling; if nothing fits, create new files and add them.
2. Choose the corresponding data processing method under ppocr/data; if nothing fits, create a new file and add it.
3. Create a new file under ppocr/losses and write the loss (a hypothetical skeleton is sketched after this list).
@@ -353,6 +376,16 @@
5. Write the classes and functions added in the four steps above into the configuration, following the yml file.
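For step 3, a purely hypothetical skeleton of such a loss module (the class name, parameter keys, and dict layout are illustrative only, not an existing PaddleOCR interface):

```python
import paddle.fluid as fluid

class MyDetLoss(object):
    """Hypothetical loss for a custom detection algorithm."""

    def __init__(self, params):
        # 'loss_weight' is a made-up config key for illustration
        self.loss_weight = params.get('loss_weight', 1.0)

    def __call__(self, predicts, labels):
        # combine the per-branch losses into one scalar; 'maps' is a placeholder key
        loss = fluid.layers.reduce_mean(
            fluid.layers.square(predicts['maps'] - labels['maps']))
        return {'total_loss': loss * self.loss_weight}
```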
#### Q3.1.19: Training fails with `reader raised an exception`, but what is the actual problem?

**A**: This is usually caused by a malformed annotation file or bad image paths inside it. [tools/train.py](../../tools/train.py) contains a `test_reader` function; use it to check the data format and annotations, and start model training once they pass.

#### Q3.1.20: How does PaddleOCR differ from Baidu's other OCR products?

**A**: PaddleOCR focuses mainly on general-purpose OCR. For vertical-domain needs, you can train your own model with PaddleOCR plus vertical-domain data;
if you lack annotated data or do not want to invest development effort, it is better to call the public APIs directly, which already cover the more common vertical domains.
<a name="数据集3"></a>
### Datasets

#### Q3.2.1: How do I produce data in the format PaddleOCR supports?
@@ -407,11 +440,43 @@

**A**: Judge mainly by the visualized results. The general model tends to detect a whole text line as one box, while the lightweight model may split a single line into two detections; more detections does not mean better quality.
#### Q3.2.10: How was the vertical text (rotated to horizontal) used for crnn+ctc model training generated?

**A**: In the same way as horizontal text synthesis, except that the fonts are replaced with vertical fonts.
#### Q3.2.11: Which annotation tools can be used to label OCR datasets?

**A**: See https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/data_annotation_en.md.
We also plan to release an efficient OCR annotation tool; please keep an eye on upcoming PaddleOCR updates.

#### Q3.2.12: Recognition is poor on some special scenarios, and the data is too scarce to finetune. What can I do?

**A**: You can synthesize data close to your target scenario for training.
We plan to release a scenario-specific text-synthesis tool; please keep an eye on upcoming PaddleOCR updates.

#### Q3.2.13: Special characters (e.g., some punctuation marks) are recognized poorly. What can I do?

**A**: First confirm that the special characters you need are in the dictionary.
If they are in the dictionary but results remain poor, there may be too little recognition data for them; add corresponding data and finetune the model.
#### Q3.2.14: Can PaddleOCR recognize grayscale images?

**A**: The PaddleOCR models all take three-channel input. To work with grayscale images, either read them directly in 3-channel mode,
or convert the single-channel image to a three-channel image before recognition; for example, OpenCV's cvtColor function can convert a grayscale image to an RGB three-channel image.
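For example:

```python
import cv2

# Option 1: read the grayscale file directly as a 3-channel image
img = cv2.imread('gray_image.png', cv2.IMREAD_COLOR)

# Option 2: convert an existing single-channel image to 3 channels
gray = cv2.imread('gray_image.png', cv2.IMREAD_GRAYSCALE)
img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
```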
#### Q3.2.15: What is special about the text annotation tool PPOCRLabel?

**A**: PPOCRLabel is a semi-automatic text annotation tool. It uses PPOCR's Chinese and English OCR models to pre-generate detection and recognition results, which users then only need to check and correct, greatly improving annotation efficiency. The exported annotations are directly in the data format required for PPOCR training.

#### Q3.2.16: Can the text annotation tool PPOCRLabel switch models?

**A**: PPOCRLabel runs OCR through quick inference with the pip-installed whl package. You can change the model path as described in the related documentation to adapt annotation to a specific task. The whl quick-inference documentation is at https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md.

#### Q3.2.17: Which environments can the text annotation tool PPOCRLabel run in?

**A**: PPOCRLabel runs on Linux, Windows, macOS, and other systems. See https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/README.md for the steps.
<a name="模型训练调优3"></a>
### Model training and tuning

#### Q3.3.1: What should I do when the text length exceeds 25?
@@ -436,7 +501,7 @@ unclip_ratio: the expansion ratio of the text box, which affects the box size

**A**: You can kill every process whose command line contains train.py with the script below:

```shell
ps -axu | grep train.py | awk '{print $2}' | xargs kill -9
```
@@ -487,7 +552,8 @@ return paddle.reader.multiprocess_reader(readers, False, queue_size=320)

#### Q3.3.15: Must the dictionary used in training match the one used by the loaded pretrained model?

**A**: It depends. 1. If the recognized character set is unchanged, the training dictionary must match the one you later use for prediction with that model.
2. If the recognized character set changes, the dictionaries may differ, since the last layer is retrained.
#### Q3.3.16: How do I finetune the detection model, e.g., freeze the early layers or train certain layers with a small learning rate?

**A**:
@@ -518,6 +584,17 @@ return paddle.reader.multiprocess_reader(readers, False, queue_size=320)

**A**: 1. Check whether the two models use the same post-processing parameters: the training-time parameters are in the PostProcess section of the config file, and the test model's are in tools/infer/utility.py; in the latest code the two have been kept consistent.
#### Q3.3.21: After finetuning the small model on synthetic data, the results are decent but still worse than the released small inference model. Why?

**A**:
(1) Make sure the config file and the pretrained weights correspond to each other;
(2) finetuning generally requires real data; with synthetic data, the results may even degrade. The recognition inference models released by PaddleOCR were likewise finetuned from the pretrained models on real data, with clear gains;
(3) during training, images whose text exceeds 25 characters are discarded, so check how many images actually take part in training; too few also makes overfitting likely.
<a name="预测部署3"></a>
### Inference and deployment

#### Q3.4.1: How do I pip-install the opt model conversion tool?
@@ -597,3 +674,25 @@ return paddle.reader.multiprocess_reader(readers, False, queue_size=320)

**A**: 1. Memory leaks when using hubserving are a known issue, expected to be fixed in the official paddle 2.0 release; see the discussion in this [issue](https://github.com/PaddlePaddle/PaddleHub/issues/682).
**A**: 2. Memory leaks in C++ inference have been fixed in paddle 2.0rc; install paddle 2.0rc and update PaddleOCR to the latest code.
#### Q3.4.18: Large document images suffer many missed detections. How can this be avoided?

**A**: When the longest side of an image exceeds 960, PaddleOCR rescales it so the longest side is 960 before prediction. For such images, increase the detection size limit by changing det_max_side_len: [tools/infer/utility.py#L45](../../tools/infer/utility.py#L45)
#### Q3.4.19: Predictions from my trained recognition model contain many repeated characters. How do I fix this?

**A**: Check whether the training scale and the prediction scale match. If training used `[3, 32, 320]` and prediction uses `[3, 64, 640]`, you will see a lot of repeated recognition.
#### Q3.4.20: In document scenarios, the DB model misses entire lines. How do I fix this?

**A**: Lower the det_db_box_thresh threshold at prediction time; the default is 0.5, and you can reduce it to 0.3 and observe the effect.
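As a sketch, this threshold (and the det_max_side_len limit from Q3.4.18) can also be passed through the whl interface, assuming this PaddleOCR version forwards these keyword arguments to the predictor:

```python
from paddleocr import PaddleOCR

ocr = PaddleOCR(det_db_box_thresh=0.3,  # default 0.5; lower it to keep faint whole-line boxes
                det_max_side_len=1500)  # default 960; raise it for large document images
result = ocr.ocr('./doc/imgs/11.jpg')
```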
#### Q3.4.21: For my own trained det model, the inference model and the eval model give very different results on the same image. Why?

**A**: This is caused by different image preprocessing. If the det model was trained with an input shape other than the default [600, 600], the eval program preprocesses images the same way as training (the resize is governed by the test_image_shape parameter in xxx_reader.yml), whereas the preprocessing in predict_eval.py is governed by its preprocess_params;
it is best not to pass max_side_len but to pass a test_image_shape of the same size as used in training.
#### Q3.4.22: Training on the CCPD license-plate dataset, training accuracy is high but every test result is wrong. Why?

**A**: Training changed the shape to [3, 70, 220], but at prediction time the image resize compresses the height to 32, which corrupts the test results. Commenting out the [resize code](https://github.com/PaddlePaddle/PaddleOCR/blob/ed4313d611b7708a7763d4612f00cb7f318a0e1f/tools/infer/predict_rec.py#L54-L55) solves the problem.
@@ -47,9 +47,9 @@ docker images
hub.baidubce.com/paddlepaddle/paddle latest-gpu-cuda9.0-cudnn7-dev f56310dcc829
```

**2. Install PaddlePaddle v2.0**
```
python3 -m pip install --upgrade pip

# If your machine has CUDA9 or CUDA10 installed, run the following command to install
python3 -m pip install paddlepaddle-gpu==2.0.0b0 -i https://mirror.baidu.com/pypi/simple
@@ -75,7 +75,7 @@ git clone https://gitee.com/paddlepaddle/PaddleOCR

**4. Install third-party libraries**
```
cd PaddleOCR
python3 -m pip install -r requirements.txt
```

Note: on Windows, it is recommended to download the shapely installation package from [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely) to complete installation,
......
@@ -95,5 +95,5 @@ python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" --det_mode

In addition, the tutorials also cover other deployment options for the Chinese OCR model:
- [Inference with the C++ engine](../../deploy/cpp_infer/readme.md)
- [Service deployment](../../deploy/hubserving/readme.md)
- [On-device deployment](../../deploy/lite/readme.md)
@@ -122,11 +122,11 @@ word_dict.txt has one character per line, mapping each character to a numeric index,

`ppocr/utils/dict/french_dict.txt` is a French dictionary with 118 characters

`ppocr/utils/dict/japan_dict.txt` is a Japanese dictionary with 4399 characters

`ppocr/utils/dict/korean_dict.txt` is a Korean dictionary with 3636 characters

`ppocr/utils/dict/german_dict.txt` is a German dictionary with 131 characters

Use them as needed.
......
PaddleOCR provides two ways to deploy it as a service:
- PaddleServing-based deployment: the code lives in "`./deploy/pdserving`"; use it by following this tutorial.
- PaddleHub-Serving-based deployment: the code lives in "`./deploy/hubserving`"; see the [documentation](../../deploy/hubserving/readme.md) for usage.
# Inference with Paddle Serving

Before reading this document, please read [Inference based on the Python inference engine](./inference.md).

As with local prediction, we need to save a copy of the model in a form Paddle Serving can use.
This document first explains how to convert a trained model into a Paddle Serving model, and then covers text detection, text recognition, and the two chained together, all based on the inference engine.

### 1. Prepare the environment

First, install the Paddle Serving components.
We recommend using a GPU for Paddle Serving OCR service deployment.

**CUDA version: 9.X/10.X**

**cuDNN version: 7.X**

**Operating system: Linux/Windows**

**Python version: 2.7/3.5/3.6/3.7**

**Python instructions:**

Some Serving features for OCR are still under test, so here we point to the [Serving latest packages](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
Choose the whl packages that match your environment. Taking Python 3.6 as an example (the client whl below is a cp36 build), run the following commands.
```
# Choose one: CPU or GPU build
# GPU server package
# CUDA 9
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py3-none-any.whl
# CUDA 10
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py3-none-any.whl
# CPU server package
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.0.0-py3-none-any.whl
# Client and App packages (shared by CPU and GPU)
python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.0.0-cp36-none-any.whl https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.0.0-py3-none-any.whl
```
## 2. Converting a trained model to a Serving model

The preceding document, [Inference based on the Python inference engine](./inference.md), showed how to convert a training checkpoint into a Paddle model. A Paddle model is usually a single folder containing the model structure file `model` and the parameter file `params`. A Serving model consists of two folders holding the client-side and server-side configurations.

We take the `ch_rec_r34_vd_crnn` model as an example; download it with:
```
wget --no-check-certificate https://paddleocr.bj.bcebos.com/ch_models/ch_rec_r34_vd_crnn_infer.tar
tar xf ch_rec_r34_vd_crnn_infer.tar
```
Then, following the Serving model conversion tutorial, run the Python script below.
```
python tools/inference_to_serving.py --model_dir ch_rec_r34_vd_crnn
```
This generates the client and server model configurations under `serving_client_dir` and `serving_server_dir`; both folder names can be customized. The final layout is:
```
/ch_rec_r34_vd_crnn/
├── serving_client_dir # client configuration folder
└── serving_server_dir # server configuration folder
```
## 3. Serving inference with the text detection model

You can start the service in either the standard edition or the fast edition, depending on your needs; the two are compared below:

|Edition|Characteristics|Suitable scenarios|
|-|-|-|
|Standard|High stability, distributed deployment|High-throughput services that need cross-datacenter deployment|
|Fast|Easy to deploy, fast prediction|Latency-sensitive, fast-iterating scenarios; the only option for Windows users|

The commands below cover both the fast and the standard edition. Note that the standard edition runs only on Linux, while the fast edition supports both Linux and Windows.

Text detection inference uses the DB model's configuration parameters by default; recognition defaults to CRNN.

The configuration lives in `params.py`; the relevant section is quoted below, and any changes should be made in that same file.
```
def read_params():
    cfg = Config()
    #use gpu
    cfg.use_gpu = False  # whether to use GPU
    cfg.use_pdserving = True  # whether to use Paddle Serving; must be True

    #params for text detector
    cfg.det_algorithm = "DB"  # detection algorithm: DB, EAST, etc.
    cfg.det_model_dir = "./det_mv_server/"  # detection model path
    cfg.det_max_side_len = 960

    #DB params
    cfg.det_db_thresh = 0.3
    cfg.det_db_box_thresh = 0.5
    cfg.det_db_unclip_ratio = 2.0

    #EAST params
    cfg.det_east_score_thresh = 0.8
    cfg.det_east_cover_thresh = 0.1
    cfg.det_east_nms_thresh = 0.2

    #params for text recognizer
    cfg.rec_algorithm = "CRNN"  # recognition algorithm: CRNN, RARE, etc.
    cfg.rec_model_dir = "./ocr_rec_server/"  # recognition model path

    cfg.rec_image_shape = "3, 32, 320"
    cfg.rec_char_type = 'ch'
    cfg.rec_batch_num = 30
    cfg.max_text_length = 25

    cfg.rec_char_dict_path = "./ppocr_keys_v1.txt"  # recognition dictionary file
    cfg.use_space_char = True

    #params for text classifier
    cfg.use_angle_cls = True  # whether to enable the direction classifier
    cfg.cls_model_dir = "./ocr_clas_server/"  # classification model path
    cfg.cls_image_shape = "3, 48, 192"
    cfg.label_list = ['0', '180']
    cfg.cls_batch_num = 30
    cfg.cls_thresh = 0.9

    return cfg
```
Unlike local prediction, Serving requires a client and a server, so each of the tutorials below involves two sets of commands.

Before running the server start command, export PYTHONPATH to the project root:
```
export PYTHONPATH=$PWD:$PYTHONPATH
cd deploy/pdserving
```
To make the demo easy to reproduce, we provide Serving models for the Chinese and English ultra-lightweight OCR model (8.1M):
```
wget --no-check-certificate https://paddleocr.bj.bcebos.com/deploy/pdserving/ocr_pdserving_suite.tar.gz
tar xf ocr_pdserving_suite.tar.gz
```
### 1. Ultra-lightweight Chinese detection model inference

For ultra-lightweight Chinese detection inference, start the server with one of the following commands:
```
# Start only one of the two, depending on your environment
python det_rpc_server.py  # standard edition, Linux users
python det_local_server.py  # fast edition, Windows/Linux users
```
Client:
```
python det_web_client.py
```
Serving inference differs from local prediction in that the client sends a request and the server returns the coordinates of the detected text boxes; no post-processed image is produced, so only coordinate values are visible.

## 4. Serving inference with the text recognition model

The following covers ultra-lightweight Chinese recognition model inference, CTC-loss-based recognition inference, and attention-loss-based recognition inference. For Chinese text recognition, prefer CTC-loss-based models; in practice, attention-loss-based models perform worse than CTC-loss-based ones. In addition, if you modified the text dictionary during training, refer to the section below on inference with a custom dictionary.

### 1. Ultra-lightweight Chinese recognition model inference

For ultra-lightweight Chinese recognition inference, start the server with one of the following commands:
Mind the value of `--use_gpu` in params.py.
```
# Start only one of the two, depending on your environment
python rec_rpc_server.py  # standard edition, Linux users
python rec_local_server.py  # fast edition, Windows/Linux users
```
To use the CPU version, also add `--use_gpu False`.

Client:
```
python rec_web_client.py
```
![](../imgs_words/ch/word_4.jpg)
After running the command, the prediction for the image above (the recognized text and its score) is printed to the screen, for example:
```
{u'result': {u'score': [u'0.89547354'], u'pred_text': ['实力活力']}}
```
## 5. Direction classification model inference

The following covers direction classification model inference.

### 1. Direction classification model inference

For direction classification inference, start the server with one of the following commands:
Mind the value of `--use_gpu` in params.py.
```
# Start only one of the two, depending on your environment
python clas_rpc_server.py  # standard edition, Linux users
python clas_local_server.py  # fast edition, Windows/Linux users
```
Client:
```
python rec_web_client.py
```
![](../imgs_words/ch/word_4.jpg)
After running the command, the prediction for the image above (the classified direction and its score) is printed to the screen, for example:
```
{u'result': {u'direction': [u'0'], u'score': [u'0.9999963']}}
```
## 6. Chained Serving inference: text detection, direction classification, and text recognition

### 1. Ultra-lightweight Chinese OCR model inference

When running prediction, use the `image_dir` parameter to specify a single image or a set of images, and the `det_model_dir`, `cls_model_dir`, and `rec_model_dir` parameters to specify the detection, direction classification, and recognition inference model paths respectively. The `use_angle_cls` parameter controls whether the direction classifier is enabled. Unlike local prediction, to reduce network transfer time the visualized result is not produced; the client receives the inferred text fields.

Start the server with the following commands:
Mind the value of `--use_gpu` in params.py.
```
# Standard edition, Linux users
# GPU users
python -m paddle_serving_server_gpu.serve --model det_infer_server --port 9293 --gpu_id 0
python -m paddle_serving_server_gpu.serve --model cls_infer_server --port 9294 --gpu_id 0
python ocr_rpc_server.py
# CPU users
python -m paddle_serving_server.serve --model det_infer_server --port 9293
python -m paddle_serving_server.serve --model cls_infer_server --port 9294
python ocr_rpc_server.py
# Fast edition, Windows/Linux users
python ocr_local_server.py
```
Client:
```
python rec_web_client.py
```
@@ -18,7 +18,7 @@ cd /home/Projects
# You need to create a docker container for the first run, and do not need to run the current command when you run it again
# Create a docker container named ppocr and map the current directory to the /paddle directory of the container

# If using CPU, use docker instead of nvidia-docker to create docker
sudo docker run --name ppocr -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda9.0-cudnn7-dev /bin/bash
```
If using CUDA9, please run the following command to create a container:
@@ -49,9 +49,9 @@ docker images
hub.baidubce.com/paddlepaddle/paddle latest-gpu-cuda9.0-cudnn7-dev f56310dcc829
```

**2. Install PaddlePaddle v2.0**
```
python3 -m pip install --upgrade pip

# If you have cuda9 or cuda10 installed on your machine, please run the following command to install
python3 -m pip install paddlepaddle-gpu==2.0.0b0 -i https://mirror.baidu.com/pypi/simple
@@ -77,7 +77,7 @@ git clone https://gitee.com/paddlepaddle/PaddleOCR

**4. Install third-party libraries**
```
cd PaddleOCR
python3 -m pip install -r requirements.txt
```

If you get the error `OSError: [WinError 126] The specified module could not be found` when you install shapely on Windows,
......
@@ -114,11 +114,11 @@ In `word_dict.txt`, there is a single word in each line, which maps characters a

`ppocr/utils/dict/french_dict.txt` is a French dictionary with 118 characters

`ppocr/utils/dict/japan_dict.txt` is a Japanese dictionary with 4399 characters

`ppocr/utils/dict/korean_dict.txt` is a Korean dictionary with 3636 characters

`ppocr/utils/dict/german_dict.txt` is a German dictionary with 131 characters

You can use them on demand.
......
doc/joinus.PNG (binary image changed: 405.4 KB → 412.0 KB)
@@ -50,22 +50,22 @@ model_urls = {
        'french': {
            'url':
            'https://paddleocr.bj.bcebos.com/20-09-22/mobile/fr/french_ppocr_mobile_v1.1_rec_infer.tar',
            'dict_path': './ppocr/utils/dict/french_dict.txt'
        },
        'german': {
            'url':
            'https://paddleocr.bj.bcebos.com/20-09-22/mobile/ge/german_ppocr_mobile_v1.1_rec_infer.tar',
            'dict_path': './ppocr/utils/dict/german_dict.txt'
        },
        'korean': {
            'url':
            'https://paddleocr.bj.bcebos.com/20-09-22/mobile/kr/korean_ppocr_mobile_v1.1_rec_infer.tar',
            'dict_path': './ppocr/utils/dict/korean_dict.txt'
        },
        'japan': {
            'url':
            'https://paddleocr.bj.bcebos.com/20-09-22/mobile/jp/japan_ppocr_mobile_v1.1_rec_infer.tar',
            'dict_path': './ppocr/utils/dict/japan_dict.txt'
        }
    },
    'cls':
@@ -87,8 +87,8 @@ def download_with_progressbar(url, save_path):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
    if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes:
        logger.error("Something went wrong while downloading models")
        sys.exit(0)
@@ -157,7 +157,6 @@ def parse_args():
    parser.add_argument("--use_space_char", type=bool, default=True)

    # params for text classifier
    parser.add_argument("--cls_model_dir", type=str, default=None)
    parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
    parser.add_argument("--label_list", type=list, default=['0', '180'])
@@ -166,11 +165,12 @@ def parse_args():
    parser.add_argument("--enable_mkldnn", type=bool, default=False)
    parser.add_argument("--use_zero_copy_run", type=bool, default=False)
    parser.add_argument("--use_pdserving", type=str2bool, default=False)

    parser.add_argument("--lang", type=str, default='ch')
    parser.add_argument("--det", type=str2bool, default=True)
    parser.add_argument("--rec", type=str2bool, default=True)
    parser.add_argument("--use_angle_cls", type=str2bool, default=True)
    return parser.parse_args()
@@ -205,8 +205,7 @@ class PaddleOCR(predict_system.TextSystem):
        maybe_download(postprocess_params.det_model_dir, model_urls['det'])
        maybe_download(postprocess_params.rec_model_dir,
                       model_urls['rec'][lang]['url'])
        maybe_download(postprocess_params.cls_model_dir, model_urls['cls'])

        if postprocess_params.det_algorithm not in SUPPORT_DET_MODEL:
            logger.error('det_algorithm must in {}'.format(SUPPORT_DET_MODEL))
@@ -230,9 +229,6 @@ class PaddleOCR(predict_system.TextSystem):
            rec: use text recognition or not, if false, only det will be exec. default is True
        """
        assert isinstance(img, (np.ndarray, list, str))
        self.use_angle_cls = cls
        if isinstance(img, str):
            image_file = img
@@ -274,6 +270,7 @@ def main():
        result = ocr_engine.ocr(img_path,
                                det=args.det,
                                rec=args.rec,
                                cls=args.use_angle_cls)
        if result is not None:
            for line in result:
                print(line)
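Putting the pieces of this diff together, a minimal usage sketch of the whl interface with the direction classifier enabled:

```python
from paddleocr import PaddleOCR

# det/rec/cls models are downloaded automatically on first use.
ocr = PaddleOCR(use_angle_cls=True, lang='ch')
result = ocr.ocr('./doc/imgs/11.jpg', det=True, rec=True, cls=True)
if result is not None:
    for line in result:
        print(line)  # [box coordinates, (text, confidence)]
```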
@@ -74,7 +74,7 @@ class SimpleReader(object):
        def get_device_num():
            if self.use_gpu:
                gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "1")
                gpu_num = len(gpus.split(','))
                return gpu_num
            else:
......
@@ -23,6 +23,14 @@ import paddle.fluid as fluid

class ClsHead(object):
    """
    Text direction classification head
    Args:
        params(dict): hyperparameters for building the classification network
    """

    def __init__(self, params):
        super(ClsHead, self).__init__()
        self.class_dim = params['class_dim']
......
@@ -109,6 +109,12 @@ class EASTHead(object):
        return f_score, f_geo

    def __call__(self, inputs):
        """
        Fuse feature maps from different backbone levels and predict results
        Args:
            inputs(list): feature maps from the backbone
        Return: predicts
        """
        f_common = self.unet_fusion(inputs)
        f_score, f_geo = self.detector_header(f_common)
        predicts = OrderedDict()
......
@@ -38,35 +38,66 @@ class SASTHead(object):
        blocks{}: contain block_2, block_3, block_4, block_5, block_6, block_7 with
                  1/4, 1/8, 1/16, 1/32, 1/64, 1/128 resolution.
        """
        f = [
            blocks['block_6'], blocks['block_5'], blocks['block_4'],
            blocks['block_3'], blocks['block_2']
        ]
        num_outputs = [256, 256, 192, 192, 128]
        g = [None, None, None, None, None]
        h = [None, None, None, None, None]
        for i in range(5):
            h[i] = conv_bn_layer(
                input=f[i],
                num_filters=num_outputs[i],
                filter_size=1,
                stride=1,
                act=None,
                name='fpn_up_h' + str(i))
        for i in range(4):
            if i == 0:
                g[i] = deconv_bn_layer(
                    input=h[i],
                    num_filters=num_outputs[i + 1],
                    act=None,
                    name='fpn_up_g0')
                #print("g[{}] shape: {}".format(i, g[i].shape))
            else:
                g[i] = fluid.layers.elementwise_add(x=g[i - 1], y=h[i])
                g[i] = fluid.layers.relu(g[i])
                #g[i] = conv_bn_layer(input=g[i], num_filters=num_outputs[i],
                #    filter_size=1, stride=1, act='relu')
                g[i] = conv_bn_layer(
                    input=g[i],
                    num_filters=num_outputs[i],
                    filter_size=3,
                    stride=1,
                    act='relu',
                    name='fpn_up_g%d_1' % i)
                g[i] = deconv_bn_layer(
                    input=g[i],
                    num_filters=num_outputs[i + 1],
                    act=None,
                    name='fpn_up_g%d_2' % i)
                #print("g[{}] shape: {}".format(i, g[i].shape))
        g[4] = fluid.layers.elementwise_add(x=g[3], y=h[4])
        g[4] = fluid.layers.relu(g[4])
        g[4] = conv_bn_layer(
            input=g[4],
            num_filters=num_outputs[4],
            filter_size=3,
            stride=1,
            act='relu',
            name='fpn_up_fusion_1')
        g[4] = conv_bn_layer(
            input=g[4],
            num_filters=num_outputs[4],
            filter_size=1,
            stride=1,
            act=None,
            name='fpn_up_fusion_2')
        return g[4]

    def FPN_Down_Fusion(self, blocks):
@@ -77,95 +108,245 @@ class SASTHead(object):
        f = [blocks['block_0'], blocks['block_1'], blocks['block_2']]
        num_outputs = [32, 64, 128]
        g = [None, None, None]
        h = [None, None, None]
        for i in range(3):
            h[i] = conv_bn_layer(
                input=f[i],
                num_filters=num_outputs[i],
                filter_size=3,
                stride=1,
                act=None,
                name='fpn_down_h' + str(i))
        for i in range(2):
            if i == 0:
                g[i] = conv_bn_layer(
                    input=h[i],
                    num_filters=num_outputs[i + 1],
                    filter_size=3,
                    stride=2,
                    act=None,
                    name='fpn_down_g0')
            else:
                g[i] = fluid.layers.elementwise_add(x=g[i - 1], y=h[i])
                g[i] = fluid.layers.relu(g[i])
                g[i] = conv_bn_layer(
                    input=g[i],
                    num_filters=num_outputs[i],
                    filter_size=3,
                    stride=1,
                    act='relu',
                    name='fpn_down_g%d_1' % i)
                g[i] = conv_bn_layer(
                    input=g[i],
                    num_filters=num_outputs[i + 1],
                    filter_size=3,
                    stride=2,
                    act=None,
                    name='fpn_down_g%d_2' % i)
            # print("g[{}] shape: {}".format(i, g[i].shape))
        g[2] = fluid.layers.elementwise_add(x=g[1], y=h[2])
        g[2] = fluid.layers.relu(g[2])
        g[2] = conv_bn_layer(
            input=g[2],
            num_filters=num_outputs[2],
            filter_size=3,
            stride=1,
            act='relu',
            name='fpn_down_fusion_1')
        g[2] = conv_bn_layer(
            input=g[2],
            num_filters=num_outputs[2],
            filter_size=1,
            stride=1,
            act=None,
            name='fpn_down_fusion_2')
        return g[2]

    def SAST_Header1(self, f_common):
        """Detector header."""
        #f_score
        f_score = conv_bn_layer(
            input=f_common,
            num_filters=64,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_score1')
        f_score = conv_bn_layer(
            input=f_score,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name='f_score2')
        f_score = conv_bn_layer(
            input=f_score,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_score3')
        f_score = conv_bn_layer(
            input=f_score,
            num_filters=1,
            filter_size=3,
            stride=1,
            name='f_score4')
        f_score = fluid.layers.sigmoid(f_score)
        # print("f_score shape: {}".format(f_score.shape))

        #f_border
        f_border = conv_bn_layer(
            input=f_common,
            num_filters=64,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_border1')
        f_border = conv_bn_layer(
            input=f_border,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name='f_border2')
        f_border = conv_bn_layer(
            input=f_border,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_border3')
        f_border = conv_bn_layer(
            input=f_border,
            num_filters=4,
            filter_size=3,
            stride=1,
            name='f_border4')
        # print("f_border shape: {}".format(f_border.shape))
        return f_score, f_border

    def SAST_Header2(self, f_common):
        """Detector header."""
        #f_tvo
        f_tvo = conv_bn_layer(
            input=f_common,
            num_filters=64,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_tvo1')
        f_tvo = conv_bn_layer(
            input=f_tvo,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name='f_tvo2')
        f_tvo = conv_bn_layer(
            input=f_tvo,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_tvo3')
        f_tvo = conv_bn_layer(
            input=f_tvo, num_filters=8, filter_size=3, stride=1, name='f_tvo4')
        # print("f_tvo shape: {}".format(f_tvo.shape))

        #f_tco
        f_tco = conv_bn_layer(
            input=f_common,
            num_filters=64,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_tco1')
        f_tco = conv_bn_layer(
            input=f_tco,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name='f_tco2')
        f_tco = conv_bn_layer(
            input=f_tco,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_tco3')
        f_tco = conv_bn_layer(
            input=f_tco, num_filters=2, filter_size=3, stride=1, name='f_tco4')
        # print("f_tco shape: {}".format(f_tco.shape))
        return f_tvo, f_tco

    def cross_attention(self, f_common):
        """
        """
        f_shape = fluid.layers.shape(f_common)
        f_theta = conv_bn_layer(
            input=f_common,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_theta')
        f_phi = conv_bn_layer(
            input=f_common,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_phi')
        f_g = conv_bn_layer(
            input=f_common,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_g')
        ### horizon
        fh_theta = f_theta
        fh_phi = f_phi
        fh_g = f_g
        #flatten
        fh_theta = fluid.layers.transpose(fh_theta, [0, 2, 3, 1])
        fh_theta = fluid.layers.reshape(
            fh_theta, [f_shape[0] * f_shape[2], f_shape[3], 128])
        fh_phi = fluid.layers.transpose(fh_phi, [0, 2, 3, 1])
        fh_phi = fluid.layers.reshape(
            fh_phi, [f_shape[0] * f_shape[2], f_shape[3], 128])
        fh_g = fluid.layers.transpose(fh_g, [0, 2, 3, 1])
        fh_g = fluid.layers.reshape(fh_g,
                                    [f_shape[0] * f_shape[2], f_shape[3], 128])
        #correlation
        fh_attn = fluid.layers.matmul(fh_theta,
                                      fluid.layers.transpose(fh_phi, [0, 2, 1]))
        #scale
        fh_attn = fh_attn / (128**0.5)
        fh_attn = fluid.layers.softmax(fh_attn)
        #weighted sum
        fh_weight = fluid.layers.matmul(fh_attn, fh_g)
        fh_weight = fluid.layers.reshape(
            fh_weight, [f_shape[0], f_shape[2], f_shape[3], 128])
        # print("fh_weight: {}".format(fh_weight.shape))
        fh_weight = fluid.layers.transpose(fh_weight, [0, 3, 1, 2])
        fh_weight = conv_bn_layer(
            input=fh_weight,
            num_filters=128,
            filter_size=1,
            stride=1,
            name='fh_weight')
        #short cut
        fh_sc = conv_bn_layer(
            input=f_common,
            num_filters=128,
            filter_size=1,
            stride=1,
            name='fh_sc')
        f_h = fluid.layers.relu(fh_weight + fh_sc)
        ######
        #vertical
@@ -174,31 +355,60 @@ class SASTHead(object):
        fv_g = fluid.layers.transpose(f_g, [0, 1, 3, 2])
        #flatten
        fv_theta = fluid.layers.transpose(fv_theta, [0, 2, 3, 1])
        fv_theta = fluid.layers.reshape(
            fv_theta, [f_shape[0] * f_shape[3], f_shape[2], 128])
        fv_phi = fluid.layers.transpose(fv_phi, [0, 2, 3, 1])
        fv_phi = fluid.layers.reshape(
            fv_phi, [f_shape[0] * f_shape[3], f_shape[2], 128])
        fv_g = fluid.layers.transpose(fv_g, [0, 2, 3, 1])
        fv_g = fluid.layers.reshape(fv_g,
                                    [f_shape[0] * f_shape[3], f_shape[2], 128])
        #correlation
        fv_attn = fluid.layers.matmul(fv_theta,
                                      fluid.layers.transpose(fv_phi, [0, 2, 1]))
        #scale
        fv_attn = fv_attn / (128**0.5)
        fv_attn = fluid.layers.softmax(fv_attn)
        #weighted sum
        fv_weight = fluid.layers.matmul(fv_attn, fv_g)
        fv_weight = fluid.layers.reshape(
            fv_weight, [f_shape[0], f_shape[3], f_shape[2], 128])
        # print("fv_weight: {}".format(fv_weight.shape))
        fv_weight = fluid.layers.transpose(fv_weight, [0, 3, 2, 1])
        fv_weight = conv_bn_layer(
            input=fv_weight,
            num_filters=128,
            filter_size=1,
            stride=1,
            name='fv_weight')
        #short cut
        fv_sc = conv_bn_layer(
            input=f_common,
            num_filters=128,
            filter_size=1,
            stride=1,
            name='fv_sc')
        f_v = fluid.layers.relu(fv_weight + fv_sc)
        ######
        f_attn = fluid.layers.concat([f_h, f_v], axis=1)
        f_attn = conv_bn_layer(
            input=f_attn,
            num_filters=128,
            filter_size=1,
            stride=1,
            act='relu',
            name='f_attn')
        return f_attn

    def __call__(self, blocks, with_cab=False):
        """
        Fuse different levels of feature map from backbone and predict results
        Args:
            blocks(list): feature maps from backbone
            with_cab(bool): whether use cross_attention
        Return: predicts
        """
        # for k, v in blocks.items():
        #     print(k, v.shape)
@@ -212,12 +422,12 @@ class SASTHead(object):
        f_common = fluid.layers.elementwise_add(x=f_down, y=f_up)
        f_common = fluid.layers.relu(f_common)
        # print("f_common: {}".format(f_common.shape))

        if self.with_cab:
            # print('enhance f_common with CAB.')
            f_common = self.cross_attention(f_common)

        f_score, f_border = self.SAST_Header1(f_common)
        f_tvo, f_tco = self.SAST_Header2(f_common)

        predicts = OrderedDict()
@@ -225,4 +435,4 @@ class SASTHead(object):
        predicts['f_border'] = f_border
        predicts['f_tvo'] = f_tvo
        predicts['f_tco'] = f_tco
        return predicts
\ No newline at end of file
@@ -28,6 +28,13 @@ gradient_clip = 10

class SRNPredict(object):
    """
    SRN:
        see arxiv: https://arxiv.org/abs/2003.12294
    args:
        params(dict): hyperparameters for building the network
    """

    def __init__(self, params):
        super(SRNPredict, self).__init__()
        self.char_num = params['char_num']
@@ -39,7 +46,15 @@ class SRNPredict(object):
        self.hidden_dims = params['hidden_dims']

    def pvam(self, inputs, others):
        """
        Parallel visual attention module
        args:
            inputs(variable): feature map extracted from the backbone network
            others(list): other positional-information variables
        return: pvam_features
        """
        b, c, h, w = inputs.shape
        conv_features = fluid.layers.reshape(x=inputs, shape=[-1, c, h * w])
        conv_features = fluid.layers.transpose(x=conv_features, perm=[0, 2, 1])
@@ -98,6 +113,15 @@ class SRNPredict(object):
        return pvam_features

    def gsrm(self, pvam_features, others):
        """
        Global Semantic Reasoning Module
        args:
            pvam_features(variable): feature map extracted from pvam
            others(list): other positional-information variables
        return: gsrm_features, word_out, gsrm_out
        """
        #===== GSRM Visual-to-semantic embedding block =====
        b, t, c = pvam_features.shape
@@ -190,7 +214,15 @@ class SRNPredict(object):
        return gsrm_features, word_out, gsrm_out

    def vsfd(self, pvam_features, gsrm_features):
        """
        Visual-Semantic Fusion Decoder Module
        args:
            pvam_features(variable): feature map extracted from pvam
            gsrm_features(list): feature map extracted from gsrm
        return: fc_out
        """
        #===== Visual-Semantic Fusion Decoder Module =====
        b, t, c1 = pvam_features.shape
        b, t, c2 = gsrm_features.shape
......
@@ -70,6 +70,13 @@ class LocalizationNetwork(object):
        return initial_bias

    def __call__(self, image):
        """
        Estimate the parameters of the geometric transformation
        Args:
            image: input image
        Return:
            batch_C_prime: the matrix of the geometric transformation
        """
        F = self.F
        loc_lr = self.loc_lr
        if self.model_name == "large":
@@ -215,6 +222,14 @@ class GridGenerator(object):
        return batch_C_ex_part_tensor

    def __call__(self, batch_C_prime, I_r_size):
        """
        Generate the grid for the grid_sampler.
        Args:
            batch_C_prime: the matrix of the geometric transformation
            I_r_size: the shape of the input image
        Return:
            batch_P_prime: the grid for the grid_sampler
        """
        C = self.build_C()
        P = self.build_P(I_r_size)
        inv_delta_C = self.build_inv_delta_C(C).astype('float32')
......
@@ -29,9 +29,17 @@ def cosine_decay_with_warmup(learning_rate,
                             step_each_epoch,
                             epochs=500,
                             warmup_minibatch=1000):
    """
    Applies cosine decay to the learning rate.
    lr = 0.05 * (math.cos(epoch * (math.pi / 120)) + 1)
    decrease lr for every mini-batch and start with warmup.
    args:
        learning_rate(float): initial learning rate
        step_each_epoch(int): number of steps per epoch during training
        epochs(int): number of training epochs
        warmup_minibatch(int): number of minibatches used for warmup
    return:
        lr(tensor): learning rate tensor
    """
    global_step = _decay_step_counter()
    lr = fluid.layers.tensor.create_global_var(
@@ -65,6 +73,7 @@ def AdamDecay(params, parameter_list=None):
        params(dict): the hyperparameters
        parameter_list (list): list of Variable names to update to minimize loss
    return:
        optimizer: an Adam optimizer instance
    """
    base_lr = params['base_lr']
    beta1 = params['beta1']
@@ -121,6 +130,7 @@ def RMSProp(params, parameter_list=None):
        params(dict): the hyperparameters
        parameter_list (list): list of Variable names to update to minimize loss
    return:
        optimizer: an RMSProp optimizer instance
    """
    base_lr = params.get("base_lr", 0.001)
    l2_decay = params.get("l2_decay", 0.00005)
......
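A pure-Python sketch of the schedule documented in cosine_decay_with_warmup above (the real implementation builds fluid tensors in-graph; this only illustrates the intended curve):

```python
import math

def lr_at_step(step, base_lr, step_each_epoch, epochs=500, warmup_minibatch=1000):
    # Linear warmup over the first warmup_minibatch steps, then per-minibatch
    # cosine decay from base_lr down to 0 over the remaining training steps.
    if step < warmup_minibatch:
        return base_lr * step / float(warmup_minibatch)
    total = epochs * step_each_epoch - warmup_minibatch
    progress = (step - warmup_minibatch) / float(total)
    return base_lr * (math.cos(progress * math.pi) + 1) / 2
```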
@@ -16,6 +16,7 @@ import logging
import os
import imghdr
import cv2
import paddle
from paddle import fluid
@@ -102,3 +103,10 @@ def create_multi_devices_program(program, loss_var_name):
        build_strategy=build_strategy,
        exec_strategy=exec_strategy)
    return compile_program


def enable_static_mode():
    try:
        paddle.enable_static()
    except:
        pass
@@ -32,7 +32,7 @@ setup(
    package_dir={'paddleocr': ''},
    include_package_data=True,
    entry_points={"console_scripts": ["paddleocr= paddleocr.paddleocr:main"]},
    version='1.1.1',
    install_requires=requirements,
    license='Apache License 2.0',
    description='Awesome OCR toolkits based on PaddlePaddle (8.6M ultra-lightweight pre-trained model, support training and deployment among server, mobile, embedded and IoT devices)',
......
## Third-party contributions

- First of all, thank you for supporting PaddleOCR. We hope to build and share the project together and foster a healthy open-source community :)
- If you would also like to contribute, jump to the [contribution guide](#贡献说明).

## 1. Recent updates (as of 2020.11.5)

- Many thanks to [zhangxin](https://github.com/ZhangXinNan) ([Blog](https://blog.csdn.net/sdlypyzq)) for contributing a new visualization mode, adding .gitignore, and removing the need to set the PYTHONPATH environment variable by hand ([#210](https://github.com/PaddlePaddle/PaddleOCR/pull/210))
- Many thanks to [lyl120117](https://github.com/lyl120117) for contributing code to print the network structure ([#304](https://github.com/PaddlePaddle/PaddleOCR/pull/304))
- Many thanks to [BeyondYourself](https://github.com/BeyondYourself) for many excellent suggestions and for tidying up parts of the PaddleOCR code style ([so many commits](https://github.com/PaddlePaddle/PaddleOCR/commits?author=BeyondYourself))
### 2. New requirements

- Many thanks to [xiangyubo](https://github.com/xiangyubo) for contributing a handwritten Chinese OCR dataset ([#321](https://github.com/PaddlePaddle/PaddleOCR/pull/321))
- Many thanks to [Mejans](https://github.com/Mejans) for adding the dictionary and corpus for Occitan, a new language, to PaddleOCR ([#954](https://github.com/PaddlePaddle/PaddleOCR/pull/954)).

### 3. New features

- Many thanks to [authorfu](https://github.com/authorfu) for contributing the Android demo ([#340](https://github.com/PaddlePaddle/PaddleOCR/pull/340)) and to [xiadeye](https://github.com/xiadeye) for the iOS demo code ([#325](https://github.com/PaddlePaddle/PaddleOCR/pull/325))
- Many thanks to [tangmq](https://gitee.com/tangmq) for adding Dockerized deployment to PaddleOCR, supporting quick release of a callable RESTful API service ([#507](https://github.com/PaddlePaddle/PaddleOCR/pull/507)).
- Many thanks to [lijinhan](https://github.com/lijinhan) for adding a Java Spring Boot integration that calls the OCR hubserving interface, completing the use of OCR as a deployed service ([#1027](https://github.com/PaddlePaddle/PaddleOCR/pull/1027)).
<a name="贡献说明"></a>
## 2. Contribution guide

### 1. Fixes and suggestions

- For bug fixes, edit the corresponding place in the official code directly and submit a PR; it will be merged after review.

### 2. New requirements

- If the currently provided features do not meet your needs, you can add them as a new request, marked "【需求】xxx", in [new requests](./requests.md).

### 3. New features

- New features can also be added as a new folder here, named after the feature, containing complete code and a README that clearly describes how to use it (this can also be done in your own repo: add a requirements file with a single line `paddleocr`, and it can be picked up via "used by").
- Support for new deployment methods can be added as a new folder here.
- Support for additional programming languages can be added as a new folder here.
## 3. Code requirements

- **Documentation, documentation, documentation**: important things are worth saying three times.
- Keep it simple and efficient, intuitive and clear, feature-complete, and free of bugs.
- Before submitting, use the pre-commit tool to automatically check basic issues (e.g., a single EOL per file, no large files added to Git). The pre-commit check is part of the unit tests in Travis CI. First install it and run it in the current directory:
```
pip install pre-commit
pre-commit install
```
- clang-format is currently used by default to format C/C++ source code; make sure your clang-format version is above 3.8. Note: the yapf installed via pip install pre-commit differs slightly from the one installed via conda install -c conda-forge pre-commit; Paddle developers use pip install pre-commit.

## 4. Contact us

- If anything is unclear before opening a PR, feel free to contact us first via an issue or the WeChat group to agree on where the PR should go, reducing review comments and repeated revisions.

## 5. Acknowledgements and follow-up

- After your code is merged, you are added to the acknowledgements at the end of the homepage README, linked by default to your GitHub name and homepage; contact us if you would like a different homepage.
- Important new features are announced in the user group, so you can enjoy the open-source community's moment of honor.
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Maven Start Up Batch script
#
# Required ENV vars:
# ------------------
# JAVA_HOME - location of a JDK home dir
#
# Optional ENV vars
# -----------------
# M2_HOME - location of maven2's installed home dir
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
# e.g. to debug Maven itself, use
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
# ----------------------------------------------------------------------------
if [ -z "$MAVEN_SKIP_RC" ] ; then
if [ -f /etc/mavenrc ] ; then
. /etc/mavenrc
fi
if [ -f "$HOME/.mavenrc" ] ; then
. "$HOME/.mavenrc"
fi
fi
# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
mingw=false
case "`uname`" in
CYGWIN*) cygwin=true ;;
MINGW*) mingw=true;;
Darwin*) darwin=true
# Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
# See https://developer.apple.com/library/mac/qa/qa1170/_index.html
if [ -z "$JAVA_HOME" ]; then
if [ -x "/usr/libexec/java_home" ]; then
export JAVA_HOME="`/usr/libexec/java_home`"
else
export JAVA_HOME="/Library/Java/Home"
fi
fi
;;
esac
if [ -z "$JAVA_HOME" ] ; then
if [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
fi
fi
if [ -z "$M2_HOME" ] ; then
## resolve links - $0 may be a link to maven's home
PRG="$0"
# need this for relative symlinks
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG="`dirname "$PRG"`/$link"
fi
done
saveddir=`pwd`
M2_HOME=`dirname "$PRG"`/..
# make it fully qualified
M2_HOME=`cd "$M2_HOME" && pwd`
cd "$saveddir"
# echo Using m2 at $M2_HOME
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --unix "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# For Mingw, ensure paths are in UNIX format before anything is touched
if $mingw ; then
[ -n "$M2_HOME" ] &&
M2_HOME="`(cd "$M2_HOME"; pwd)`"
[ -n "$JAVA_HOME" ] &&
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
fi
if [ -z "$JAVA_HOME" ]; then
javaExecutable="`which javac`"
if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
# readlink(1) is not available as standard on Solaris 10.
readLink=`which readlink`
if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
if $darwin ; then
javaHome="`dirname \"$javaExecutable\"`"
javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
else
javaExecutable="`readlink -f \"$javaExecutable\"`"
fi
javaHome="`dirname \"$javaExecutable\"`"
javaHome=`expr "$javaHome" : '\(.*\)/bin'`
JAVA_HOME="$javaHome"
export JAVA_HOME
fi
fi
fi
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD="`which java`"
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly." >&2
echo " We cannot execute $JAVACMD" >&2
exit 1
fi
if [ -z "$JAVA_HOME" ] ; then
echo "Warning: JAVA_HOME environment variable is not set."
fi
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir() {
if [ -z "$1" ]
then
echo "Path not specified to find_maven_basedir"
return 1
fi
basedir="$1"
wdir="$1"
while [ "$wdir" != '/' ] ; do
if [ -d "$wdir"/.mvn ] ; then
basedir=$wdir
break
fi
# workaround for JBEAP-8937 (on Solaris 10/Sparc)
if [ -d "${wdir}" ]; then
wdir=`cd "$wdir/.."; pwd`
fi
# end of workaround
done
echo "${basedir}"
}
# concatenates all lines of a file
concat_lines() {
if [ -f "$1" ]; then
echo "$(tr -s '\n' ' ' < "$1")"
fi
}
BASE_DIR=`find_maven_basedir "$(pwd)"`
if [ -z "$BASE_DIR" ]; then
exit 1;
fi
##########################################################################################
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
# This allows using the maven wrapper in projects that prohibit checking in binary data.
##########################################################################################
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
if [ "$MVNW_VERBOSE" = true ]; then
echo "Found .mvn/wrapper/maven-wrapper.jar"
fi
else
if [ "$MVNW_VERBOSE" = true ]; then
echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
fi
if [ -n "$MVNW_REPOURL" ]; then
jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
else
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
fi
while IFS="=" read key value; do
case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
esac
done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
if [ "$MVNW_VERBOSE" = true ]; then
echo "Downloading from: $jarUrl"
fi
wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
if $cygwin; then
wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
fi
if command -v wget > /dev/null; then
if [ "$MVNW_VERBOSE" = true ]; then
echo "Found wget ... using wget"
fi
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
wget "$jarUrl" -O "$wrapperJarPath"
else
wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
fi
elif command -v curl > /dev/null; then
if [ "$MVNW_VERBOSE" = true ]; then
echo "Found curl ... using curl"
fi
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
curl -o "$wrapperJarPath" "$jarUrl" -f
else
curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
fi
else
if [ "$MVNW_VERBOSE" = true ]; then
echo "Falling back to using Java to download"
fi
javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
# For Cygwin, switch paths to Windows format before running javac
if $cygwin; then
javaClass=`cygpath --path --windows "$javaClass"`
fi
if [ -e "$javaClass" ]; then
if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
if [ "$MVNW_VERBOSE" = true ]; then
echo " - Compiling MavenWrapperDownloader.java ..."
fi
# Compiling the Java class
("$JAVA_HOME/bin/javac" "$javaClass")
fi
if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
# Running the downloader
if [ "$MVNW_VERBOSE" = true ]; then
echo " - Running MavenWrapperDownloader.java ..."
fi
("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
fi
fi
fi
fi
##########################################################################################
# End of extension
##########################################################################################
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
if [ "$MVNW_VERBOSE" = true ]; then
echo $MAVEN_PROJECTBASEDIR
fi
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --path --windows "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
[ -n "$MAVEN_PROJECTBASEDIR" ] &&
MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
fi
# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
export MAVEN_CMD_LINE_ARGS
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
exec "$JAVACMD" \
$MAVEN_OPTS \
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
@REM ----------------------------------------------------------------------------
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM https://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM ----------------------------------------------------------------------------
@REM ----------------------------------------------------------------------------
@REM Maven Start Up Batch script
@REM
@REM Required ENV vars:
@REM JAVA_HOME - location of a JDK home dir
@REM
@REM Optional ENV vars
@REM M2_HOME - location of maven2's installed home dir
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
@REM e.g. to debug Maven itself, use
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
@REM ----------------------------------------------------------------------------
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
@echo off
@REM set title of command window
title %0
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
@REM set %HOME% to equivalent of $HOME
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
@REM Execute a user defined script before this one
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
:skipRcPre
@setlocal
set ERROR_CODE=0
@REM To isolate internal variables from possible post scripts, we use another setlocal
@setlocal
@REM ==== START VALIDATION ====
if not "%JAVA_HOME%" == "" goto OkJHome
echo.
echo Error: JAVA_HOME not found in your environment. >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error
:OkJHome
if exist "%JAVA_HOME%\bin\java.exe" goto init
echo.
echo Error: JAVA_HOME is set to an invalid directory. >&2
echo JAVA_HOME = "%JAVA_HOME%" >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error
@REM ==== END VALIDATION ====
:init
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
@REM Fallback to current working directory if not found.
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
set EXEC_DIR=%CD%
set WDIR=%EXEC_DIR%
:findBaseDir
IF EXIST "%WDIR%"\.mvn goto baseDirFound
cd ..
IF "%WDIR%"=="%CD%" goto baseDirNotFound
set WDIR=%CD%
goto findBaseDir
:baseDirFound
set MAVEN_PROJECTBASEDIR=%WDIR%
cd "%EXEC_DIR%"
goto endDetectBaseDir
:baseDirNotFound
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
cd "%EXEC_DIR%"
:endDetectBaseDir
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
@setlocal EnableExtensions EnableDelayedExpansion
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
:endReadAdditionalConfig
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
)
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
if exist %WRAPPER_JAR% (
if "%MVNW_VERBOSE%" == "true" (
echo Found %WRAPPER_JAR%
)
) else (
if not "%MVNW_REPOURL%" == "" (
SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
)
if "%MVNW_VERBOSE%" == "true" (
echo Couldn't find %WRAPPER_JAR%, downloading it ...
echo Downloading from: %DOWNLOAD_URL%
)
powershell -Command "&{"^
"$webclient = new-object System.Net.WebClient;"^
"if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
"$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
"}"^
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
"}"
if "%MVNW_VERBOSE%" == "true" (
echo Finished downloading %WRAPPER_JAR%
)
)
@REM End of extension
@REM Provide a "standardized" way to retrieve the CLI args that will
@REM work with both Windows and non-Windows executions.
set MAVEN_CMD_LINE_ARGS=%*
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
if ERRORLEVEL 1 goto error
goto end
:error
set ERROR_CODE=1
:end
@endlocal & set ERROR_CODE=%ERROR_CODE%
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
@REM check for post script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
:skipRcPost
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
if "%MAVEN_BATCH_PAUSE%" == "on" pause
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
exit /B %ERROR_CODE%
<?xml version="1.0" encoding="UTF-8"?>
<module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
<component name="FacetManager">
<facet type="Spring" name="Spring">
<configuration />
</facet>
<facet type="web" name="Web">
<configuration>
<webroots />
<sourceRoots>
<root url="file://$MODULE_DIR$/src/main/java" />
<root url="file://$MODULE_DIR$/src/main/resources" />
</sourceRoots>
</configuration>
</facet>
</component>
<component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_1_8">
<output url="file://$MODULE_DIR$/target/classes" />
<output-test url="file://$MODULE_DIR$/target/test-classes" />
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/main/resources" type="java-resource" />
<sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/target" />
</content>
<orderEntry type="jdk" jdkName="1.8" jdkType="JavaSDK" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot-starter-web:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot-starter:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot-autoconfigure:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot-starter-logging:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: ch.qos.logback:logback-classic:1.2.3" level="project" />
<orderEntry type="library" name="Maven: ch.qos.logback:logback-core:1.2.3" level="project" />
<orderEntry type="library" name="Maven: org.apache.logging.log4j:log4j-to-slf4j:2.13.3" level="project" />
<orderEntry type="library" name="Maven: org.apache.logging.log4j:log4j-api:2.13.3" level="project" />
<orderEntry type="library" name="Maven: org.slf4j:jul-to-slf4j:1.7.30" level="project" />
<orderEntry type="library" name="Maven: jakarta.annotation:jakarta.annotation-api:1.3.5" level="project" />
<orderEntry type="library" name="Maven: org.yaml:snakeyaml:1.26" level="project" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot-starter-json:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: com.fasterxml.jackson.core:jackson-databind:2.11.2" level="project" />
<orderEntry type="library" name="Maven: com.fasterxml.jackson.core:jackson-annotations:2.11.2" level="project" />
<orderEntry type="library" name="Maven: com.fasterxml.jackson.core:jackson-core:2.11.2" level="project" />
<orderEntry type="library" name="Maven: com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.11.2" level="project" />
<orderEntry type="library" name="Maven: com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.11.2" level="project" />
<orderEntry type="library" name="Maven: com.fasterxml.jackson.module:jackson-module-parameter-names:2.11.2" level="project" />
<orderEntry type="library" name="Maven: org.springframework.boot:spring-boot-starter-tomcat:2.3.4.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.apache.tomcat.embed:tomcat-embed-core:9.0.38" level="project" />
<orderEntry type="library" name="Maven: org.glassfish:jakarta.el:3.0.3" level="project" />
<orderEntry type="library" name="Maven: org.apache.tomcat.embed:tomcat-embed-websocket:9.0.38" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-web:5.2.9.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-beans:5.2.9.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-webmvc:5.2.9.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-aop:5.2.9.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-context:5.2.9.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-expression:5.2.9.RELEASE" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.springframework.boot:spring-boot-starter-test:2.3.4.RELEASE" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.springframework.boot:spring-boot-test:2.3.4.RELEASE" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.springframework.boot:spring-boot-test-autoconfigure:2.3.4.RELEASE" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: com.jayway.jsonpath:json-path:2.4.0" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: net.minidev:json-smart:2.3" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: net.minidev:accessors-smart:1.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.ow2.asm:asm:5.0.4" level="project" />
<orderEntry type="library" name="Maven: org.slf4j:slf4j-api:1.7.30" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: jakarta.xml.bind:jakarta.xml.bind-api:2.3.3" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: jakarta.activation:jakarta.activation-api:1.2.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.assertj:assertj-core:3.16.1" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.hamcrest:hamcrest:2.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.junit.jupiter:junit-jupiter:5.6.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.junit.jupiter:junit-jupiter-api:5.6.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.apiguardian:apiguardian-api:1.1.0" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.opentest4j:opentest4j:1.2.0" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.junit.platform:junit-platform-commons:1.6.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.junit.jupiter:junit-jupiter-params:5.6.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.junit.jupiter:junit-jupiter-engine:5.6.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.junit.platform:junit-platform-engine:1.6.2" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.mockito:mockito-core:3.3.3" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: net.bytebuddy:byte-buddy:1.10.14" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: net.bytebuddy:byte-buddy-agent:1.10.14" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.objenesis:objenesis:2.6" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.mockito:mockito-junit-jupiter:3.3.3" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.skyscreamer:jsonassert:1.5.0" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-core:5.2.9.RELEASE" level="project" />
<orderEntry type="library" name="Maven: org.springframework:spring-jcl:5.2.9.RELEASE" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.springframework:spring-test:5.2.9.RELEASE" level="project" />
<orderEntry type="library" scope="TEST" name="Maven: org.xmlunit:xmlunit-core:2.7.0" level="project" />
<orderEntry type="library" name="Maven: org.apache.httpcomponents:httpclient:4.5.12" level="project" />
<orderEntry type="library" name="Maven: org.apache.httpcomponents:httpcore:4.4.13" level="project" />
<orderEntry type="library" name="Maven: commons-codec:commons-codec:1.14" level="project" />
<orderEntry type="library" name="Maven: com.vaadin.external.google:android-json:0.0.20131108.vaadin1" level="project" />
<orderEntry type="library" name="Maven: com.google.code.gson:gson:2.8.6" level="project" />
</component>
</module>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.3.4.RELEASE</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.paddelOcr_springBoot</groupId>
<artifactId>demo</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>demo</name>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<!-- HttpClient, used by RestTemplate -->
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</dependency>
<dependency>
<groupId>com.vaadin.external.google</groupId>
<artifactId>android-json</artifactId>
<version>0.0.20131108.vaadin1</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.8.6</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
# Calling PaddleHub Serving from Java SpringBoot
- Before using this tutorial, first complete the PaddleHub Serving-based deployment.
The paddleOcrSpringBoot deployment directory contains all of the SpringBoot code. Its structure is as follows:
```
deploy/paddleOcrSpringBoot/
└─ src                                  source code
   └─ main                              main application code
      └─ java\com\paddelocr_springboot\demo
         └─ DemoApplication.java        SpringBoot entry point
         └─ Controller
            └─ OCR.java                 controller code
   └─ test                              test code
```
- After Hub Serving starts, the API endpoint is:
`http://[ip_address]:[port]/predict/[module_name]`
## Response Format
The response is a list; each item in the list is a dict that may contain up to three fields:
|Field|Type|Meaning|
|-|-|-|
|text|str|recognized text|
|confidence|float|confidence of the recognition result|
|text_region|list|coordinates of the text box|
Different modules return different fields; for example, the result of the text recognition module does not contain the `text_region` field. Details:
|Field / Module|ocr_det|ocr_rec|ocr_system|
|-|-|-|-|
|text||✔|✔|
|confidence||✔|✔|
|text_region|✔||✔|
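To make the schema concrete, here is a minimal decoding sketch in Go (the language of the paddleocr-go deployment further below); the field names follow the table above, while the sample values are invented for illustration:
```go
package main

import (
	"encoding/json"
	"fmt"
)

// ResultItem mirrors the fields in the table above; text_region is a list of
// [x, y] corner points. Which fields are present depends on the module.
type ResultItem struct {
	Text       string  `json:"text"`
	Confidence float64 `json:"confidence"`
	TextRegion [][]int `json:"text_region"`
}

func main() {
	// hypothetical sample item; real values come from the Hub Serving response
	sample := `[{"text": "hello", "confidence": 0.98,
	             "text_region": [[24,36],[304,34],[304,72],[24,74]]}]`
	var items []ResultItem
	if err := json.Unmarshal([]byte(sample), &items); err != nil {
		panic(err)
	}
	fmt.Printf("%s (%.2f) at %v\n", items[0].Text, items[0].Confidence, items[0].TextRegion)
}
```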
package com.paddelocr_springboot.demo.Controller;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.json.JSONObject;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.ResourceUtils;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.http.*;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;
import sun.misc.BASE64Encoder;
import java.util.Objects;
@RestController
public class OCR {
@RequestMapping("/")
public ResponseEntity<String> hi(){
// create the request headers
HttpHeaders headers = new HttpHeaders();
// set the request content type to JSON
headers.setContentType(MediaType.APPLICATION_JSON);
// read the static resource file 1.png
InputStream imagePath = this.getClass().getResourceAsStream("/1.png");
// build the request parameters
MultiValueMap<String, String> map= new LinkedMultiValueMap<String, String>();
// add the "images" parameter carrying the Base64-encoded image
map.add("images", ImageToBase64(imagePath));
// build the request entity
HttpEntity<MultiValueMap<String, String>> request = new HttpEntity<MultiValueMap<String, String>>(map, headers);
RestTemplate restTemplate = new RestTemplate();
// send the request to the Hub Serving endpoint
ResponseEntity<String> response = restTemplate.postForEntity("http://127.0.0.1:8866/predict/ocr_system", request, String.class);
// return the response body
return response;
}
private String ImageToBase64(InputStream imgPath) {
byte[] data = null;
// read the image into a byte array
try {
InputStream in = imgPath;
System.out.println(imgPath);
data = new byte[in.available()];
in.read(data);
in.close();
} catch (IOException e) {
e.printStackTrace();
}
// Base64-encode the byte array
BASE64Encoder encoder = new BASE64Encoder();
// return the Base64-encoded string
//System.out.println("image as Base64: " + encoder.encode(Objects.requireNonNull(data)));
return encoder.encode(Objects.requireNonNull(data));
}
}
package com.paddelocr_springboot.demo;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
public class DemoApplication {
public static void main(String[] args) {
SpringApplication.run(DemoApplication.class, args);
}
}
server.port=8081
http_pool.max_total: 200
http_pool.default_max_per_route: 100
http_pool.connect_timeout: 5000
http_pool.connection_request_timeout: 1000
http_pool.socket_timeout: 65000
http_pool.validate_after_inactivity: 2000
package com.paddelocr_springboot.demo;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
@SpringBootTest
class DemoApplicationTests {
@Test
void contextLoads() {
}
}
# PaddleOCR-GO
This service is the golang deployment of [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR).
## 1. Environment Setup
### Runtime environment
- go: 1.14
- OpenCV: 4.3.0
- PaddlePaddle: 1.8.4
- Build environment: cmake 3.15.4 | gcc 4.8.5
- Built and verified on a CentOS 7.4 environment; on Windows, you will need to work out building `OpenCV` and `PaddlePaddle` yourself
*Also, the build steps below use `.bashrc` as the personal environment configuration file; if you use `zsh`, replace it with `.zshrc` accordingly.*
### 1.1 Install golang
Download [golang](https://golang.org/dl/) from the official site; version 1.13 or later is recommended. After downloading, extract it into the install directory of your choice and configure the environment variables; version 1.14 is used as the example here.
```shell
# download golang
wget https://golang.org/dl/go1.14.10.linux-amd64.tar.gz
# extract into /usr/local
tar -xzvf go1.14.10.linux-amd64.tar.gz -C /usr/local
# set GOROOT, the go install directory
echo "export GOROOT=/usr/local/go" >> ~/.bashrc
echo "export PATH=$PATH:$GOROOT/bin" >> ~/.bashrc
# set GOPATH, the install directory for go packages; any custom directory works
echo "export GOPATH=$HOME/golang" >> ~/.bashrc
echo "export PATH=$PATH:$GOPATH/bin" >> ~/.bashrc
# set GOPROXY, the download proxy for go mod, and turn on mod mode
echo "export GO111MODULE=on" >> ~/.bashrc
echo "export GOPROXY=https://mirrors.aliyun.com/goproxy/" >> ~/.bashrc
source ~/.bashrc
```
### 1.2 Build the OpenCV library
In go, OpenCV is used mainly through the [gocv](https://github.com/hybridgroup/gocv) package, which calls the OpenCV interfaces via cgo, so the OpenCV library still needs to be built.
**Pitfall 1: in the [official gocv implementation](https://github.com/hybridgroup/gocv), some interfaces do not match the original C++ OpenCV API, so image-processing results show small numerical deviations. To handle this, [this repository](https://github.com/LKKlein/gocv) forks the official gocv source and fixes those inconsistent APIs, keeping the results consistent with other languages.**
For building OpenCV, gocv officially provides a [Makefile](https://github.com/LKKlein/gocv/blob/lk/Makefile) for one-step installation; the detailed steps are in the [official guide](https://github.com/LKKlein/gocv/blob/lk/README_ORIGIN.md#ubuntulinux)
Here the installation is done step by step instead, which makes it easier to track down errors.
- Download and extract OpenCV-4.3.0 and OpenCV-Contrib-4.3.0
```shell
# create the opencv install directory
mkdir -p ~/opencv
# download OpenCV
cd ~/opencv
curl -sL https://github.com/opencv/opencv/archive/4.3.0.zip > opencv.zip
unzip -q opencv.zip
rm -rf opencv.zip
# download OpenCV-Contrib
curl -sL https://github.com/opencv/opencv_contrib/archive/4.3.0.zip > opencv-contrib.zip
unzip -q opencv-contrib.zip
rm -rf opencv-contrib.zip
```
- Install the dependencies
```shell
sudo yum -y install pkgconfig cmake git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel
```
- Build and install
```shell
mkdir -p ~/.local/opencv-4.3.0
cd ~/opencv/opencv-4.3.0
mkdir build
cd build
cmake -D WITH_IPP=OFF \
-D WITH_OPENGL=OFF \
-D WITH_QT=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=OFF \
-D BUILD_opencv_python=OFF \
-D BUILD_opencv_python2=OFF \
-D BUILD_opencv_python3=OFF \
-D OPENCV_GENERATE_PKGCONFIG=ON \
-D CMAKE_INSTALL_PREFIX=$HOME/.local/opencv-4.3.0 \
-D OPENCV_ENABLE_NONFREE=ON \
-D OPENCV_EXTRA_MODULES_PATH=$HOME/opencv/opencv_contrib-4.3.0/modules ..
make -j8
make install
sudo ldconfig
```
During `make`, the build may fail because two modules of `xfeatures2d` fail to download. Just download those files manually into `$HOME/opencv/opencv_contrib-4.3.0/modules/xfeatures2d/src` and run `make -j8` again. The file URLs can be found in the links given [here](https://github.com/opencv/opencv_contrib/issues/1301#issuecomment-447181426).
- Configure environment variables
```shell
echo "export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$HOME/.local/opencv-4.3.0/lib64/pkgconfig" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.local/opencv-4.3.0/lib64" >> ~/.bashrc
source ~/.bashrc
```
- Verify the installation
```shell
# install the gocv package; run mod init first
go mod init opencv
go get -u github.com/LKKlein/gocv
# verify the installation
cd $GOPATH/pkg/mod/github.com/!l!k!klein/gocv@v0.28.0
go run ./cmd/version/main.go
# output:
# gocv version: 0.28.0
# opencv lib version: 4.3.0
```
### 1.3 Build the PaddlePaddle C API
go can only call a C API through cgo and cannot interact with C++ directly, so the PaddlePaddle C API needs to be built. Alternatively, you can write your own C wrapper code and headers around the C++ API, which lets you use the prebuilt PaddlePaddle C++ inference library directly without building it yourself; see [this repository](https://github.com/LKKlein/paddleocr-go/tree/dev_cxx).
- Get the PaddlePaddle source code
```shell
cd ~
git clone --recurse-submodules https://github.com/paddlepaddle/paddle
# switch to the v1.8.4 tag
cd paddle
git checkout v1.8.4
# current versions need nccl installed first, for both single- and multi-GPU builds
git clone https://github.com/NVIDIA/nccl.git
cd nccl
make -j8
make install
```
- Build the Paddle source
**Pitfall 2: the PaddlePaddle C API has a bug where the input/output variable names can only be fetched for the first model; all later models fail to fetch them and thus cannot fetch their outputs. See this [issue](https://github.com/PaddlePaddle/Paddle/issues/28309). Before building, manually delete the `static` on lines `210` and `215` of `paddle/fluid/inference/capi/pd_predictor.cc`.**
Only after that bug is handled can the build proceed. The build options are described in the [official docs](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html#id12); note that some options require extra dependencies, so make sure those are in place before enabling them.
```shell
# create the folder for the C++ inference library
mkdir -p ~/paddle_inference
export PADDLE_INFER=$HOME/paddle_inference
# run the build
export PADDLE_ROOT=`pwd`
mkdir build
cd build
cmake -DFLUID_INFERENCE_INSTALL_DIR=$PADDLE_INFER \
-DWITH_CONTRIB=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DWITH_PYTHON=OFF \
-DWITH_MKL=ON \
-DWITH_GPU=ON \
-DON_INFER=ON \
-DWITH_MKLDNN=ON \
-DWITH_XBYAK=ON \
-DWITH_DSO=OFF ..
make
make inference_lib_dist
```
After the build finishes, the following files are generated under `build/fluid_inference_c_install_dir`
```
build/fluid_inference_c_install_dir
├── paddle
├── third_party
└── version.txt
```
Here `paddle` is the C prediction API of the Paddle library, and `version.txt` holds the version info of this inference library. Finally, add the C inference library to the environment variables.
```shell
echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PADDLE_ROOT/build/fluid_inference_c_install_dir/paddle/lib" >> ~/.bashrc
echo "export LIBRARY_PATH=$LIBRARY_PATH:$PADDLE_ROOT/build/fluid_inference_c_install_dir/paddle/lib" >> ~/.bashrc
source ~/.bashrc
```
## 2. The paddleocr-go prediction library
### 2.1 Install paddleocr-go
Make sure the C inference library has been added to the environment variables, then run the install command directly
```shell
go get -u github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go
```
### 2.2 API overview
Import the package in go
```go
import "github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/ocr"
```
- Prediction result struct
```go
type OCRText struct {
BBox [][]int `json:"bbox"`
Text string `json:"text"`
Score float64 `json:"score"`
}
```
The OCR result of one image contains multiple `OCRText` entries; each holds the predicted box, the predicted text, and the text score.
- OCR system constructor
```go
func NewOCRSystem(confFile string, a map[string]interface{}) *OCRSystem
```
`OCRSystem` is the main structure that exposes the API;
`confFile` is the path of a yaml config file in which the prediction parameters can be changed; it may also be an empty string, in which case all defaults are used;
`a` holds config parameters defined directly in code; they have higher priority than the config file and override both the config file and the defaults (see the usage sketch at the end of this section).
- Single-image prediction API
```go
func (ocr *OCRSystem) PredictOneImage(img gocv.Mat) []OCRText
```
- Directory prediction API
```go
func (ocr *OCRSystem) PredictDirImages(dirname string) map[string][]OCRText
```
`dirname` is the image directory; by default all `jpg` and `png` images under that directory are predicted, and the result for each image is returned.
- OCR Server
```go
func (ocr *OCRSystem) StartServer(port string)
```
Starts the OCR prediction server. Once it is running, upload an image to be recognized via a `post` request to `http://$ip:$port/ocr` to get the recognition results of all text in that image. `$ip` is the ip of the serving host (the local ip `127.0.0.1` for local access) and `$port` is the port parameter passed in.
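Putting the APIs above together, here is a minimal usage sketch; the config path `config/conf.yaml` and the image path `images/test.jpg` are only illustrative, and error handling is omitted:
```go
package main

import (
	"fmt"

	"github.com/LKKlein/gocv"
	"github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/ocr"
)

func main() {
	// empty override map: use config/conf.yaml (or the built-in defaults) as-is
	sys := ocr.NewOCRSystem("config/conf.yaml", map[string]interface{}{})

	// read an image with gocv and run the full det + (optional cls) + rec pipeline
	img := gocv.IMRead("images/test.jpg", gocv.IMReadColor)
	defer img.Close()

	for _, res := range sys.PredictOneImage(img) {
		fmt.Printf("%.3f %s %v\n", res.Score, res.Text, res.BBox)
	}
}
```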
## 3. Prediction demo
### 3.1 Build the prediction demo
Either of the following two ways produces the prediction demo binary; pick one
- Download and build the `paddleocr-go` code
```shell
git clone https://github.com/PaddlePaddle/PaddleOCR
cd PaddleOCR/thirdparty/paddleocr-go
# make sure the C dynamic library path is in the environment variables, then generate the ppocr-go binary
go build ppocr-go.go
```
- Install automatically as a go package
```shell
# this generates the ppocr-go binary under $GOPATH/bin; with PATH=$PATH:$GOPATH/bin configured you can drop `./` from the commands below and run ppocr-go directly
go get -u github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go
```
### 3.2 Adjust the prediction config
The provided config file `config/conf.yaml` contains the default OCR prediction parameters; change them as needed.
For example, set `use_gpu` to `false` to run prediction on CPU; change `det_model_dir`, `rec_model_dir` and `cls_model_dir` to your local model paths, or change the dictionary path `rec_char_dict_path`. If any of these four paths is configured as an http link, the file is downloaded to a local directory automatically. The config also covers the prediction engine, the detection model and thresholds, the direction classifier, and the recognition model and thresholds; see [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md#%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E) for the meaning of each parameter.
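When calling the library from go code rather than the CLI, the same adjustments can be made through the override map, which, as noted in section 2.2, has priority over the config file. A minimal sketch, with placeholder local paths:
```go
package main

import "github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/ocr"

func main() {
	// overrides take priority over config/conf.yaml; the paths below are placeholders
	sys := ocr.NewOCRSystem("config/conf.yaml", map[string]interface{}{
		"use_gpu":       false,                // run prediction on CPU
		"det_model_dir": "/path/to/det_model", // local detection model
		"rec_model_dir": "/path/to/rec_model", // local recognition model
		"cls_model_dir": "/path/to/cls_model", // local direction classifier
	})
	sys.StartServer("18600") // or PredictOneImage / PredictDirImages
}
```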
### 3.3 Run the prediction demo
The demo supports three prediction modes: single image, batch over a directory, and OCR Server, with command-line priority decreasing in that order.
#### 3.3.1 Single-image prediction
```shell
./ppocr-go --config config/conf.yaml --image images/test.jpg
```
When it finishes, output like the following is printed:
<img src="./images/result/single_img_result.jpg" style="zoom:80%;" />
#### 3.3.2 Batch prediction over a directory
```shell
./ppocr-go --config config/conf.yaml --image_dir ./images
```
When it finishes, output like the following is printed:
<img src="./images/result/img_dir_result.jpg" style="zoom:80%;" />
#### 3.3.3 Start the OCR Server
```shell
./ppocr-go --use_servering --port=18600
```
With the server running, other clients can request ocr prediction via `post` requests. A `Python` client is used as the example below
```python
import requests
files = {'image': open('images/test.jpg','rb')}
url = "http://127.0.0.1:18600/ocr"
r = requests.post(url, files=files)
print(r.text)
```
Running it yields the following result
![](./images/result/python_client_result.jpg)
Finally, visualizing the above result in Python gives
![](./images/result/python_vis_result.jpg)
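A client can also be written in go; the sketch below assumes only what the server itself requires, namely a multipart form field named `image` carrying a jpg/png file, posted to the `/ocr` route (the image path is illustrative):
```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	// read the image and wrap it in a multipart form under the field name "image"
	file, err := os.Open("images/test.jpg")
	if err != nil {
		panic(err)
	}
	defer file.Close()

	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, _ := writer.CreateFormFile("image", "test.jpg")
	io.Copy(part, file)
	writer.Close()

	// post to the OCR server started with --port=18600
	resp, err := http.Post("http://127.0.0.1:18600/ocr", writer.FormDataContentType(), &buf)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```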
# params for prediction engine
use_gpu: true
ir_optim: true
enable_mkldnn: false
# use_zero_copy_run: true
use_tensorrt: false
num_cpu_threads: 6
gpu_id: 0
gpu_mem: 2000
# params for text detector
det_algorithm: "DB"
det_model_dir: "https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar"
det_max_side_len: 960
# DB params
det_db_thresh: 0.3
det_db_box_thresh: 0.5
det_db_unclip_ratio: 2.0
# EAST params
det_east_score_thresh: 0.8
det_east_cover_thresh: 0.1
det_east_nms_thresh: 0.2
# params for text recognizer
rec_algorithm: "CRNN"
rec_model_dir: "https://paddleocr.bj.bcebos.com/20-09-22/mobile/rec/ch_ppocr_mobile_v1.1_rec_infer.tar"
rec_image_shape: [3, 32, 320]
rec_char_type: "ch"
rec_batch_num: 30
max_text_length: 25
rec_char_dict_path: "https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/develop/ppocr/utils/ppocr_keys_v1.txt"
use_space_char: true
# params for text classifier
use_angle_cls: false
cls_model_dir: "https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_infer.tar"
cls_image_shape: [3, 48, 192]
label_list: ["0", "180"]
cls_batch_num: 30
cls_thresh: 0.9
lang: ch
det: true
rec: true
cls: false
module github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go
go 1.14
require (
github.com/LKKlein/gocv v0.28.0
github.com/ctessum/go.clipper v0.0.0-20200522184404-9c744fa3e86c
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
)
github.com/LKKlein/gocv v0.28.0 h1:1MMvs9uYf+QGPi86it2pUmN8RRoyMnPLUefKB/Jf1Q0=
github.com/LKKlein/gocv v0.28.0/go.mod h1:MP408EL7eakRU3vzjsozzfELSX7HDDGdMpWANV1IOHY=
github.com/PaddlePaddle/PaddleOCR v1.1.0 h1:zmPevInTs5P+ctSokI9sWQLTThmJBUCo/JCLbB5xbps=
github.com/ctessum/go.clipper v0.0.0-20200522184404-9c744fa3e86c h1:VXCsVlam0R2Yl7VET2GxZBPdOa7gFRexyhfWb9v9QtM=
github.com/ctessum/go.clipper v0.0.0-20200522184404-9c744fa3e86c/go.mod h1:KRMo3PCsooJP3LmCwKI76dkd7f3ki3zwYLHR7Iwbi5k=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
package ocr
import (
"bytes"
"encoding/json"
"errors"
"image"
"image/color"
"io"
"log"
"math"
"net/http"
"path"
"path/filepath"
"sort"
"strings"
"github.com/LKKlein/gocv"
"github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/paddle"
)
// PaddleModel wraps a paddle Predictor together with its zero-copy input/output tensors and the engine options read from the config.
type PaddleModel struct {
predictor *paddle.Predictor
input *paddle.ZeroCopyTensor
outputs []*paddle.ZeroCopyTensor
useGPU bool
deviceID int
initGPUMem int
numThreads int
useMKLDNN bool
useTensorRT bool
useIROptim bool
}
func NewPaddleModel(args map[string]interface{}) *PaddleModel {
return &PaddleModel{
useGPU: getBool(args, "use_gpu", false),
deviceID: getInt(args, "gpu_id", 0),
initGPUMem: getInt(args, "gpu_mem", 1000),
numThreads: getInt(args, "num_cpu_threads", 6),
useMKLDNN: getBool(args, "enable_mkldnn", false),
useTensorRT: getBool(args, "use_tensorrt", false),
useIROptim: getBool(args, "ir_optim", true),
}
}
func (model *PaddleModel) LoadModel(modelDir string) {
config := paddle.NewAnalysisConfig()
config.DisableGlogInfo()
config.SetModel(modelDir+"/model", modelDir+"/params")
if model.useGPU {
config.EnableUseGpu(model.initGPUMem, model.deviceID)
} else {
config.DisableGpu()
config.SetCpuMathLibraryNumThreads(model.numThreads)
if model.useMKLDNN {
config.EnableMkldnn()
}
}
// config.EnableMemoryOptim()
if model.useIROptim {
config.SwitchIrOptim(true)
}
// false for zero copy tensor
config.SwitchUseFeedFetchOps(false)
config.SwitchSpecifyInputNames(true)
model.predictor = paddle.NewPredictor(config)
model.input = model.predictor.GetInputTensors()[0]
model.outputs = model.predictor.GetOutputTensors()
}
type OCRText struct {
BBox [][]int `json:"bbox"`
Text string `json:"text"`
Score float64 `json:"score"`
}
type TextPredictSystem struct {
detector *DBDetector
cls *TextClassifier
rec *TextRecognizer
}
func NewTextPredictSystem(args map[string]interface{}) *TextPredictSystem {
sys := &TextPredictSystem{
detector: NewDBDetector(getString(args, "det_model_dir", ""), args),
rec: NewTextRecognizer(getString(args, "rec_model_dir", ""), args),
}
if getBool(args, "use_angle_cls", false) {
sys.cls = NewTextClassifier(getString(args, "cls_model_dir", ""), args)
}
return sys
}
// sortBoxes orders detected boxes top-to-bottom then left-to-right, and swaps adjacent boxes that sit on roughly the same line (y within 10px) but are out of x order.
func (sys *TextPredictSystem) sortBoxes(boxes [][][]int) [][][]int {
sort.Slice(boxes, func(i, j int) bool {
if boxes[i][0][1] < boxes[j][0][1] {
return true
}
if boxes[i][0][1] > boxes[j][0][1] {
return false
}
return boxes[i][0][0] < boxes[j][0][0]
})
for i := 0; i < len(boxes)-1; i++ {
if math.Abs(float64(boxes[i+1][0][1]-boxes[i][0][1])) < 10 && boxes[i+1][0][0] < boxes[i][0][0] {
boxes[i], boxes[i+1] = boxes[i+1], boxes[i]
}
}
return boxes
}
// getRotateCropImage perspective-crops the quadrilateral box out of img; if the crop is at least 1.5x taller than wide, it is rotated by 90 degrees so the text lies horizontally.
func (sys *TextPredictSystem) getRotateCropImage(img gocv.Mat, box [][]int) gocv.Mat {
cropW := int(math.Sqrt(math.Pow(float64(box[0][0]-box[1][0]), 2) + math.Pow(float64(box[0][1]-box[1][1]), 2)))
cropH := int(math.Sqrt(math.Pow(float64(box[0][0]-box[3][0]), 2) + math.Pow(float64(box[0][1]-box[3][1]), 2)))
ptsstd := make([]image.Point, 4)
ptsstd[0] = image.Pt(0, 0)
ptsstd[1] = image.Pt(cropW, 0)
ptsstd[2] = image.Pt(cropW, cropH)
ptsstd[3] = image.Pt(0, cropH)
points := make([]image.Point, 4)
points[0] = image.Pt(box[0][0], box[0][1])
points[1] = image.Pt(box[1][0], box[1][1])
points[2] = image.Pt(box[2][0], box[2][1])
points[3] = image.Pt(box[3][0], box[3][1])
M := gocv.GetPerspectiveTransform(points, ptsstd)
defer M.Close()
dstimg := gocv.NewMat()
gocv.WarpPerspectiveWithParams(img, &dstimg, M, image.Pt(cropW, cropH),
gocv.InterpolationCubic, gocv.BorderReplicate, color.RGBA{0, 0, 0, 0})
if float64(dstimg.Rows()) >= float64(dstimg.Cols())*1.5 {
srcCopy := gocv.NewMat()
gocv.Transpose(dstimg, &srcCopy)
defer dstimg.Close()
gocv.Flip(srcCopy, &srcCopy, 0)
return srcCopy
}
return dstimg
}
// Run executes detection, optional direction classification, and recognition on a single image.
func (sys *TextPredictSystem) Run(img gocv.Mat) []OCRText {
srcimg := gocv.NewMat()
defer srcimg.Close()
img.CopyTo(&srcimg)
boxes := sys.detector.Run(img)
if len(boxes) == 0 {
return nil
}
boxes = sys.sortBoxes(boxes)
cropimages := make([]gocv.Mat, len(boxes))
for i := 0; i < len(boxes); i++ {
tmpbox := make([][]int, len(boxes[i]))
for j := 0; j < len(tmpbox); j++ {
tmpbox[j] = make([]int, len(boxes[i][j]))
copy(tmpbox[j], boxes[i][j])
}
cropimg := sys.getRotateCropImage(srcimg, tmpbox)
cropimages[i] = cropimg
}
if sys.cls != nil {
cropimages = sys.cls.Run(cropimages)
}
recResult := sys.rec.Run(cropimages, boxes)
return recResult
}
type OCRSystem struct {
args map[string]interface{}
tps *TextPredictSystem
}
// NewOCRSystem reads the yaml config (falling back to the built-in defaults on error) and applies the overrides in a on top of it.
func NewOCRSystem(confFile string, a map[string]interface{}) *OCRSystem {
args, err := ReadYaml(confFile)
if err != nil {
log.Printf("Read config file %v failed! Please check. err: %v\n", confFile, err)
log.Println("Program will use default config.")
args = defaultArgs
}
for k, v := range a {
args[k] = v
}
return &OCRSystem{
args: args,
tps: NewTextPredictSystem(args),
}
}
func (ocr *OCRSystem) StartServer(port string) {
http.HandleFunc("/ocr", ocr.predictHandler)
log.Println("OCR Server has been started on port :", port)
err := http.ListenAndServe(":"+port, nil)
if err != nil {
log.Panicf("http error! error: %v\n", err)
}
}
// predictHandler serves POST /ocr: it reads a jpg/png from the multipart field "image", runs OCR, and writes the results back as JSON.
func (ocr *OCRSystem) predictHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.Write([]byte(errors.New("post method only").Error()))
return
}
r.ParseMultipartForm(32 << 20)
var buf bytes.Buffer
file, header, err := r.FormFile("image")
if err != nil {
w.Write([]byte(err.Error()))
return
}
defer file.Close()
ext := strings.ToLower(path.Ext(header.Filename))
if ext != ".jpg" && ext != ".png" {
w.Write([]byte(errors.New("only support image endswith jpg/png").Error()))
return
}
io.Copy(&buf, file)
img, err2 := gocv.IMDecode(buf.Bytes(), gocv.IMReadColor)
if err2 != nil {
w.Write([]byte(err2.Error()))
return
}
defer img.Close()
result := ocr.PredictOneImage(img)
if output, err3 := json.Marshal(result); err3 != nil {
w.Write([]byte(err3.Error()))
} else {
w.Write(output)
}
}
func (ocr *OCRSystem) PredictOneImage(img gocv.Mat) []OCRText {
return ocr.tps.Run(img)
}
func (ocr *OCRSystem) PredictDirImages(dirname string) map[string][]OCRText {
if dirname == "" {
return nil
}
imgs, _ := filepath.Glob(dirname + "/*.jpg")
tmpimgs, _ := filepath.Glob(dirname + "/*.png")
imgs = append(imgs, tmpimgs...)
results := make(map[string][]OCRText, len(imgs))
for i := 0; i < len(imgs); i++ {
imgname := imgs[i]
img := ReadImage(imgname)
defer img.Close()
res := ocr.PredictOneImage(img)
results[imgname] = res
}
return results
}
package ocr
var (
defaultArgs = map[string]interface{}{
"use_gpu": true,
"ir_optim": true,
"enable_mkldnn": false,
"use_tensorrt": false,
"num_cpu_threads": 6,
"gpu_id": 0,
"gpu_mem": 2000,
"det_algorithm": "DB",
"det_model_dir": "https://paddleocr.bj.bcebos.com/20-09-22/mobile/det/ch_ppocr_mobile_v1.1_det_infer.tar",
"det_max_side_len": 960,
"det_db_thresh": 0.3,
"det_db_box_thresh": 0.5,
"det_db_unclip_ratio": 2.0,
"det_east_score_thresh": 0.8,
"det_east_cover_thresh": 0.1,
"det_east_nms_thresh": 0.2,
"rec_algorithm": "CRNN",
"rec_model_dir": "https://paddleocr.bj.bcebos.com/20-09-22/mobile/rec/ch_ppocr_mobile_v1.1_rec_infer.tar",
"rec_image_shape": []interface{}{3, 32, 320},
"rec_char_type": "ch",
"rec_batch_num": 30,
"max_text_length": 25,
"rec_char_dict_path": "https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/develop/ppocr/utils/ppocr_keys_v1.txt",
"use_space_char": true,
"use_angle_cls": false,
"cls_model_dir": "https://paddleocr.bj.bcebos.com/20-09-22/cls/ch_ppocr_mobile_v1.1_cls_infer.tar",
"cls_image_shape": []interface{}{3, 48, 192},
"label_list": []interface{}{"0", "180"},
"cls_batch_num": 30,
"cls_thresh": 0.9,
"lang": "ch",
"det": true,
"rec": true,
"cls": false,
}
)
package ocr
import (
"log"
"os"
"time"
"github.com/LKKlein/gocv"
)
type TextClassifier struct {
*PaddleModel
batchNum int
thresh float64
shape []int
labels []string
}
type ClsResult struct {
Score float32
Label int64
}
func NewTextClassifier(modelDir string, args map[string]interface{}) *TextClassifier {
shapes := []int{3, 48, 192}
if v, ok := args["cls_image_shape"]; ok {
for i, s := range v.([]interface{}) {
shapes[i] = s.(int)
}
}
cls := &TextClassifier{
PaddleModel: NewPaddleModel(args),
batchNum: getInt(args, "cls_batch_num", 30),
thresh: getFloat64(args, "cls_thresh", 0.9),
shape: shapes,
}
if checkModelExists(modelDir) {
home, _ := os.UserHomeDir()
modelDir, _ = downloadModel(home+"/.paddleocr/cls", modelDir)
} else {
log.Panicf("cls model path: %v not exist! Please check!", modelDir)
}
cls.LoadModel(modelDir)
return cls
}
// Run classifies the text direction of each crop in batches and rotates a crop by 180 degrees when it is predicted as flipped with a score above the threshold.
func (cls *TextClassifier) Run(imgs []gocv.Mat) []gocv.Mat {
batch := cls.batchNum
var clsTime int64 = 0
clsout := make([]ClsResult, len(imgs))
srcimgs := make([]gocv.Mat, len(imgs))
c, h, w := cls.shape[0], cls.shape[1], cls.shape[2]
for i := 0; i < len(imgs); i += batch {
j := i + batch
if len(imgs) < j {
j = len(imgs)
}
normImgs := make([]float32, (j-i)*c*h*w)
for k := i; k < j; k++ {
tmp := gocv.NewMat()
imgs[k].CopyTo(&tmp)
srcimgs[k] = tmp
img := clsResize(imgs[k], cls.shape)
data := normPermute(img, []float32{0.5, 0.5, 0.5}, []float32{0.5, 0.5, 0.5}, 255.0)
copy(normImgs[(k-i)*c*h*w:], data)
}
st := time.Now()
cls.input.SetValue(normImgs)
cls.input.Reshape([]int32{int32(j - i), int32(c), int32(h), int32(w)})
cls.predictor.SetZeroCopyInput(cls.input)
cls.predictor.ZeroCopyRun()
cls.predictor.GetZeroCopyOutput(cls.outputs[0])
cls.predictor.GetZeroCopyOutput(cls.outputs[1])
var probout [][]float32
var labelout []int64
if len(cls.outputs[0].Shape()) == 2 {
probout = cls.outputs[0].Value().([][]float32)
} else {
labelout = cls.outputs[0].Value().([]int64)
}
if len(cls.outputs[1].Shape()) == 2 {
probout = cls.outputs[1].Value().([][]float32)
} else {
labelout = cls.outputs[1].Value().([]int64)
}
clsTime += int64(time.Since(st).Milliseconds())
for no, label := range labelout {
score := probout[no][label]
clsout[i+no] = ClsResult{
Score: score,
Label: label,
}
if label%2 == 1 && float64(score) > cls.thresh {
gocv.Rotate(srcimgs[i+no], &srcimgs[i+no], gocv.Rotate180Clockwise)
}
}
}
log.Println("cls num: ", len(clsout), ", cls time elapse: ", clsTime, "ms")
return srcimgs
}
package ocr
import (
"log"
"os"
"time"
"github.com/LKKlein/gocv"
)
type DBDetector struct {
*PaddleModel
preProcess DetPreProcess
postProcess DetPostProcess
}
func NewDBDetector(modelDir string, args map[string]interface{}) *DBDetector {
maxSideLen := getInt(args, "det_max_side_len", 960)
thresh := getFloat64(args, "det_db_thresh", 0.3)
boxThresh := getFloat64(args, "det_db_box_thresh", 0.5)
unClipRatio := getFloat64(args, "det_db_unclip_ratio", 2.0)
detector := &DBDetector{
PaddleModel: NewPaddleModel(args),
preProcess: NewDBProcess(make([]int, 0), maxSideLen),
postProcess: NewDBPostProcess(thresh, boxThresh, unClipRatio),
}
if checkModelExists(modelDir) {
home, _ := os.UserHomeDir()
modelDir, _ = downloadModel(home+"/.paddleocr/det", modelDir)
} else {
log.Panicf("det model path: %v not exist! Please check!", modelDir)
}
detector.LoadModel(modelDir)
return detector
}
// Run resizes the image, runs the detection model, and post-processes the output map into text boxes in original-image coordinates.
func (det *DBDetector) Run(img gocv.Mat) [][][]int {
oriH := img.Rows()
oriW := img.Cols()
data, resizeH, resizeW := det.preProcess.Run(img)
st := time.Now()
det.input.SetValue(data)
det.input.Reshape([]int32{1, 3, int32(resizeH), int32(resizeW)})
det.predictor.SetZeroCopyInput(det.input)
det.predictor.ZeroCopyRun()
det.predictor.GetZeroCopyOutput(det.outputs[0])
ratioH, ratioW := float64(resizeH)/float64(oriH), float64(resizeW)/float64(oriW)
boxes := det.postProcess.Run(det.outputs[0], oriH, oriW, ratioH, ratioW)
log.Println("det_box num: ", len(boxes), ", time elapse: ", time.Since(st))
return boxes
}
package ocr
import (
"log"
"os"
"time"
"github.com/LKKlein/gocv"
)
type TextRecognizer struct {
*PaddleModel
batchNum int
textLen int
shape []int
charType string
labels []string
}
func NewTextRecognizer(modelDir string, args map[string]interface{}) *TextRecognizer {
shapes := []int{3, 32, 320}
if v, ok := args["rec_image_shape"]; ok {
for i, s := range v.([]interface{}) {
shapes[i] = s.(int)
}
}
home, _ := os.UserHomeDir()
labelpath := getString(args, "rec_char_dict_path", home+"/.paddleocr/rec/ppocr_keys_v1.txt")
labels := readLines2StringSlice(labelpath)
if getBool(args, "use_space_char", true) {
labels = append(labels, " ")
}
rec := &TextRecognizer{
PaddleModel: NewPaddleModel(args),
batchNum: getInt(args, "rec_batch_num", 30),
textLen: getInt(args, "max_text_length", 25),
charType: getString(args, "rec_char_type", "ch"),
shape: shapes,
labels: labels,
}
if checkModelExists(modelDir) {
modelDir, _ = downloadModel(home+"/.paddleocr/rec/ch", modelDir)
} else {
log.Panicf("rec model path: %v not exist! Please check!", modelDir)
}
rec.LoadModel(modelDir)
return rec
}
// Run recognizes the text in each cropped box in batches and returns the decoded strings with their mean character confidence.
func (rec *TextRecognizer) Run(imgs []gocv.Mat, bboxes [][][]int) []OCRText {
recResult := make([]OCRText, 0, len(imgs))
batch := rec.batchNum
var recTime int64 = 0
c, h, w := rec.shape[0], rec.shape[1], rec.shape[2]
for i := 0; i < len(imgs); i += batch {
j := i + batch
if len(imgs) < j {
j = len(imgs)
}
maxwhratio := 0.0
for k := i; k < j; k++ {
h, w := imgs[k].Rows(), imgs[k].Cols()
ratio := float64(w) / float64(h)
if ratio > maxwhratio {
maxwhratio = ratio
}
}
if rec.charType == "ch" {
w = int(32 * maxwhratio)
}
normimgs := make([]float32, (j-i)*c*h*w)
for k := i; k < j; k++ {
data := crnnPreprocess(imgs[k], rec.shape, []float32{0.5, 0.5, 0.5},
[]float32{0.5, 0.5, 0.5}, 255.0, maxwhratio, rec.charType)
defer imgs[k].Close()
copy(normimgs[(k-i)*c*h*w:], data)
}
st := time.Now()
rec.input.SetValue(normimgs)
rec.input.Reshape([]int32{int32(j - i), int32(c), int32(h), int32(w)})
rec.predictor.SetZeroCopyInput(rec.input)
rec.predictor.ZeroCopyRun()
rec.predictor.GetZeroCopyOutput(rec.outputs[0])
rec.predictor.GetZeroCopyOutput(rec.outputs[1])
recIdxBatch := rec.outputs[0].Value().([][]int64)
recIdxLod := rec.outputs[0].Lod()
predictBatch := rec.outputs[1].Value().([][]float32)
predictLod := rec.outputs[1].Lod()
recTime += int64(time.Since(st).Milliseconds())
for rno := 0; rno < len(recIdxLod)-1; rno++ {
predIdx := make([]int, 0, 2)
for beg := recIdxLod[rno]; beg < recIdxLod[rno+1]; beg++ {
predIdx = append(predIdx, int(recIdxBatch[beg][0]))
}
if len(predIdx) == 0 {
continue
}
words := ""
for n := 0; n < len(predIdx); n++ {
words += rec.labels[predIdx[n]]
}
score := 0.0
count := 0
blankPosition := int(rec.outputs[1].Shape()[1])
for beg := predictLod[rno]; beg < predictLod[rno+1]; beg++ {
argMaxID, maxVal := argmax(predictBatch[beg])
if blankPosition-1-argMaxID > 0 {
score += float64(maxVal)
count++
}
}
score = score / float64(count)
recResult = append(recResult, OCRText{
BBox: bboxes[i+rno],
Text: words,
Score: score,
})
}
}
log.Println("rec num: ", len(recResult), ", rec time elapse: ", recTime, "ms")
return recResult
}
package ocr
import (
"image"
"image/color"
"math"
"sort"
"github.com/LKKlein/gocv"
"github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/paddle"
clipper "github.com/ctessum/go.clipper"
)
type xFloatSortBy [][]float32
func (a xFloatSortBy) Len() int { return len(a) }
func (a xFloatSortBy) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a xFloatSortBy) Less(i, j int) bool { return a[i][0] < a[j][0] }
type xIntSortBy [][]int
func (a xIntSortBy) Len() int { return len(a) }
func (a xIntSortBy) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a xIntSortBy) Less(i, j int) bool { return a[i][0] < a[j][0] }
type DetPostProcess interface {
Run(output *paddle.ZeroCopyTensor, oriH, oriW int, ratioH, ratioW float64) [][][]int
}
type DBPostProcess struct {
thresh float64
boxThresh float64
maxCandidates int
unClipRatio float64
minSize int
}
func NewDBPostProcess(thresh, boxThresh, unClipRatio float64) *DBPostProcess {
return &DBPostProcess{
thresh: thresh,
boxThresh: boxThresh,
unClipRatio: unClipRatio,
maxCandidates: 1000,
minSize: 3,
}
}
func (d *DBPostProcess) getMinBoxes(rect gocv.RotatedRect) [][]float32 {
points := gocv.NewMat()
gocv.BoxPoints(rect, &points)
defer points.Close()
array := d.mat2slice(points)
sort.Sort(xFloatSortBy(array))
point1, point2, point3, point4 := array[0], array[1], array[2], array[3]
if array[3][1] <= array[2][1] {
point2, point3 = array[3], array[2]
} else {
point2, point3 = array[2], array[3]
}
if array[1][1] <= array[0][1] {
point1, point4 = array[1], array[0]
} else {
point1, point4 = array[0], array[1]
}
array = [][]float32{point1, point2, point3, point4}
return array
}
func (d *DBPostProcess) mat2slice(mat gocv.Mat) [][]float32 {
array := make([][]float32, mat.Rows())
for i := 0; i < mat.Rows(); i++ {
tmp := make([]float32, mat.Cols())
for j := 0; j < mat.Cols(); j++ {
tmp[j] = mat.GetFloatAt(i, j)
}
array[i] = tmp
}
return array
}
// boxScoreFast returns the mean of the probability map inside the box polygon, used to filter low-confidence boxes.
func (d *DBPostProcess) boxScoreFast(array [][]float32, pred gocv.Mat) float64 {
height, width := pred.Rows(), pred.Cols()
boxX := []float32{array[0][0], array[1][0], array[2][0], array[3][0]}
boxY := []float32{array[0][1], array[1][1], array[2][1], array[3][1]}
xmin := clip(int(math.Floor(float64(minf(boxX)))), 0, width-1)
xmax := clip(int(math.Ceil(float64(maxf(boxX)))), 0, width-1)
ymin := clip(int(math.Floor(float64(minf(boxY)))), 0, height-1)
ymax := clip(int(math.Ceil(float64(maxf(boxY)))), 0, height-1)
mask := gocv.NewMatWithSize(ymax-ymin+1, xmax-xmin+1, gocv.MatTypeCV8UC1)
defer mask.Close()
ppt := make([][]image.Point, 1)
ppt[0] = make([]image.Point, 4)
ppt[0][0] = image.Point{int(array[0][0]) - xmin, int(array[0][1]) - ymin}
ppt[0][1] = image.Point{int(array[1][0]) - xmin, int(array[1][1]) - ymin}
ppt[0][2] = image.Point{int(array[2][0]) - xmin, int(array[2][1]) - ymin}
ppt[0][3] = image.Point{int(array[3][0]) - xmin, int(array[3][1]) - ymin}
gocv.FillPoly(&mask, ppt, color.RGBA{0, 0, 1, 0})
croppedImg := pred.Region(image.Rect(xmin, ymin, xmax+1, ymax+1))
s := croppedImg.MeanWithMask(mask)
return s.Val1
}
// unClip expands the detected polygon outwards by area*unClipRatio/perimeter using a polygon offset, compensating for the shrunken text regions DB predicts.
func (d *DBPostProcess) unClip(box [][]float32) gocv.RotatedRect {
var area, dist float64
for i := 0; i < 4; i++ {
area += float64(box[i][0]*box[(i+1)%4][1] - box[i][1]*box[(i+1)%4][0])
dist += math.Sqrt(float64(
(box[i][0]-box[(i+1)%4][0])*(box[i][0]-box[(i+1)%4][0]) +
(box[i][1]-box[(i+1)%4][1])*(box[i][1]-box[(i+1)%4][1]),
))
}
area = math.Abs(area / 2.0)
distance := area * d.unClipRatio / dist
offset := clipper.NewClipperOffset()
path := make([]*clipper.IntPoint, 4)
path[0] = &clipper.IntPoint{X: clipper.CInt(box[0][0]), Y: clipper.CInt(box[0][1])}
path[1] = &clipper.IntPoint{X: clipper.CInt(box[1][0]), Y: clipper.CInt(box[1][1])}
path[2] = &clipper.IntPoint{X: clipper.CInt(box[2][0]), Y: clipper.CInt(box[2][1])}
path[3] = &clipper.IntPoint{X: clipper.CInt(box[3][0]), Y: clipper.CInt(box[3][1])}
offset.AddPath(clipper.Path(path), clipper.JtRound, clipper.EtClosedPolygon)
soln := offset.Execute(distance)
points := make([]image.Point, 0, 4)
for i := 0; i < len(soln); i++ {
for j := 0; j < len(soln[i]); j++ {
points = append(points, image.Point{int(soln[i][j].X), int(soln[i][j].Y)})
}
}
var res gocv.RotatedRect
if len(points) <= 0 {
points = make([]image.Point, 4)
points[0] = image.Pt(0, 0)
points[1] = image.Pt(1, 0)
points[2] = image.Pt(1, 1)
points[3] = image.Pt(0, 1)
res = gocv.RotatedRect{
Contour: points,
BoundingRect: image.Rect(0, 0, 1, 1),
Center: gocv.Point2f{X: 0.5, Y: 0.5},
Width: 1,
Height: 1,
Angle: 0,
}
} else {
res = gocv.MinAreaRect(points)
}
return res
}
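// boxesFromBitmap traces contours on the binarized mask, filters candidates by
// size and score, unclips them, and maps the coordinates back to the original
// image via ratioW/ratioH.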
func (d *DBPostProcess) boxesFromBitmap(pred gocv.Mat, mask gocv.Mat, ratioH float64, ratioW float64) [][][]int {
height, width := mask.Rows(), mask.Cols()
mask.MultiplyUChar(255)
contours := gocv.FindContours(mask, gocv.RetrievalList, gocv.ChainApproxSimple)
numContours := len(contours)
if numContours > d.maxCandidates {
numContours = d.maxCandidates
}
boxes := make([][][]int, 0, numContours)
for i := 0; i < numContours; i++ {
contour := contours[i]
boundingbox := gocv.MinAreaRect(contour)
if boundingbox.Width < float32(d.minSize) || boundingbox.Height < float32(d.minSize) {
continue
}
points := d.getMinBoxes(boundingbox)
score := d.boxScoreFast(points, pred)
if score < d.boxThresh {
continue
}
box := d.unClip(points)
if box.Width < float32(d.minSize+2) || box.Height < float32(d.minSize+2) {
continue
}
cliparray := d.getMinBoxes(box)
dstHeight, dstWidth := pred.Rows(), pred.Cols()
intcliparray := make([][]int, 4)
for i := 0; i < 4; i++ {
p := []int{
int(float64(clip(int(math.Round(
float64(cliparray[i][0]/float32(width)*float32(dstWidth)))), 0, dstWidth)) / ratioW),
int(float64(clip(int(math.Round(
float64(cliparray[i][1]/float32(height)*float32(dstHeight)))), 0, dstHeight)) / ratioH),
}
intcliparray[i] = p
}
boxes = append(boxes, intcliparray)
}
return boxes
}
func (d *DBPostProcess) orderPointsClockwise(box [][]int) [][]int {
sort.Sort(xIntSortBy(box))
leftmost := [][]int{box[0], box[1]}
rightmost := [][]int{box[2], box[3]}
if leftmost[0][1] > leftmost[1][1] {
leftmost[0], leftmost[1] = leftmost[1], leftmost[0]
}
if rightmost[0][1] > rightmost[1][1] {
rightmost[0], rightmost[1] = rightmost[1], rightmost[0]
}
return [][]int{leftmost[0], rightmost[0], rightmost[1], leftmost[1]}
}
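// filterTagDetRes orders each box clockwise, clips it to the original image
// bounds, and drops boxes whose sides are 4 px or shorter.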
func (d *DBPostProcess) filterTagDetRes(boxes [][][]int, oriH, oriW int) [][][]int {
points := make([][][]int, 0, len(boxes))
for i := 0; i < len(boxes); i++ {
boxes[i] = d.orderPointsClockwise(boxes[i])
for j := 0; j < len(boxes[i]); j++ {
boxes[i][j][0] = clip(boxes[i][j][0], 0, oriW-1)
boxes[i][j][1] = clip(boxes[i][j][1], 0, oriH-1)
}
}
for i := 0; i < len(boxes); i++ {
rectW := int(math.Sqrt(math.Pow(float64(boxes[i][0][0]-boxes[i][1][0]), 2.0) +
math.Pow(float64(boxes[i][0][1]-boxes[i][1][1]), 2.0)))
rectH := int(math.Sqrt(math.Pow(float64(boxes[i][0][0]-boxes[i][3][0]), 2.0) +
math.Pow(float64(boxes[i][0][1]-boxes[i][3][1]), 2.0)))
if rectW <= 4 || rectH <= 4 {
continue
}
points = append(points, boxes[i])
}
return points
}
func (d *DBPostProcess) Run(output *paddle.ZeroCopyTensor, oriH, oriW int, ratioH, ratioW float64) [][][]int {
v := output.Value().([][][][]float32)
shape := output.Shape()
height, width := int(shape[2]), int(shape[3])
	pred := gocv.NewMatWithSize(height, width, gocv.MatTypeCV32F)
	defer pred.Close()
	bitmap := gocv.NewMatWithSize(height, width, gocv.MatTypeCV8UC1)
	defer bitmap.Close()
	thresh := float32(d.thresh)
for i := 0; i < height; i++ {
for j := 0; j < width; j++ {
pred.SetFloatAt(i, j, v[0][0][i][j])
if v[0][0][i][j] > thresh {
bitmap.SetUCharAt(i, j, 1)
} else {
bitmap.SetUCharAt(i, j, 0)
}
}
}
	mask := gocv.NewMat()
	defer mask.Close()
	kernel := gocv.GetStructuringElement(gocv.MorphRect, image.Point{2, 2})
	defer kernel.Close()
	gocv.Dilate(bitmap, &mask, kernel)
boxes := d.boxesFromBitmap(pred, mask, ratioH, ratioW)
dtboxes := d.filterTagDetRes(boxes, oriH, oriW)
return dtboxes
}
package ocr
import (
"image"
"image/color"
"math"
"github.com/LKKlein/gocv"
)
func resizeByShape(img gocv.Mat, resizeShape []int) (gocv.Mat, int, int) {
resizeH := resizeShape[0]
resizeW := resizeShape[1]
gocv.Resize(img, &img, image.Pt(resizeW, resizeH), 0, 0, gocv.InterpolationLinear)
return img, resizeH, resizeW
}
func resizeByMaxLen(img gocv.Mat, maxLen int) (gocv.Mat, int, int) {
oriH := img.Rows()
oriW := img.Cols()
var resizeH, resizeW int = oriH, oriW
var ratio float64 = 1.0
if resizeH > maxLen || resizeW > maxLen {
if resizeH > resizeW {
ratio = float64(maxLen) / float64(resizeH)
} else {
ratio = float64(maxLen) / float64(resizeW)
}
}
resizeH = int(float64(resizeH) * ratio)
resizeW = int(float64(resizeW) * ratio)
	// Snap each side down to a multiple of 32 (minimum 32); the DB detector
	// expects input dimensions divisible by 32.
	if resizeH%32 != 0 {
		if resizeH/32 <= 1 {
			resizeH = 32
		} else {
			resizeH = (resizeH/32 - 1) * 32
		}
	}
	if resizeW%32 != 0 {
		if resizeW/32 <= 1 {
			resizeW = 32
		} else {
			resizeW = (resizeW/32 - 1) * 32
		}
	}
if resizeW <= 0 || resizeH <= 0 {
return gocv.NewMat(), 0, 0
}
gocv.Resize(img, &img, image.Pt(resizeW, resizeH), 0, 0, gocv.InterpolationLinear)
return img, resizeH, resizeW
}
func normPermute(img gocv.Mat, mean []float32, std []float32, scaleFactor float32) []float32 {
img.ConvertTo(&img, gocv.MatTypeCV32F)
img.DivideFloat(scaleFactor)
c := gocv.Split(img)
data := make([]float32, img.Rows()*img.Cols()*img.Channels())
for i := 0; i < 3; i++ {
c[i].SubtractFloat(mean[i])
c[i].DivideFloat(std[i])
defer c[i].Close()
x, _ := c[i].DataPtrFloat32()
copy(data[i*img.Rows()*img.Cols():], x)
}
return data
}
type DetPreProcess interface {
Run(gocv.Mat) ([]float32, int, int)
}
type DBPreProcess struct {
resizeType int
imageShape []int
maxSideLen int
mean []float32
std []float32
scaleFactor float32
}
func NewDBProcess(shape []int, sideLen int) *DBPreProcess {
db := &DBPreProcess{
resizeType: 0,
imageShape: shape,
maxSideLen: sideLen,
mean: []float32{0.485, 0.456, 0.406},
std: []float32{0.229, 0.224, 0.225},
scaleFactor: 255.0,
}
if len(shape) > 0 {
db.resizeType = 1
}
if sideLen == 0 {
db.maxSideLen = 2400
}
return db
}
func (d *DBPreProcess) Run(img gocv.Mat) ([]float32, int, int) {
var resizeH, resizeW int
if d.resizeType == 0 {
img, resizeH, resizeW = resizeByMaxLen(img, d.maxSideLen)
} else {
img, resizeH, resizeW = resizeByShape(img, d.imageShape)
}
im := normPermute(img, d.mean, d.std, d.scaleFactor)
return im, resizeH, resizeW
}
func clsResize(img gocv.Mat, resizeShape []int) gocv.Mat {
imgH, imgW := resizeShape[1], resizeShape[2]
h, w := img.Rows(), img.Cols()
ratio := float64(w) / float64(h)
var resizeW int
if math.Ceil(float64(imgH)*ratio) > float64(imgW) {
resizeW = imgW
} else {
resizeW = int(math.Ceil(float64(imgH) * ratio))
}
gocv.Resize(img, &img, image.Pt(resizeW, imgH), 0, 0, gocv.InterpolationLinear)
if resizeW < imgW {
gocv.CopyMakeBorder(img, &img, 0, 0, 0, imgW-resizeW, gocv.BorderConstant, color.RGBA{0, 0, 0, 0})
}
return img
}
func crnnPreprocess(img gocv.Mat, resizeShape []int, mean []float32, std []float32,
scaleFactor float32, whRatio float64, charType string) []float32 {
imgH := resizeShape[1]
imgW := resizeShape[2]
if charType == "ch" {
imgW = int(32 * whRatio)
}
h, w := img.Rows(), img.Cols()
ratio := float64(w) / float64(h)
var resizeW int
if math.Ceil(float64(imgH)*ratio) > float64(imgW) {
resizeW = imgW
} else {
resizeW = int(math.Ceil(float64(imgH) * ratio))
}
gocv.Resize(img, &img, image.Pt(resizeW, imgH), 0, 0, gocv.InterpolationLinear)
img.ConvertTo(&img, gocv.MatTypeCV32F)
img.DivideFloat(scaleFactor)
img.SubtractScalar(gocv.NewScalar(float64(mean[0]), float64(mean[1]), float64(mean[2]), 0))
img.DivideScalar(gocv.NewScalar(float64(std[0]), float64(std[1]), float64(std[2]), 0))
if resizeW < imgW {
gocv.CopyMakeBorder(img, &img, 0, 0, 0, imgW-resizeW, gocv.BorderConstant, color.RGBA{0, 0, 0, 0})
}
c := gocv.Split(img)
data := make([]float32, img.Rows()*img.Cols()*img.Channels())
for i := 0; i < 3; i++ {
defer c[i].Close()
x, _ := c[i].DataPtrFloat32()
copy(data[i*img.Rows()*img.Cols():], x)
}
return data
}
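// Example (sketch): wiring the DB pre/post-processing above around a Paddle
// predictor. A minimal, untested outline: the import paths mirror this
// repository's layout, and the image/model paths and thresholds are
// illustrative assumptions.
package main

import (
	"log"

	"github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/ocr"
	"github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/paddle"
)

func main() {
	img := ocr.ReadImage("test.jpg") // placeholder image path
	defer img.Close()
	oriH, oriW := img.Rows(), img.Cols()

	// Resize to multiples of 32 and normalize into a flat CHW float32 slice.
	pre := ocr.NewDBProcess(nil, 960)
	data, resizeH, resizeW := pre.Run(img)

	config := paddle.NewAnalysisConfig()
	config.SetModel("inference/det/model", "inference/det/params") // placeholder paths
	config.DisableGpu()
	config.SwitchUseFeedFetchOps(false)
	config.SwitchSpecifyInputNames(true)
	predictor := paddle.NewPredictor(config)

	// Zero-copy input: NCHW shape, then copy the data in and run.
	input := predictor.GetInputTensors()[0]
	input.Reshape([]int32{1, 3, int32(resizeH), int32(resizeW)})
	input.SetValue(data)
	predictor.SetZeroCopyInput(input)
	predictor.ZeroCopyRun()

	output := predictor.GetOutputTensors()[0]
	predictor.GetZeroCopyOutput(output)

	// Decode the probability map into boxes in original-image coordinates.
	post := ocr.NewDBPostProcess(0.3, 0.5, 2.0)
	ratioH := float64(resizeH) / float64(oriH)
	ratioW := float64(resizeW) / float64(oriW)
	log.Println(post.Run(output, oriH, oriW, ratioH, ratioW))
}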
package ocr
import (
"archive/tar"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"github.com/LKKlein/gocv"
"gopkg.in/yaml.v3"
)
func getString(args map[string]interface{}, key string, dv string) string {
if f, ok := args[key]; ok {
return f.(string)
}
return dv
}
func getFloat64(args map[string]interface{}, key string, dv float64) float64 {
if f, ok := args[key]; ok {
return f.(float64)
}
return dv
}
func getInt(args map[string]interface{}, key string, dv int) int {
if i, ok := args[key]; ok {
return i.(int)
}
return dv
}
func getBool(args map[string]interface{}, key string, dv bool) bool {
if b, ok := args[key]; ok {
return b.(bool)
}
return dv
}
func ReadImage(image_path string) gocv.Mat {
img := gocv.IMRead(image_path, gocv.IMReadColor)
if img.Empty() {
log.Printf("Could not read image %s\n", image_path)
os.Exit(1)
}
return img
}
func clip(value, min, max int) int {
if value <= min {
return min
} else if value >= max {
return max
}
return value
}
func minf(data []float32) float32 {
v := data[0]
for _, val := range data {
if val < v {
v = val
}
}
return v
}
func maxf(data []float32) float32 {
v := data[0]
for _, val := range data {
if val > v {
v = val
}
}
return v
}
func mini(data []int) int {
v := data[0]
for _, val := range data {
if val < v {
v = val
}
}
return v
}
func maxi(data []int) int {
v := data[0]
for _, val := range data {
if val > v {
v = val
}
}
return v
}
func argmax(arr []float32) (int, float32) {
max_value, index := arr[0], 0
for i, item := range arr {
if item > max_value {
max_value = item
index = i
}
}
return index, max_value
}
func checkModelExists(modelPath string) bool {
if isPathExist(modelPath+"/model") && isPathExist(modelPath+"/params") {
return true
}
if strings.HasPrefix(modelPath, "http://") ||
strings.HasPrefix(modelPath, "ftp://") || strings.HasPrefix(modelPath, "https://") {
return true
}
return false
}
func downloadFile(filepath, url string) error {
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
log.Println("[download_file] from:", url, " to:", filepath)
return err
}
func isPathExist(path string) bool {
if _, err := os.Stat(path); err == nil {
return true
} else if os.IsNotExist(err) {
return false
}
return false
}
func downloadModel(modelDir, modelPath string) (string, error) {
if modelPath != "" && (strings.HasPrefix(modelPath, "http://") ||
strings.HasPrefix(modelPath, "ftp://") || strings.HasPrefix(modelPath, "https://")) {
if checkModelExists(modelDir) {
return modelDir, nil
}
_, suffix := path.Split(modelPath)
outPath := filepath.Join(modelDir, suffix)
outDir := filepath.Dir(outPath)
if !isPathExist(outDir) {
os.MkdirAll(outDir, os.ModePerm)
}
if !isPathExist(outPath) {
err := downloadFile(outPath, modelPath)
if err != nil {
return "", err
}
}
if strings.HasSuffix(outPath, ".tar") && !checkModelExists(modelDir) {
unTar(modelDir, outPath)
os.Remove(outPath)
return modelDir, nil
}
return modelDir, nil
}
return modelPath, nil
}
func unTar(dst, src string) (err error) {
fr, err := os.Open(src)
if err != nil {
return err
}
defer fr.Close()
tr := tar.NewReader(fr)
for {
hdr, err := tr.Next()
switch {
case err == io.EOF:
return nil
case err != nil:
return err
case hdr == nil:
continue
}
var dstFileDir string
if strings.Contains(hdr.Name, "model") {
dstFileDir = filepath.Join(dst, "model")
} else if strings.Contains(hdr.Name, "params") {
dstFileDir = filepath.Join(dst, "params")
}
switch hdr.Typeflag {
case tar.TypeDir:
continue
case tar.TypeReg:
file, err := os.OpenFile(dstFileDir, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode))
if err != nil {
return err
}
_, err2 := io.Copy(file, tr)
if err2 != nil {
return err2
}
file.Close()
}
}
}
func readLines2StringSlice(filepath string) []string {
if strings.HasPrefix(filepath, "http://") || strings.HasPrefix(filepath, "https://") {
home, _ := os.UserHomeDir()
dir := home + "/.paddleocr/rec/"
_, suffix := path.Split(filepath)
f := dir + suffix
if !isPathExist(f) {
err := downloadFile(f, filepath)
if err != nil {
log.Println("download ppocr key file error! You can specify your local dict path by conf.yaml.")
return nil
}
}
filepath = f
}
content, err := ioutil.ReadFile(filepath)
if err != nil {
log.Println("read ppocr key file error!")
return nil
}
lines := strings.Split(string(content), "\n")
return lines
}
func ReadYaml(yamlPath string) (map[string]interface{}, error) {
data, err := ioutil.ReadFile(yamlPath)
if err != nil {
return nil, err
}
var body interface{}
if err := yaml.Unmarshal(data, &body); err != nil {
return nil, err
}
body = convertYaml2Map(body)
return body.(map[string]interface{}), nil
}
func convertYaml2Map(i interface{}) interface{} {
switch x := i.(type) {
case map[interface{}]interface{}:
m2 := map[string]interface{}{}
for k, v := range x {
m2[k.(string)] = convertYaml2Map(v)
}
return m2
case []interface{}:
for i, v := range x {
x[i] = convertYaml2Map(v)
}
}
return i
}
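// Example (sketch): typical consumption of ReadYaml together with the typed
// getters above. The key names and defaults are illustrative assumptions,
// not the project's actual config schema.
func exampleLoadConfig(path string) {
	args, err := ReadYaml(path)
	if err != nil {
		log.Println("read config failed:", err)
		return
	}
	// Each getter falls back to the given default when the key is absent.
	log.Println("det_model_dir:", getString(args, "det_model_dir", "inference/det")) // assumed key
	log.Println("det_max_side_len:", getInt(args, "det_max_side_len", 960))          // assumed key
	log.Println("det_db_box_thresh:", getFloat64(args, "det_db_box_thresh", 0.5))    // assumed key
	log.Println("use_gpu:", getBool(args, "use_gpu", false))                         // assumed key
}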
package paddle
// #cgo CFLAGS: -I../paddle_c/include
// #cgo LDFLAGS: -lpaddle_fluid_c
// #include <stdbool.h>
import "C"
import "fmt"
func ConvertCBooleanToGo(b C.bool) bool {
var c_false C.bool
if b != c_false {
return true
}
return false
}
func numel(shape []int32) int32 {
n := int32(1)
for _, d := range shape {
n *= d
}
return n
}
func bug(format string, args ...interface{}) error {
return fmt.Errorf("Bug %v", fmt.Sprintf(format, args...))
}
package paddle
// #include <stdbool.h>
// #include <stdlib.h>
// #include <paddle_c_api.h>
import "C"
import (
"runtime"
"unsafe"
)
type Precision C.Precision
const (
Precision_FLOAT32 Precision = C.kFloat32
Precision_INT8 Precision = C.kInt8
Precision_HALF Precision = C.kHalf
)
type AnalysisConfig struct {
c *C.PD_AnalysisConfig
}
func NewAnalysisConfig() *AnalysisConfig {
c_config := C.PD_NewAnalysisConfig()
config := &AnalysisConfig{c: c_config}
runtime.SetFinalizer(config, (*AnalysisConfig).finalize)
return config
}
func (config *AnalysisConfig) finalize() {
C.PD_DeleteAnalysisConfig(config.c)
}
func (config *AnalysisConfig) SetModel(model, params string) {
c_model := C.CString(model)
defer C.free(unsafe.Pointer(c_model))
var c_params *C.char
if params == "" {
c_params = nil
} else {
c_params = C.CString(params)
defer C.free(unsafe.Pointer(c_params))
}
C.PD_SetModel(config.c, c_model, c_params)
}
func (config *AnalysisConfig) ModelDir() string {
return C.GoString(C.PD_ModelDir(config.c))
}
func (config *AnalysisConfig) ProgFile() string {
return C.GoString(C.PD_ProgFile(config.c))
}
func (config *AnalysisConfig) ParamsFile() string {
return C.GoString(C.PD_ParamsFile(config.c))
}
func (config *AnalysisConfig) EnableUseGpu(memory_pool_init_size_mb int, device_id int) {
C.PD_EnableUseGpu(config.c, C.int(memory_pool_init_size_mb), C.int(device_id))
}
func (config *AnalysisConfig) DisableGpu() {
C.PD_DisableGpu(config.c)
}
func (config *AnalysisConfig) UseGpu() bool {
return ConvertCBooleanToGo(C.PD_UseGpu(config.c))
}
func (config *AnalysisConfig) GpuDeviceId() int {
return int(C.PD_GpuDeviceId(config.c))
}
func (config *AnalysisConfig) MemoryPoolInitSizeMb() int {
return int(C.PD_MemoryPoolInitSizeMb(config.c))
}
func (config *AnalysisConfig) EnableCudnn() {
C.PD_EnableCUDNN(config.c)
}
func (config *AnalysisConfig) CudnnEnabled() bool {
return ConvertCBooleanToGo(C.PD_CudnnEnabled(config.c))
}
func (config *AnalysisConfig) SwitchIrOptim(x bool) {
C.PD_SwitchIrOptim(config.c, C.bool(x))
}
func (config *AnalysisConfig) IrOptim() bool {
return ConvertCBooleanToGo(C.PD_IrOptim(config.c))
}
func (config *AnalysisConfig) SwitchUseFeedFetchOps(x bool) {
C.PD_SwitchUseFeedFetchOps(config.c, C.bool(x))
}
func (config *AnalysisConfig) UseFeedFetchOpsEnabled() bool {
return ConvertCBooleanToGo(C.PD_UseFeedFetchOpsEnabled(config.c))
}
func (config *AnalysisConfig) SwitchSpecifyInputNames(x bool) {
C.PD_SwitchSpecifyInputNames(config.c, C.bool(x))
}
func (config *AnalysisConfig) SpecifyInputName() bool {
return ConvertCBooleanToGo(C.PD_SpecifyInputName(config.c))
}
func (config *AnalysisConfig) EnableTensorRtEngine(workspace_size int, max_batch_size int, min_subgraph_size int, precision Precision, use_static bool, use_calib_mode bool) {
C.PD_EnableTensorRtEngine(config.c, C.int(workspace_size), C.int(max_batch_size), C.int(min_subgraph_size), C.Precision(precision), C.bool(use_static), C.bool(use_calib_mode))
}
func (config *AnalysisConfig) TensorrtEngineEnabled() bool {
return ConvertCBooleanToGo(C.PD_TensorrtEngineEnabled(config.c))
}
func (config *AnalysisConfig) SwitchIrDebug(x bool) {
C.PD_SwitchIrDebug(config.c, C.bool(x))
}
func (config *AnalysisConfig) EnableMkldnn() {
C.PD_EnableMKLDNN(config.c)
}
func (config *AnalysisConfig) SetCpuMathLibraryNumThreads(n int) {
C.PD_SetCpuMathLibraryNumThreads(config.c, C.int(n))
}
func (config *AnalysisConfig) CpuMathLibraryNumThreads() int {
return int(C.PD_CpuMathLibraryNumThreads(config.c))
}
func (config *AnalysisConfig) EnableMkldnnQuantizer() {
C.PD_EnableMkldnnQuantizer(config.c)
}
func (config *AnalysisConfig) MkldnnQuantizerEnabled() bool {
return ConvertCBooleanToGo(C.PD_MkldnnQuantizerEnabled(config.c))
}
// SetModelBuffer
// ModelFromMemory
func (config *AnalysisConfig) EnableMemoryOptim() {
C.PD_EnableMemoryOptim(config.c)
}
func (config *AnalysisConfig) MemoryOptimEnabled() bool {
return ConvertCBooleanToGo(C.PD_MemoryOptimEnabled(config.c))
}
func (config *AnalysisConfig) EnableProfile() {
C.PD_EnableProfile(config.c)
}
func (config *AnalysisConfig) ProfileEnabled() bool {
return ConvertCBooleanToGo(C.PD_ProfileEnabled(config.c))
}
func (config *AnalysisConfig) DisableGlogInfo() {
C.PD_DisableGlogInfo(config.c)
}
func (config *AnalysisConfig) DeletePass(pass string) {
c_pass := C.CString(pass)
defer C.free(unsafe.Pointer(c_pass))
C.PD_DeletePass(config.c, c_pass)
}
func (config *AnalysisConfig) SetInValid() {
C.PD_SetInValid(config.c)
}
func (config *AnalysisConfig) IsValid() bool {
return ConvertCBooleanToGo(C.PD_IsValid(config.c))
}
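// Example (sketch): a typical CPU-only setup built from the wrappers above.
// The model paths are placeholders; disabling the feed/fetch ops and enabling
// explicit input names is the usual preparation for the zero-copy API.
func exampleCPUConfig() *AnalysisConfig {
	config := NewAnalysisConfig()
	config.SetModel("inference/det/model", "inference/det/params") // placeholder paths
	config.DisableGpu()
	config.SetCpuMathLibraryNumThreads(4)
	config.SwitchUseFeedFetchOps(false)
	config.SwitchSpecifyInputNames(true)
	config.EnableMemoryOptim()
	config.DisableGlogInfo()
	return config
}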
package paddle
// #include <stdbool.h>
// #include "paddle_c_api.h"
import "C"
import (
"reflect"
"runtime"
"unsafe"
)
type Predictor struct {
c *C.PD_Predictor
}
func NewPredictor(config *AnalysisConfig) *Predictor {
c_predictor := C.PD_NewPredictor((*config).c)
predictor := &Predictor{c: c_predictor}
runtime.SetFinalizer(predictor, (*Predictor).finalize)
return predictor
}
func (predictor *Predictor) finalize() {
C.PD_DeletePredictor(predictor.c)
}
func DeletePredictor(predictor *Predictor) {
C.PD_DeletePredictor(predictor.c)
}
func (predictor *Predictor) GetInputNum() int {
return int(C.PD_GetInputNum(predictor.c))
}
func (predictor *Predictor) GetOutputNum() int {
return int(C.PD_GetOutputNum(predictor.c))
}
func (predictor *Predictor) GetInputName(n int) string {
return C.GoString(C.PD_GetInputName(predictor.c, C.int(n)))
}
func (predictor *Predictor) GetOutputName(n int) string {
return C.GoString(C.PD_GetOutputName(predictor.c, C.int(n)))
}
func (predictor *Predictor) GetInputTensors() []*ZeroCopyTensor {
	var result []*ZeroCopyTensor
for i := 0; i < predictor.GetInputNum(); i++ {
tensor := NewZeroCopyTensor()
tensor.c.name = C.PD_GetInputName(predictor.c, C.int(i))
result = append(result, tensor)
}
return result
}
func (predictor *Predictor) GetOutputTensors() []*ZeroCopyTensor {
	var result []*ZeroCopyTensor
for i := 0; i < predictor.GetOutputNum(); i++ {
tensor := NewZeroCopyTensor()
tensor.c.name = C.PD_GetOutputName(predictor.c, C.int(i))
result = append(result, tensor)
}
return result
}
func (predictor *Predictor) GetInputNames() []string {
names := make([]string, predictor.GetInputNum())
for i := 0; i < len(names); i++ {
names[i] = predictor.GetInputName(i)
}
return names
}
func (predictor *Predictor) GetOutputNames() []string {
names := make([]string, predictor.GetOutputNum())
for i := 0; i < len(names); i++ {
names[i] = predictor.GetOutputName(i)
}
return names
}
func (predictor *Predictor) SetZeroCopyInput(tensor *ZeroCopyTensor) {
C.PD_SetZeroCopyInput(predictor.c, tensor.c)
}
func (predictor *Predictor) GetZeroCopyOutput(tensor *ZeroCopyTensor) {
C.PD_GetZeroCopyOutput(predictor.c, tensor.c)
tensor.name = C.GoString(tensor.c.name)
var shape []int32
shape_hdr := (*reflect.SliceHeader)(unsafe.Pointer(&shape))
shape_hdr.Data = uintptr(unsafe.Pointer(tensor.c.shape.data))
shape_hdr.Len = int(tensor.c.shape.length / C.sizeof_int)
shape_hdr.Cap = int(tensor.c.shape.length / C.sizeof_int)
tensor.Reshape(shape)
}
func (predictor *Predictor) ZeroCopyRun() {
C.PD_ZeroCopyRun(predictor.c)
}
package paddle
// #include <stdbool.h>
// #include <stdlib.h>
// #include <string.h>
// #include <paddle_c_api.h>
import "C"
import (
"reflect"
"runtime"
"unsafe"
)
type PaddleDType C.PD_DataType
const (
FLOAT32 PaddleDType = C.PD_FLOAT32
INT32 PaddleDType = C.PD_INT32
INT64 PaddleDType = C.PD_INT64
UINT8 PaddleDType = C.PD_UINT8
UNKDTYPE PaddleDType = C.PD_UNKDTYPE
)
var types = []struct {
gotype reflect.Type
dtype PaddleDType
}{
{reflect.TypeOf(float32(0)), FLOAT32},
{reflect.TypeOf(int32(0)), INT32},
{reflect.TypeOf(int64(0)), INT64},
{reflect.TypeOf(uint8(0)), UINT8},
}
func typeOfDataType(dtype PaddleDType) reflect.Type {
var ret reflect.Type
for _, t := range types {
if t.dtype == dtype {
ret = t.gotype
}
}
return ret
}
func sizeofDataType(dtype PaddleDType) int32 {
switch dtype {
case UINT8:
return int32(C.sizeof_uchar)
case INT32:
return int32(C.sizeof_int)
case INT64:
return int32(C.sizeof_longlong)
case FLOAT32:
return int32(C.sizeof_float)
}
return -1
}
func shapeAndTypeOf(val reflect.Value) (shape []int32, dt PaddleDType) {
gotype := val.Type()
for gotype.Kind() == reflect.Array || gotype.Kind() == reflect.Slice {
shape = append(shape, int32(val.Len()))
if val.Len() > 0 {
val = val.Index(0)
}
gotype = gotype.Elem()
}
for _, t := range types {
if gotype.Kind() == t.gotype.Kind() {
return shape, PaddleDType(t.dtype)
}
}
return shape, dt
}
type ZeroCopyTensor struct {
c *C.PD_ZeroCopyTensor
name string
shape []int32
}
func NewZeroCopyTensor() *ZeroCopyTensor {
c_tensor := C.PD_NewZeroCopyTensor()
tensor := &ZeroCopyTensor{c: c_tensor}
runtime.SetFinalizer(tensor, (*ZeroCopyTensor).finalize)
return tensor
}
func (tensor *ZeroCopyTensor) finalize() {
C.PD_DeleteZeroCopyTensor(tensor.c)
}
func (tensor *ZeroCopyTensor) Shape() []int32 {
return tensor.shape
}
func (tensor *ZeroCopyTensor) Name() string {
return C.GoString(tensor.c.name)
}
func (tensor *ZeroCopyTensor) Rename(name string) {
	tensor.name = name
	// Propagate the new name to the C struct as well; the C string allocated
	// here is owned by the tensor and is not freed explicitly.
	tensor.c.name = C.CString(tensor.name)
}
func (tensor *ZeroCopyTensor) Reshape(shape []int32) {
tensor.shape = make([]int32, len(shape))
copy(tensor.shape, shape)
length := C.sizeof_int * C.size_t(len(shape))
if tensor.c.shape.capacity < C.size_t(length) {
if tensor.c.shape.capacity != C.size_t(0) {
C.free(tensor.c.shape.data)
}
tensor.c.shape.data = C.malloc(length)
tensor.c.shape.capacity = length
}
tensor.c.shape.length = length
C.memcpy(tensor.c.shape.data, unsafe.Pointer(&shape[0]), length)
}
func (tensor *ZeroCopyTensor) DataType() PaddleDType {
return PaddleDType(tensor.c.dtype)
}
func (tensor *ZeroCopyTensor) SetValue(value interface{}) {
val := reflect.ValueOf(value)
shape, dtype := shapeAndTypeOf(val)
num := numel(shape)
length := C.size_t(sizeofDataType(dtype) * num)
if tensor.c.data.capacity < length {
if tensor.c.data.capacity != C.size_t(0) {
C.free(tensor.c.data.data)
}
tensor.c.data.data = C.malloc(length)
tensor.c.data.capacity = length
}
tensor.c.data.length = length
switch dtype {
case PaddleDType(UINT8):
data := val.Interface().([]uint8)
C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
case PaddleDType(INT32):
data := val.Interface().([]int32)
C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
case PaddleDType(INT64):
data := val.Interface().([]int64)
C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
case PaddleDType(FLOAT32):
data := val.Interface().([]float32)
C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
}
tensor.c.dtype = C.PD_DataType(dtype)
}
func (tensor *ZeroCopyTensor) tensorData() []byte {
cbytes := tensor.c.data.data
length := tensor.c.data.length
var slice []byte
if unsafe.Sizeof(unsafe.Pointer(nil)) == 8 {
slice = (*[1<<50 - 1]byte)(unsafe.Pointer(cbytes))[:length:length]
} else {
slice = (*[1 << 30]byte)(unsafe.Pointer(cbytes))[:length:length]
}
return slice
}
func (tensor *ZeroCopyTensor) Value() interface{} {
t := typeOfDataType(PaddleDType(tensor.c.dtype))
data := tensor.tensorData()
return decodeTensor(data, tensor.Shape(), t).Interface()
}
// It isn't safe to use reflect.SliceHeader as it uses a uintptr for Data and
// this is not inspected by the garbage collector
type sliceHeader struct {
Data unsafe.Pointer
Len int
Cap int
}
func decodeTensor(raw []byte, shape []int32, t reflect.Type) reflect.Value {
// Create a 1-dimensional slice of the base large enough for the data and
// copy the data in.
n := int(numel(shape))
l := n * int(t.Size())
typ := reflect.SliceOf(t)
slice := reflect.MakeSlice(typ, n, n)
baseBytes := *(*[]byte)(unsafe.Pointer(&sliceHeader{
Data: unsafe.Pointer(slice.Pointer()),
Len: l,
Cap: l,
}))
copy(baseBytes, raw)
	if len(shape) == 0 {
		// Scalar tensor: return the single element.
		return slice.Index(0)
	}
	if len(shape) == 1 {
		// 1-D tensor: the flat slice already has the right shape.
		return slice
	}
	// N-D tensor: rebuild the nested slices dimension by dimension,
	// from the innermost outwards.
if n == 0 {
n = int(numel(shape[:len(shape)-1]))
}
for i := len(shape) - 2; i >= 0; i-- {
underlyingSize := typ.Elem().Size()
typ = reflect.SliceOf(typ)
subsliceLen := int(shape[i+1])
if subsliceLen != 0 {
n = n / subsliceLen
}
data := unsafe.Pointer(slice.Pointer())
nextSlice := reflect.MakeSlice(typ, n, n)
for j := 0; j < n; j++ {
// This is equivalent to nSlice[j] = slice[j*subsliceLen: (j+1)*subsliceLen]
setSliceInSlice(nextSlice, j, sliceHeader{
Data: unsafe.Pointer(uintptr(data) + (uintptr(j*subsliceLen) * underlyingSize)),
Len: subsliceLen,
Cap: subsliceLen,
})
}
slice = nextSlice
}
return slice
}
// setSliceInSlice sets slice[index] = content.
func setSliceInSlice(slice reflect.Value, index int, content sliceHeader) {
const sliceSize = unsafe.Sizeof(sliceHeader{})
	// We must cast slice.Pointer to uintptr & back again to avoid GC issues.
// See https://github.com/google/go-cmp/issues/167#issuecomment-546093202
*(*sliceHeader)(unsafe.Pointer(uintptr(unsafe.Pointer(slice.Pointer())) + (uintptr(index) * sliceSize))) = content
}
func (tensor *ZeroCopyTensor) Lod() []uint {
var val []uint
valHdr := (*reflect.SliceHeader)(unsafe.Pointer(&val))
valHdr.Data = uintptr(unsafe.Pointer(tensor.c.lod.data))
valHdr.Len = int(tensor.c.lod.length / C.sizeof_size_t)
valHdr.Cap = int(tensor.c.lod.length / C.sizeof_size_t)
return val
}
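// Example (sketch): the SetValue / Reshape / Value round trip. Data goes in as
// a flat slice, the logical shape is set via Reshape, and Value() decodes the
// buffer back into nested Go slices according to that shape.
func exampleTensorRoundTrip() [][]float32 {
	t := NewZeroCopyTensor()
	t.Reshape([]int32{2, 3})
	t.SetValue([]float32{1, 2, 3, 4, 5, 6})
	return t.Value().([][]float32) // {{1, 2, 3}, {4, 5, 6}}
}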
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#if defined(_WIN32)
#ifdef PADDLE_ON_INFERENCE
#define PADDLE_CAPI_EXPORT __declspec(dllexport)
#else
#define PADDLE_CAPI_EXPORT __declspec(dllimport)
#endif // PADDLE_ON_INFERENCE
#else
#define PADDLE_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#ifdef __cplusplus
extern "C" {
#endif
enum PD_DataType { PD_FLOAT32, PD_INT32, PD_INT64, PD_UINT8, PD_UNKDTYPE };
typedef enum PD_DataType PD_DataType;
typedef struct PD_PaddleBuf PD_PaddleBuf;
typedef struct PD_AnalysisConfig PD_AnalysisConfig;
typedef struct PD_Predictor PD_Predictor;
typedef struct PD_Buffer {
void* data;
size_t length;
size_t capacity;
} PD_Buffer;
typedef struct PD_ZeroCopyTensor {
PD_Buffer data;
PD_Buffer shape;
PD_Buffer lod;
PD_DataType dtype;
char* name;
} PD_ZeroCopyTensor;
PADDLE_CAPI_EXPORT extern PD_ZeroCopyTensor* PD_NewZeroCopyTensor();
PADDLE_CAPI_EXPORT extern void PD_DeleteZeroCopyTensor(PD_ZeroCopyTensor*);
PADDLE_CAPI_EXPORT extern void PD_InitZeroCopyTensor(PD_ZeroCopyTensor*);
PADDLE_CAPI_EXPORT extern void PD_DestroyZeroCopyTensor(PD_ZeroCopyTensor*);
typedef struct PD_ZeroCopyData {
char* name;
void* data;
PD_DataType dtype;
int* shape;
int shape_size;
} PD_ZeroCopyData;
typedef struct InTensorShape {
char* name;
int* tensor_shape;
int shape_size;
} InTensorShape;
PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_NewPaddleBuf();
PADDLE_CAPI_EXPORT extern void PD_DeletePaddleBuf(PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern void PD_PaddleBufResize(PD_PaddleBuf* buf,
size_t length);
PADDLE_CAPI_EXPORT extern void PD_PaddleBufReset(PD_PaddleBuf* buf, void* data,
size_t length);
PADDLE_CAPI_EXPORT extern bool PD_PaddleBufEmpty(PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern void* PD_PaddleBufData(PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern size_t PD_PaddleBufLength(PD_PaddleBuf* buf);
// PaddleTensor
typedef struct PD_Tensor PD_Tensor;
PADDLE_CAPI_EXPORT extern PD_Tensor* PD_NewPaddleTensor();
PADDLE_CAPI_EXPORT extern void PD_DeletePaddleTensor(PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorName(PD_Tensor* tensor,
char* name);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorDType(PD_Tensor* tensor,
PD_DataType dtype);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorData(PD_Tensor* tensor,
PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorShape(PD_Tensor* tensor,
int* shape, int size);
PADDLE_CAPI_EXPORT extern const char* PD_GetPaddleTensorName(
const PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern PD_DataType PD_GetPaddleTensorDType(
const PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_GetPaddleTensorData(
const PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern const int* PD_GetPaddleTensorShape(
const PD_Tensor* tensor, int* size);
// AnalysisPredictor
PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config,
PD_Tensor* inputs, int in_size,
PD_Tensor** output_data,
int* out_size, int batch_size);
PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun(
const PD_AnalysisConfig* config, PD_ZeroCopyData* inputs, int in_size,
PD_ZeroCopyData** output, int* out_size);
// AnalysisConfig
enum Precision { kFloat32 = 0, kInt8, kHalf };
typedef enum Precision Precision;
PADDLE_CAPI_EXPORT extern PD_AnalysisConfig* PD_NewAnalysisConfig();
PADDLE_CAPI_EXPORT extern void PD_DeleteAnalysisConfig(
PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetModel(PD_AnalysisConfig* config,
const char* model_dir,
const char* params_path);
PADDLE_CAPI_EXPORT
extern void PD_SetProgFile(PD_AnalysisConfig* config, const char* x);
PADDLE_CAPI_EXPORT extern void PD_SetParamsFile(PD_AnalysisConfig* config,
const char* x);
PADDLE_CAPI_EXPORT extern void PD_SetOptimCacheDir(PD_AnalysisConfig* config,
const char* opt_cache_dir);
PADDLE_CAPI_EXPORT extern const char* PD_ModelDir(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern const char* PD_ProgFile(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern const char* PD_ParamsFile(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableUseGpu(PD_AnalysisConfig* config,
int memory_pool_init_size_mb,
int device_id);
PADDLE_CAPI_EXPORT extern void PD_DisableGpu(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_UseGpu(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern int PD_GpuDeviceId(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern int PD_MemoryPoolInitSizeMb(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern float PD_FractionOfGpuMemoryForPool(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableCUDNN(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_CudnnEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SwitchIrOptim(PD_AnalysisConfig* config,
bool x);
PADDLE_CAPI_EXPORT extern bool PD_IrOptim(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SwitchUseFeedFetchOps(
PD_AnalysisConfig* config, bool x);
PADDLE_CAPI_EXPORT extern bool PD_UseFeedFetchOpsEnabled(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SwitchSpecifyInputNames(
PD_AnalysisConfig* config, bool x);
PADDLE_CAPI_EXPORT extern bool PD_SpecifyInputName(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableTensorRtEngine(
PD_AnalysisConfig* config, int workspace_size, int max_batch_size,
int min_subgraph_size, Precision precision, bool use_static,
bool use_calib_mode);
PADDLE_CAPI_EXPORT extern bool PD_TensorrtEngineEnabled(
const PD_AnalysisConfig* config);
typedef struct PD_MaxInputShape {
char* name;
int* shape;
int shape_size;
} PD_MaxInputShape;
PADDLE_CAPI_EXPORT extern void PD_SwitchIrDebug(PD_AnalysisConfig* config,
bool x);
PADDLE_CAPI_EXPORT extern void PD_EnableMKLDNN(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetMkldnnCacheCapacity(
PD_AnalysisConfig* config, int capacity);
PADDLE_CAPI_EXPORT extern bool PD_MkldnnEnabled(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetCpuMathLibraryNumThreads(
PD_AnalysisConfig* config, int cpu_math_library_num_threads);
PADDLE_CAPI_EXPORT extern int PD_CpuMathLibraryNumThreads(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableMkldnnQuantizer(
PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_MkldnnQuantizerEnabled(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetModelBuffer(PD_AnalysisConfig* config,
const char* prog_buffer,
size_t prog_buffer_size,
const char* params_buffer,
size_t params_buffer_size);
PADDLE_CAPI_EXPORT extern bool PD_ModelFromMemory(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableMemoryOptim(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_MemoryOptimEnabled(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableProfile(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_ProfileEnabled(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetInValid(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_IsValid(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_DisableGlogInfo(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_DeletePass(PD_AnalysisConfig* config,
char* pass_name);
PADDLE_CAPI_EXPORT extern PD_Predictor* PD_NewPredictor(
const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_DeletePredictor(PD_Predictor* predictor);
PADDLE_CAPI_EXPORT extern int PD_GetInputNum(const PD_Predictor*);
PADDLE_CAPI_EXPORT extern int PD_GetOutputNum(const PD_Predictor*);
PADDLE_CAPI_EXPORT extern const char* PD_GetInputName(const PD_Predictor*, int);
PADDLE_CAPI_EXPORT extern const char* PD_GetOutputName(const PD_Predictor*,
int);
PADDLE_CAPI_EXPORT extern void PD_SetZeroCopyInput(
PD_Predictor* predictor, const PD_ZeroCopyTensor* tensor);
PADDLE_CAPI_EXPORT extern void PD_GetZeroCopyOutput(PD_Predictor* predictor,
PD_ZeroCopyTensor* tensor);
PADDLE_CAPI_EXPORT extern void PD_ZeroCopyRun(PD_Predictor* predictor);
#ifdef __cplusplus
} // extern "C"
#endif
package main
import (
"flag"
"log"
"github.com/PaddlePaddle/PaddleOCR/thirdparty/paddleocr-go/ocr"
)
var (
confFile string
image string
imageDir string
useServering bool
port string
)
func init() {
flag.StringVar(&confFile, "config", "config/conf.yaml", "config from ocr system. If not given, will use default config.")
flag.StringVar(&image, "image", "", "image to predict. if not given, will use image_dir")
flag.StringVar(&imageDir, "image_dir", "", "imgs in dir to be predicted. if not given, will check servering")
flag.BoolVar(&useServering, "use_servering", false, "whether to use ocr server. [default: false]")
flag.StringVar(&port, "port", "18600", "which port to serve ocr server. [default: 18600].")
}
func main() {
flag.Parse()
sys := ocr.NewOCRSystem(confFile, nil)
if image != "" {
img := ocr.ReadImage(image)
defer img.Close()
results := sys.PredictOneImage(img)
for _, res := range results {
log.Println(res)
}
return
}
if imageDir != "" {
results := sys.PredictDirImages(imageDir)
for k, vs := range results {
log.Printf("======== image: %v =======\n", k)
for _, res := range vs {
log.Println(res)
}
}
}
if useServering {
sys.StartServer(port)
}
}
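An example invocation of this entry point (image paths are placeholders; the flags are those registered in init above):

    go run main.go -config config/conf.yaml -image ./images/test.jpg
    go run main.go -config config/conf.yaml -image_dir ./images
    go run main.go -config config/conf.yaml -use_servering -port 18600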
## New Third-Party Feature Requests
### 1. Development: multi-language support
### 2. Models: multi-language support
- Format: please submit the dictionary (dict) and corpus files for the corresponding language, preferably as links.
### 3. Deployment: performance improvements
### 4. Other requests
@@ -39,6 +39,7 @@ set_paddle_flags(
 import program
 from paddle import fluid
 from ppocr.utils.utility import initial_logger
+from ppocr.utils.utility import enable_static_mode
 logger = initial_logger()
 from ppocr.data.reader_main import reader_main
 from ppocr.utils.save_load import init_model
@@ -95,4 +96,5 @@ def main():
 if __name__ == '__main__':
+    enable_static_mode()
     main()
@@ -177,7 +177,7 @@ if __name__ == "__main__":
     text_detector = TextDetector(args)
     count = 0
     total_time = 0
-    draw_img_save = "./inference_results"
+    draw_img_save = os.path.join(".", "inference_results")
     if not os.path.exists(draw_img_save):
         os.makedirs(draw_img_save)
     for image_file in image_file_list:
@@ -41,6 +41,7 @@ set_paddle_flags(
 import tools.program as program
 from paddle import fluid
 from ppocr.utils.utility import initial_logger
+from ppocr.utils.utility import enable_static_mode
 logger = initial_logger()
 from ppocr.data.reader_main import reader_main
@@ -109,6 +110,7 @@ def main():
 if __name__ == '__main__':
+    enable_static_mode()
     parser = program.ArgsParser()
     FLAGS = parser.parse_args()
     main()
@@ -49,6 +49,7 @@ import cv2
 from ppocr.utils.utility import initial_logger
 logger = initial_logger()
+from ppocr.utils.utility import enable_static_mode
 def draw_det_res(dt_boxes, config, img, img_name):
@@ -162,6 +163,7 @@ def main():
 if __name__ == '__main__':
+    enable_static_mode()
     parser = program.ArgsParser()
     FLAGS = parser.parse_args()
     main()
@@ -41,6 +41,7 @@ import tools.program as program
 from paddle import fluid
 from ppocr.utils.utility import initial_logger
 logger = initial_logger()
+from ppocr.utils.utility import enable_static_mode
 from ppocr.data.reader_main import reader_main
 from ppocr.utils.save_load import init_model
 from ppocr.utils.character import CharacterOps
@@ -171,6 +172,7 @@ def main():
 if __name__ == '__main__':
+    enable_static_mode()
     parser = program.ArgsParser()
     FLAGS = parser.parse_args()
     main()
@@ -39,6 +39,7 @@ set_paddle_flags(
 import tools.program as program
 from paddle import fluid
 from ppocr.utils.utility import initial_logger
+from ppocr.utils.utility import enable_static_mode
 logger = initial_logger()
 from ppocr.data.reader_main import reader_main
 from ppocr.utils.save_load import init_model
@@ -119,14 +120,18 @@ def test_reader():
         if count % 1 == 0:
             batch_time = time.time() - starttime
             starttime = time.time()
-            logger.info("reader:", count, len(data), batch_time)
+            logger.info("[reader]count: {}, data length: {}, time: {}".
+                        format(count, len(data), batch_time))
     except Exception as e:
         logger.info(e)
     logger.info("finish reader: {}, Success!".format(count))
 if __name__ == '__main__':
+    enable_static_mode()
     startup_program, train_program, place, config, train_alg_type = program.preprocess(
     )
+    # run the train process
     main()
+    # if you want to check the reader, you can comment `main` and run test_reader
     # test_reader()