diff --git a/PPOCRLabel/PPOCRLabel.py b/PPOCRLabel/PPOCRLabel.py index 517714104d1cb62f3b0c03c34843595d85502417..34c045e96aa10ba678447eefc1d007f9042804b8 100644 --- a/PPOCRLabel/PPOCRLabel.py +++ b/PPOCRLabel/PPOCRLabel.py @@ -10,7 +10,6 @@ # SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF # CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. - # !/usr/bin/env python # -*- coding: utf-8 -*- # pyrcc5 -o libs/resources.py resources.qrc @@ -24,13 +23,11 @@ import subprocess import sys from functools import partial -try: - from PyQt5 import QtCore, QtGui, QtWidgets - from PyQt5.QtGui import * - from PyQt5.QtCore import * - from PyQt5.QtWidgets import * -except ImportError: - print("Please install pyqt5...") +from PyQt5.QtCore import QSize, Qt, QPoint, QByteArray, QTimer, QFileInfo, QPointF, QProcess +from PyQt5.QtGui import QImage, QCursor, QPixmap, QImageReader +from PyQt5.QtWidgets import QMainWindow, QListWidget, QVBoxLayout, QToolButton, QHBoxLayout, QDockWidget, QWidget, \ + QSlider, QGraphicsOpacityEffect, QMessageBox, QListView, QScrollArea, QWidgetAction, QApplication, QLabel, \ + QFileDialog, QListWidgetItem, QComboBox, QDialog __dir__ = os.path.dirname(os.path.abspath(__file__)) @@ -42,6 +39,7 @@ sys.path.append("..") from paddleocr import PaddleOCR from libs.constants import * from libs.utils import * +from libs.labelColor import label_colormap from libs.settings import Settings from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR, DEFAULT_LOCK_COLOR from libs.stringBundle import StringBundle @@ -53,9 +51,13 @@ from libs.colorDialog import ColorDialog from libs.ustr import ustr from libs.hashableQListWidgetItem import HashableQListWidgetItem from libs.editinlist import EditInList +from libs.unique_label_qlist_widget import UniqueLabelQListWidget +from libs.keyDialog import KeyDialog __appname__ = 'PPOCRLabel' +LABEL_COLORMAP = label_colormap() + class MainWindow(QMainWindow): FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3)) @@ -63,6 +65,7 @@ class MainWindow(QMainWindow): def __init__(self, lang="ch", gpu=False, + kie_mode=False, default_filename=None, default_predefined_class_file=None, default_save_dir=None): @@ -76,12 +79,19 @@ class MainWindow(QMainWindow): self.settings.load() settings = self.settings self.lang = lang + # Load string bundle for i18n if lang not in ['ch', 'en']: lang = 'en' self.stringBundle = StringBundle.getBundle(localeStr='zh-CN' if lang == 'ch' else 'en') # 'en' getStr = lambda strId: self.stringBundle.getString(strId) + # KIE setting + self.kie_mode = kie_mode + self.key_previous_text = "" + self.existed_key_cls_set = set() + self.key_dialog_tip = getStr('keyDialogTip') + self.defaultSaveDir = default_save_dir self.ocr = PaddleOCR(use_pdserving=False, use_angle_cls=True, @@ -133,11 +143,13 @@ class MainWindow(QMainWindow): self.autoSaveNum = 5 # ================== File List ================== + + filelistLayout = QVBoxLayout() + filelistLayout.setContentsMargins(0, 0, 0, 0) + self.fileListWidget = QListWidget() self.fileListWidget.itemClicked.connect(self.fileitemDoubleClicked) self.fileListWidget.setIconSize(QSize(25, 25)) - filelistLayout = QVBoxLayout() - filelistLayout.setContentsMargins(0, 0, 0, 0) filelistLayout.addWidget(self.fileListWidget) self.AutoRecognition = QToolButton() @@ -158,10 +170,24 @@ class MainWindow(QMainWindow): self.fileDock.setWidget(fileListContainer) 
self.addDockWidget(Qt.LeftDockWidgetArea, self.fileDock) + # ================== Key List ================== + if self.kie_mode: + # self.keyList = QListWidget() + self.keyList = UniqueLabelQListWidget() + # self.keyList.itemSelectionChanged.connect(self.keyListSelectionChanged) + # self.keyList.itemDoubleClicked.connect(self.editBox) + # self.keyList.itemChanged.connect(self.keyListItemChanged) + self.keyListDockName = getStr('keyListTitle') + self.keyListDock = QDockWidget(self.keyListDockName, self) + self.keyListDock.setWidget(self.keyList) + self.keyListDock.setFeatures(QDockWidget.NoDockWidgetFeatures) + filelistLayout.addWidget(self.keyListDock) + # ================== Right Area ================== listLayout = QVBoxLayout() listLayout.setContentsMargins(0, 0, 0, 0) + # Buttons self.editButton = QToolButton() self.reRecogButton = QToolButton() self.reRecogButton.setIcon(newIcon('reRec', 30)) @@ -174,12 +200,12 @@ class MainWindow(QMainWindow): self.DelButton = QToolButton() self.DelButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) - lefttoptoolbox = QHBoxLayout() - lefttoptoolbox.addWidget(self.newButton) - lefttoptoolbox.addWidget(self.reRecogButton) - lefttoptoolboxcontainer = QWidget() - lefttoptoolboxcontainer.setLayout(lefttoptoolbox) - listLayout.addWidget(lefttoptoolboxcontainer) + leftTopToolBox = QHBoxLayout() + leftTopToolBox.addWidget(self.newButton) + leftTopToolBox.addWidget(self.reRecogButton) + leftTopToolBoxContainer = QWidget() + leftTopToolBoxContainer.setLayout(leftTopToolBox) + listLayout.addWidget(leftTopToolBoxContainer) # ================== Label List ================== # Create and add a widget for showing current label items @@ -341,7 +367,7 @@ class MainWindow(QMainWindow): resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail')) - color1 = action(getStr('boxLineColor'), self.chooseColor1, + color1 = action(getStr('boxLineColor'), self.chooseColor, 'Ctrl+L', 'color_line', getStr('boxLineColorDetail')) createMode = action(getStr('crtBox'), self.setCreateMode, @@ -402,11 +428,12 @@ class MainWindow(QMainWindow): self.MANUAL_ZOOM: lambda: 1, } + # ================== New Actions ================== + edit = action(getStr('editLabel'), self.editLabel, 'Ctrl+E', 'edit', getStr('editLabelDetail'), enabled=False) - # ================== New Actions ================== AutoRec = action(getStr('autoRecognition'), self.autoRecognition, '', 'Auto', getStr('autoRecognition'), enabled=False) @@ -437,6 +464,9 @@ class MainWindow(QMainWindow): undo = action(getStr("undo"), self.undoShapeEdit, 'Ctrl+Z', "undo", getStr("undo"), enabled=False) + change_cls = action(getStr("keyChange"), self.change_box_key, + 'Ctrl+B', "edit", getStr("keyChange"), enabled=False) + lock = action(getStr("lockBox"), self.lockSelectedShape, None, "lock", getStr("lockBoxDetail"), enabled=False) @@ -482,8 +512,7 @@ class MainWindow(QMainWindow): addActions(labelMenu, (edit, delete)) self.labelList.setContextMenuPolicy(Qt.CustomContextMenu) - self.labelList.customContextMenuRequested.connect( - self.popLabelListMenu) + self.labelList.customContextMenuRequested.connect(self.popLabelListMenu) # Draw squares/rectangles self.drawSquaresOption = QAction(getStr('drawSquares'), self) @@ -499,14 +528,15 @@ class MainWindow(QMainWindow): shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor, zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg, fitWindow=fitWindow, fitWidth=fitWidth, - zoomActions=zoomActions, saveLabel=saveLabel, + zoomActions=zoomActions, 
saveLabel=saveLabel, change_cls=change_cls, undo=undo, undoLastPoint=undoLastPoint, open_dataset_dir=open_dataset_dir, rotateLeft=rotateLeft, rotateRight=rotateRight, lock=lock, fileMenuActions=(opendir, open_dataset_dir, saveLabel, resetAll, quit), beginner=(), advanced=(), editMenu=(createpoly, edit, copy, delete, singleRere, None, undo, undoLastPoint, None, rotateLeft, rotateRight, None, color1, self.drawSquaresOption, lock), - beginnerContext=(create, edit, copy, delete, singleRere, rotateLeft, rotateRight, lock), + beginnerContext=( + create, edit, copy, delete, singleRere, rotateLeft, rotateRight, lock, change_cls), advancedContext=(createMode, editMode, edit, copy, delete, shapeLineColor, shapeFillColor), onLoadActive=(create, createMode, editMode), @@ -615,6 +645,8 @@ class MainWindow(QMainWindow): elif self.filePath: self.queueEvent(partial(self.loadFile, self.filePath or "")) + self.keyDialog = None + # Callbacks: self.zoomWidget.valueChanged.connect(self.paintCanvas) @@ -949,6 +981,12 @@ class MainWindow(QMainWindow): self.labelList.scrollToItem(self.currentItem()) # QAbstractItemView.EnsureVisible self.BoxList.scrollToItem(self.currentBox()) + if self.kie_mode: + if len(self.canvas.selectedShapes) == 1 and self.keyList.count() > 0: + selected_key_item_row = self.keyList.findItemsByLabel(self.canvas.selectedShapes[0].key_cls, + get_row=True) + self.keyList.setCurrentRow(selected_key_item_row) + self._noSelectionSlot = False n_selected = len(selected_shapes) self.actions.singleRere.setEnabled(n_selected) @@ -956,6 +994,7 @@ class MainWindow(QMainWindow): self.actions.copy.setEnabled(n_selected) self.actions.edit.setEnabled(n_selected == 1) self.actions.lock.setEnabled(n_selected) + self.actions.change_cls.setEnabled(n_selected) def addLabel(self, shape): shape.paintLabel = self.displayLabelOption.isChecked() @@ -1002,8 +1041,8 @@ class MainWindow(QMainWindow): def loadLabels(self, shapes): s = [] - for label, points, line_color, fill_color, difficult in shapes: - shape = Shape(label=label, line_color=line_color) + for label, points, line_color, key_cls, difficult in shapes: + shape = Shape(label=label, line_color=line_color, key_cls=key_cls) for x, y in points: # Ensure the labels are within the bounds of the image. If not, fix them. 
@@ -1017,16 +1056,7 @@ class MainWindow(QMainWindow): shape.close() s.append(shape) - # if line_color: - # shape.line_color = QColor(*line_color) - # else: - # shape.line_color = generateColorByText(label) - # - # if fill_color: - # shape.fill_color = QColor(*fill_color) - # else: - # shape.fill_color = generateColorByText(label) - + self._update_shape_color(shape) self.addLabel(shape) self.updateComboBox() @@ -1066,14 +1096,16 @@ class MainWindow(QMainWindow): line_color=s.line_color.getRgb(), fill_color=s.fill_color.getRgb(), points=[(int(p.x()), int(p.y())) for p in s.points], # QPonitF - # add chris - difficult=s.difficult) # bool + difficult=s.difficult, + key_cls=s.key_cls) # bool - shapes = [] if mode == 'Auto' else \ - [format_shape(shape) for shape in self.canvas.shapes if shape.line_color != DEFAULT_LOCK_COLOR] + if mode == 'Auto': + shapes = [] + else: + shapes = [format_shape(shape) for shape in self.canvas.shapes if shape.line_color != DEFAULT_LOCK_COLOR] # Can add differrent annotation formats here for box in self.result_dic: - trans_dic = {"label": box[1][0], "points": box[0], 'difficult': False} + trans_dic = {"label": box[1][0], "points": box[0], "difficult": False, "key_cls": "None"} if trans_dic["label"] == "" and mode == 'Auto': continue shapes.append(trans_dic) @@ -1081,8 +1113,8 @@ class MainWindow(QMainWindow): try: trans_dic = [] for box in shapes: - trans_dic.append( - {"transcription": box['label'], "points": box['points'], 'difficult': box['difficult']}) + trans_dic.append({"transcription": box['label'], "points": box['points'], + "difficult": box['difficult'], "key_cls": box['key_cls']}) self.PPlabel[annotationFilePath] = trans_dic if mode == 'Auto': self.Cachelabel[annotationFilePath] = trans_dic @@ -1148,8 +1180,7 @@ class MainWindow(QMainWindow): position MUST be in global coordinates. """ if len(self.labelHist) > 0: - self.labelDialog = LabelDialog( - parent=self, listItem=self.labelHist) + self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist) if value: text = self.labelDialog.popUp(text=self.prevLabelText) @@ -1159,8 +1190,22 @@ class MainWindow(QMainWindow): if text is not None: self.prevLabelText = self.stringBundle.getString('tempLabel') - # generate_color = generateColorByText(text) - shape = self.canvas.setLastLabel(text, None, None) # generate_color, generate_color + + shape = self.canvas.setLastLabel(text, None, None, None) # generate_color, generate_color + if self.kie_mode: + key_text, _ = self.keyDialog.popUp(self.key_previous_text) + if key_text is not None: + shape = self.canvas.setLastLabel(text, None, None, key_text) # generate_color, generate_color + self.key_previous_text = key_text + if not self.keyList.findItemsByLabel(key_text): + item = self.keyList.createItemFromLabel(key_text) + self.keyList.addItem(item) + rgb = self._get_rgb_by_label(key_text, self.kie_mode) + self.keyList.setItemLabel(item, key_text, rgb) + + self._update_shape_color(shape) + self.keyDialog.addLabelHistory(key_text) + self.addLabel(shape) if self.beginner(): # Switch to edit mode. 
self.canvas.setEditing(True) @@ -1175,6 +1220,25 @@ class MainWindow(QMainWindow): # self.canvas.undoLastLine() self.canvas.resetAllLines() + def _update_shape_color(self, shape): + r, g, b = self._get_rgb_by_label(shape.key_cls, self.kie_mode) + shape.line_color = QColor(r, g, b) + shape.vertex_fill_color = QColor(r, g, b) + shape.hvertex_fill_color = QColor(255, 255, 255) + shape.fill_color = QColor(r, g, b, 128) + shape.select_line_color = QColor(255, 255, 255) + shape.select_fill_color = QColor(r, g, b, 155) + + def _get_rgb_by_label(self, label, kie_mode): + shift_auto_shape_color = 2 # use for random color + if kie_mode and label != "None": + item = self.keyList.findItemsByLabel(label)[0] + label_id = self.keyList.indexFromItem(item).row() + 1 + label_id += shift_auto_shape_color + return LABEL_COLORMAP[label_id % len(LABEL_COLORMAP)] + else: + return (0, 255, 0) + def scrollRequest(self, delta, orientation): units = - delta / (8 * 15) bar = self.scrollBars[orientation] @@ -1344,7 +1408,7 @@ class MainWindow(QMainWindow): select_indexes = self.fileListWidget.selectedIndexes() if len(select_indexes) > 0: self.fileDock.setWindowTitle(self.fileListName + f" ({select_indexes[0].row() + 1}" - f"/{self.fileListWidget.count()})") + f"/{self.fileListWidget.count()})") # update show counting self.BoxListDock.setWindowTitle(self.BoxListDockName + f" ({self.BoxList.count()})") self.labelListDock.setWindowTitle(self.labelListDockName + f" ({self.labelList.count()})") @@ -1362,13 +1426,13 @@ class MainWindow(QMainWindow): for box in self.canvas.lockedShapes: if self.canvas.isInTheSameImage: shapes.append((box['transcription'], [[s[0] * width, s[1] * height] for s in box['ratio']], - DEFAULT_LOCK_COLOR, None, box['difficult'])) + DEFAULT_LOCK_COLOR, box['key_cls'], box['difficult'])) else: shapes.append(('锁定框:待检测', [[s[0] * width, s[1] * height] for s in box['ratio']], - DEFAULT_LOCK_COLOR, None, box['difficult'])) + DEFAULT_LOCK_COLOR, box['key_cls'], box['difficult'])) if imgidx in self.PPlabel.keys(): for box in self.PPlabel[imgidx]: - shapes.append((box['transcription'], box['points'], None, None, box['difficult'])) + shapes.append((box['transcription'], box['points'], None, box['key_cls'], box['difficult'])) self.loadLabels(shapes) self.canvas.verified = False @@ -1504,6 +1568,39 @@ class MainWindow(QMainWindow): self.actions.open_dataset_dir.setEnabled(False) defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.' 
+ def init_key_list(self, label_dict): + if not self.kie_mode: + return + # load key_cls + for image, info in label_dict.items(): + for box in info: + if "key_cls" not in box: + continue + self.existed_key_cls_set.add(box["key_cls"]) + if len(self.existed_key_cls_set) > 0: + for key_text in self.existed_key_cls_set: + if not self.keyList.findItemsByLabel(key_text): + item = self.keyList.createItemFromLabel(key_text) + self.keyList.addItem(item) + rgb = self._get_rgb_by_label(key_text, self.kie_mode) + self.keyList.setItemLabel(item, key_text, rgb) + + if self.keyDialog is None: + # key list dialog + self.keyDialog = KeyDialog( + text=self.key_dialog_tip, + parent=self, + labels=self.existed_key_cls_set, + sort_labels=True, + show_text_field=True, + completion="startswith", + fit_to_content={'column': True, 'row': False}, + flags=None + ) + else: + self.keyDialog.labelList.addItems(self.existed_key_cls_set) + + def importDirImages(self, dirpath, isDelete=False): if not self.mayContinue() or not dirpath: return @@ -1518,6 +1615,9 @@ class MainWindow(QMainWindow): self.Cachelabel = self.loadLabelFile(self.Cachelabelpath) if self.Cachelabel: self.PPlabel = dict(self.Cachelabel, **self.PPlabel) + + self.init_key_list(self.PPlabel) + self.lastOpenDir = dirpath self.dirname = dirpath @@ -1737,7 +1837,7 @@ class MainWindow(QMainWindow): def currentPath(self): return os.path.dirname(self.filePath) if self.filePath else '.' - def chooseColor1(self): + def chooseColor(self): color = self.colorDialog.getColor(self.lineColor, u'Choose line color', default=DEFAULT_LINE_COLOR) if color: @@ -1854,6 +1954,8 @@ class MainWindow(QMainWindow): self.setDirty() self.saveCacheLabel() + self.init_key_list(self.Cachelabel) + def reRecognition(self): img = cv2.imread(self.filePath) # org_box = [dic['points'] for dic in self.PPlabel[self.getImglabelidx(self.filePath)]] @@ -2059,7 +2161,8 @@ class MainWindow(QMainWindow): try: img = cv2.imread(key) for i, label in enumerate(self.PPlabel[idx]): - if label['difficult']: continue + if label['difficult']: + continue img_crop = get_rotate_crop_image(img, np.array(label['points'], np.float32)) img_name = os.path.splitext(os.path.basename(idx))[0] + '_crop_' + str(i) + '.jpg' cv2.imwrite(crop_img_dir + img_name, img_crop) @@ -2096,6 +2199,15 @@ class MainWindow(QMainWindow): self.autoSaveNum = 5 # Used for backup print('The program will automatically save once after confirming 5 images (default)') + def change_box_key(self): + key_text, _ = self.keyDialog.popUp(self.key_previous_text) + if key_text is None: + return + self.key_previous_text = key_text + for shape in self.canvas.selectedShapes: + shape.key_cls = key_text + self._update_shape_color(shape) + def undoShapeEdit(self): self.canvas.restoreShape() self.labelList.clear() @@ -2126,8 +2238,9 @@ class MainWindow(QMainWindow): line_color=s.line_color.getRgb(), fill_color=s.fill_color.getRgb(), ratio=[[int(p.x()) / width, int(p.y()) / height] for p in s.points], # QPonitF - # add chris - difficult=s.difficult) # bool + difficult=s.difficult, # bool + key_cls=s.key_cls, # bool + ) # lock if len(self.canvas.lockedShapes) == 0: @@ -2137,7 +2250,9 @@ class MainWindow(QMainWindow): shapes = [format_shape(shape) for shape in self.canvas.selectedShapes] trans_dic = [] for box in shapes: - trans_dic.append({"transcription": box['label'], "ratio": box['ratio'], 'difficult': box['difficult']}) + trans_dic.append({"transcription": box['label'], "ratio": box['ratio'], + "difficult": box['difficult'], + "key_cls": "None" if "key_cls" 
not in box else box["key_cls"]}) self.canvas.lockedShapes = trans_dic self.actions.save.setEnabled(True) @@ -2179,6 +2294,7 @@ def get_main_app(argv=[]): arg_parser = argparse.ArgumentParser() arg_parser.add_argument("--lang", type=str, default='en', nargs="?") arg_parser.add_argument("--gpu", type=str2bool, default=True, nargs="?") + arg_parser.add_argument("--kie", type=str2bool, default=False, nargs="?") arg_parser.add_argument("--predefined_classes_file", default=os.path.join(os.path.dirname(__file__), "data", "predefined_classes.txt"), nargs="?") @@ -2186,6 +2302,7 @@ def get_main_app(argv=[]): win = MainWindow(lang=args.lang, gpu=args.gpu, + kie_mode=args.kie, default_predefined_class_file=args.predefined_classes_file) win.show() return app, win diff --git a/PPOCRLabel/README.md b/PPOCRLabel/README.md index 9c6ce120974701b372fb091fcd40038f790444d3..4d25e670ae6d07d569a247bc5f9c35c939b23f8e 100644 --- a/PPOCRLabel/README.md +++ b/PPOCRLabel/README.md @@ -8,6 +8,8 @@ PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, w ### Recent Update +- 2022.02:(by [PeterH0323](https://github.com/peterh0323) ) + - Added KIE mode, for [detection + identification + keyword extraction] labeling. - 2022.01:(by [PeterH0323](https://github.com/peterh0323) ) - Improve user experience: prompt for the number of files and labels, optimize interaction, and fix bugs such as only use CPU when inference - 2021.11.17: @@ -72,7 +74,8 @@ PPOCRLabel ```bash pip3 install PPOCRLabel pip3 install opencv-contrib-python-headless==4.2.0.32 -PPOCRLabel # run +PPOCRLabel # [Normal mode] for [detection + recognition] labeling +PPOCRLabel --kie True # [KIE mode] for [detection + recognition + keyword extraction] labeling ``` #### 1.2.2 Build and Install the Whl Package Locally @@ -87,7 +90,8 @@ pip3 install dist/PPOCRLabel-1.0.2-py2.py3-none-any.whl ```bash cd ./PPOCRLabel # Switch to the PPOCRLabel directory -python PPOCRLabel.py +python PPOCRLabel.py # [Normal mode] for [detection + recognition] labeling +python PPOCRLabel.py --kie True # [KIE mode] for [detection + recognition + keyword extraction] labeling ``` @@ -198,21 +202,31 @@ For some data that are difficult to recognize, the recognition results will not - Enter the following command in the terminal to execute the dataset division script: - ``` + ``` cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder - python gen_ocr_train_val_test.py --trainValTestRatio 6:2:2 --labelRootPath ../train_data/label --detRootPath ../train_data/det --recRootPath ../train_data/rec + python gen_ocr_train_val_test.py --trainValTestRatio 6:2:2 --datasetRootPath ../train_data ``` Parameter Description: - `trainValTestRatio` is the division ratio of the number of images in the training set, validation set, and test set, set according to your actual situation, the default is `6:2:2` - - `labelRootPath` is the storage path of the dataset labeled by PPOCRLabel, the default is `../train_data/label` - - - `detRootPath` is the path where the text detection dataset is divided according to the dataset marked by PPOCRLabel. The default is `../train_data/det` - - - `recRootPath` is the path where the character recognition dataset is divided according to the dataset marked by PPOCRLabel. The default is `../train_data/rec` - + - `datasetRootPath` is the storage path of the complete dataset labeled by PPOCRLabel. The default path is `PaddleOCR/train_data` . 
+ ``` + |-train_data + |-crop_img + |- word_001_crop_0.png + |- word_002_crop_0.jpg + |- word_003_crop_0.jpg + | ... + | Label.txt + | rec_gt.txt + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... + ``` + ### 3.6 Error message - If paddleocr is installed with whl, it has a higher priority than calling PaddleOCR class with paddleocr.py, which may cause an exception if whl package is not updated. diff --git a/PPOCRLabel/README_ch.md b/PPOCRLabel/README_ch.md index 0fb2dcc023f3d48988dde96778e293589861c174..3f8dc4f0c6b7cc88a71409d123f598e74b3f2cad 100644 --- a/PPOCRLabel/README_ch.md +++ b/PPOCRLabel/README_ch.md @@ -8,6 +8,8 @@ PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置P #### 近期更新 +- 2022.02:(by [PeterH0323](https://github.com/peterh0323) ) + - 新增:KIE 功能,用于打【检测+识别+关键字提取】的标签 - 2022.01:(by [PeterH0323](https://github.com/peterh0323) ) - 提升用户体验:新增文件与标记数目提示、优化交互、修复gpu使用等问题 - 2021.11.17: @@ -70,7 +72,8 @@ PPOCRLabel --lang ch ```bash pip3 install PPOCRLabel pip3 install opencv-contrib-python-headless==4.2.0.32 # 如果下载过慢请添加"-i https://mirror.baidu.com/pypi/simple" -PPOCRLabel --lang ch # 启动 +PPOCRLabel --lang ch # 启动【普通模式】,用于打【检测+识别】场景的标签 +PPOCRLabel --lang ch --kie True # 启动 【KIE 模式】,用于打【检测+识别+关键字提取】场景的标签 ``` > 如果上述安装出现问题,可以参考3.6节 错误提示 @@ -89,7 +92,8 @@ pip3 install dist/PPOCRLabel-1.0.2-py2.py3-none-any.whl -i https://mirror.baidu. ```bash cd ./PPOCRLabel # 切换到PPOCRLabel目录 -python PPOCRLabel.py --lang ch +python PPOCRLabel.py --lang ch # 启动【普通模式】,用于打【检测+识别】场景的标签 +python PPOCRLabel.py --lang ch --kie True # 启动 【KIE 模式】,用于打【检测+识别+关键字提取】场景的标签 ``` @@ -185,19 +189,29 @@ PPOCRLabel支持三种导出方式: ``` cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下 -python gen_ocr_train_val_test.py --trainValTestRatio 6:2:2 --labelRootPath ../train_data/label --detRootPath ../train_data/det --recRootPath ../train_data/rec +python gen_ocr_train_val_test.py --trainValTestRatio 6:2:2 --datasetRootPath ../train_data ``` 参数说明: - `trainValTestRatio` 是训练集、验证集、测试集的图像数量划分比例,根据实际情况设定,默认是`6:2:2` -- `labelRootPath` 是PPOCRLabel标注的数据集存放路径,默认是`../train_data/label` - -- `detRootPath` 是根据PPOCRLabel标注的数据集划分后的文本检测数据集存放的路径,默认是`../train_data/det ` - -- `recRootPath` 是根据PPOCRLabel标注的数据集划分后的字符识别数据集存放的路径,默认是`../train_data/rec` - +- `datasetRootPath` 是PPOCRLabel标注的完整数据集存放路径。默认路径是 `PaddleOCR/train_data` 分割数据集前应有如下结构: + ``` + |-train_data + |-crop_img + |- word_001_crop_0.png + |- word_002_crop_0.jpg + |- word_003_crop_0.jpg + | ... + | Label.txt + | rec_gt.txt + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... 
+ ``` + ### 3.6 错误提示 - 如果同时使用whl包安装了paddleocr,其优先级大于通过paddleocr.py调用PaddleOCR类,whl包未更新时会导致程序异常。 diff --git a/PPOCRLabel/gen_ocr_train_val_test.py b/PPOCRLabel/gen_ocr_train_val_test.py index 64cba612ae267835dd47aedc2b0356c9df462038..03ae566c6ec64d7ade229fb9571b0cd89ec189d4 100644 --- a/PPOCRLabel/gen_ocr_train_val_test.py +++ b/PPOCRLabel/gen_ocr_train_val_test.py @@ -17,15 +17,14 @@ def isCreateOrDeleteFolder(path, flag): return flagAbsPath -def splitTrainVal(root, dir, absTrainRootPath, absValRootPath, absTestRootPath, trainTxt, valTxt, testTxt, flag): +def splitTrainVal(root, absTrainRootPath, absValRootPath, absTestRootPath, trainTxt, valTxt, testTxt, flag): # 按照指定的比例划分训练集、验证集、测试集 - labelPath = os.path.join(root, dir) - labelAbsPath = os.path.abspath(labelPath) + dataAbsPath = os.path.abspath(root) if flag == "det": - labelFilePath = os.path.join(labelAbsPath, args.detLabelFileName) + labelFilePath = os.path.join(dataAbsPath, args.detLabelFileName) elif flag == "rec": - labelFilePath = os.path.join(labelAbsPath, args.recLabelFileName) + labelFilePath = os.path.join(dataAbsPath, args.recLabelFileName) labelFileRead = open(labelFilePath, "r", encoding="UTF-8") labelFileContent = labelFileRead.readlines() @@ -38,9 +37,9 @@ def splitTrainVal(root, dir, absTrainRootPath, absValRootPath, absTestRootPath, imageName = os.path.basename(imageRelativePath) if flag == "det": - imagePath = os.path.join(labelAbsPath, imageName) + imagePath = os.path.join(dataAbsPath, imageName) elif flag == "rec": - imagePath = os.path.join(labelAbsPath, "{}\\{}".format(args.recImageDirName, imageName)) + imagePath = os.path.join(dataAbsPath, "{}\\{}".format(args.recImageDirName, imageName)) # 按预设的比例划分训练集、验证集、测试集 trainValTestRatio = args.trainValTestRatio.split(":") @@ -90,15 +89,20 @@ def genDetRecTrainVal(args): recValTxt = open(os.path.join(args.recRootPath, "val.txt"), "a", encoding="UTF-8") recTestTxt = open(os.path.join(args.recRootPath, "test.txt"), "a", encoding="UTF-8") - for root, dirs, files in os.walk(args.labelRootPath): + splitTrainVal(args.datasetRootPath, detAbsTrainRootPath, detAbsValRootPath, detAbsTestRootPath, detTrainTxt, detValTxt, + detTestTxt, "det") + + for root, dirs, files in os.walk(args.datasetRootPath): for dir in dirs: - splitTrainVal(root, dir, detAbsTrainRootPath, detAbsValRootPath, detAbsTestRootPath, detTrainTxt, detValTxt, - detTestTxt, "det") - splitTrainVal(root, dir, recAbsTrainRootPath, recAbsValRootPath, recAbsTestRootPath, recTrainTxt, recValTxt, - recTestTxt, "rec") + if dir == 'crop_img': + splitTrainVal(root, recAbsTrainRootPath, recAbsValRootPath, recAbsTestRootPath, recTrainTxt, recValTxt, + recTestTxt, "rec") + else: + continue break + if __name__ == "__main__": # 功能描述:分别划分检测和识别的训练集、验证集、测试集 # 说明:可以根据自己的路径和需求调整参数,图像数据往往多人合作分批标注,每一批图像数据放在一个文件夹内用PPOCRLabel进行标注, @@ -110,9 +114,9 @@ if __name__ == "__main__": default="6:2:2", help="ratio of trainset:valset:testset") parser.add_argument( - "--labelRootPath", + "--datasetRootPath", type=str, - default="../train_data/label", + default="../train_data/", help="path to the dataset marked by ppocrlabel, E.g, dataset folder named 1,2,3..." 
) parser.add_argument( diff --git a/PPOCRLabel/libs/canvas.py b/PPOCRLabel/libs/canvas.py index 8d257e6bd7e7a61d7c28e9787042c3eb9d42609f..095fe5ab06553dcb05c8bcc061f950ded606ebb3 100644 --- a/PPOCRLabel/libs/canvas.py +++ b/PPOCRLabel/libs/canvas.py @@ -783,7 +783,7 @@ class Canvas(QWidget): points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)] return True in map(self.outOfPixmap, points) - def setLastLabel(self, text, line_color = None, fill_color = None): + def setLastLabel(self, text, line_color=None, fill_color=None, key_cls=None): assert text self.shapes[-1].label = text if line_color: @@ -791,6 +791,10 @@ class Canvas(QWidget): if fill_color: self.shapes[-1].fill_color = fill_color + + if key_cls: + self.shapes[-1].key_cls = key_cls + self.storeShapes() return self.shapes[-1] diff --git a/PPOCRLabel/libs/keyDialog.py b/PPOCRLabel/libs/keyDialog.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec8d97147cd2eb1e3c8482a9a6c5092edcd1b9c --- /dev/null +++ b/PPOCRLabel/libs/keyDialog.py @@ -0,0 +1,216 @@ +import re + +from PyQt5 import QtCore +from PyQt5 import QtGui +from PyQt5 import QtWidgets +from PyQt5.Qt import QT_VERSION_STR +from libs.utils import newIcon, labelValidator + +QT5 = QT_VERSION_STR[0] == '5' + + +# TODO(unknown): +# - Calculate optimal position so as not to go out of screen area. + + +class KeyQLineEdit(QtWidgets.QLineEdit): + def setListWidget(self, list_widget): + self.list_widget = list_widget + + def keyPressEvent(self, e): + if e.key() in [QtCore.Qt.Key_Up, QtCore.Qt.Key_Down]: + self.list_widget.keyPressEvent(e) + else: + super(KeyQLineEdit, self).keyPressEvent(e) + + +class KeyDialog(QtWidgets.QDialog): + def __init__( + self, + text="Enter object label", + parent=None, + labels=None, + sort_labels=True, + show_text_field=True, + completion="startswith", + fit_to_content=None, + flags=None, + ): + if fit_to_content is None: + fit_to_content = {"row": False, "column": True} + self._fit_to_content = fit_to_content + + super(KeyDialog, self).__init__(parent) + self.edit = KeyQLineEdit() + self.edit.setPlaceholderText(text) + self.edit.setValidator(labelValidator()) + self.edit.editingFinished.connect(self.postProcess) + if flags: + self.edit.textChanged.connect(self.updateFlags) + + layout = QtWidgets.QVBoxLayout() + if show_text_field: + layout_edit = QtWidgets.QHBoxLayout() + layout_edit.addWidget(self.edit, 6) + layout.addLayout(layout_edit) + # buttons + self.buttonBox = bb = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, + QtCore.Qt.Horizontal, + self, + ) + bb.button(bb.Ok).setIcon(newIcon("done")) + bb.button(bb.Cancel).setIcon(newIcon("undo")) + bb.accepted.connect(self.validate) + bb.rejected.connect(self.reject) + layout.addWidget(bb) + # label_list + self.labelList = QtWidgets.QListWidget() + if self._fit_to_content["row"]: + self.labelList.setHorizontalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOff + ) + if self._fit_to_content["column"]: + self.labelList.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOff + ) + self._sort_labels = sort_labels + if labels: + self.labelList.addItems(labels) + if self._sort_labels: + self.labelList.sortItems() + else: + self.labelList.setDragDropMode( + QtWidgets.QAbstractItemView.InternalMove + ) + self.labelList.currentItemChanged.connect(self.labelSelected) + self.labelList.itemDoubleClicked.connect(self.labelDoubleClicked) + self.edit.setListWidget(self.labelList) + layout.addWidget(self.labelList) + # label_flags + if 
flags is None: + flags = {} + self._flags = flags + self.flagsLayout = QtWidgets.QVBoxLayout() + self.resetFlags() + layout.addItem(self.flagsLayout) + self.edit.textChanged.connect(self.updateFlags) + self.setLayout(layout) + # completion + completer = QtWidgets.QCompleter() + if not QT5 and completion != "startswith": + completion = "startswith" + if completion == "startswith": + completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion) + # Default settings. + # completer.setFilterMode(QtCore.Qt.MatchStartsWith) + elif completion == "contains": + completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion) + completer.setFilterMode(QtCore.Qt.MatchContains) + else: + raise ValueError("Unsupported completion: {}".format(completion)) + completer.setModel(self.labelList.model()) + self.edit.setCompleter(completer) + + def addLabelHistory(self, label): + if self.labelList.findItems(label, QtCore.Qt.MatchExactly): + return + self.labelList.addItem(label) + if self._sort_labels: + self.labelList.sortItems() + + def labelSelected(self, item): + self.edit.setText(item.text()) + + def validate(self): + text = self.edit.text() + if hasattr(text, "strip"): + text = text.strip() + else: + text = text.trimmed() + if text: + self.accept() + + def labelDoubleClicked(self, item): + self.validate() + + def postProcess(self): + text = self.edit.text() + if hasattr(text, "strip"): + text = text.strip() + else: + text = text.trimmed() + self.edit.setText(text) + + def updateFlags(self, label_new): + # keep state of shared flags + flags_old = self.getFlags() + + flags_new = {} + for pattern, keys in self._flags.items(): + if re.match(pattern, label_new): + for key in keys: + flags_new[key] = flags_old.get(key, False) + self.setFlags(flags_new) + + def deleteFlags(self): + for i in reversed(range(self.flagsLayout.count())): + item = self.flagsLayout.itemAt(i).widget() + self.flagsLayout.removeWidget(item) + item.setParent(None) + + def resetFlags(self, label=""): + flags = {} + for pattern, keys in self._flags.items(): + if re.match(pattern, label): + for key in keys: + flags[key] = False + self.setFlags(flags) + + def setFlags(self, flags): + self.deleteFlags() + for key in flags: + item = QtWidgets.QCheckBox(key, self) + item.setChecked(flags[key]) + self.flagsLayout.addWidget(item) + item.show() + + def getFlags(self): + flags = {} + for i in range(self.flagsLayout.count()): + item = self.flagsLayout.itemAt(i).widget() + flags[item.text()] = item.isChecked() + return flags + + def popUp(self, text=None, move=True, flags=None): + if self._fit_to_content["row"]: + self.labelList.setMinimumHeight( + self.labelList.sizeHintForRow(0) * self.labelList.count() + 2 + ) + if self._fit_to_content["column"]: + self.labelList.setMinimumWidth( + self.labelList.sizeHintForColumn(0) + 2 + ) + # if text is None, the previous label in self.edit is kept + if text is None: + text = self.edit.text() + if flags: + self.setFlags(flags) + else: + self.resetFlags(text) + self.edit.setText(text) + self.edit.setSelection(0, len(text)) + + items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString) + if items: + if len(items) != 1: + self.labelList.setCurrentItem(items[0]) + row = self.labelList.row(items[0]) + self.edit.completer().setCurrentRow(row) + self.edit.setFocus(QtCore.Qt.PopupFocusReason) + if move: + self.move(QtGui.QCursor.pos()) + if self.exec_(): + return self.edit.text(), self.getFlags() + else: + return None, None diff --git a/PPOCRLabel/libs/labelColor.py b/PPOCRLabel/libs/labelColor.py new file 
mode 100644 index 0000000000000000000000000000000000000000..c6f933981f3ca13981910a88fca76f884d727a14 --- /dev/null +++ b/PPOCRLabel/libs/labelColor.py @@ -0,0 +1,88 @@ +import PIL.Image +import numpy as np + + +def rgb2hsv(rgb): + # type: (np.ndarray) -> np.ndarray + """Convert rgb to hsv. + + Parameters + ---------- + rgb: numpy.ndarray, (H, W, 3), np.uint8 + Input rgb image. + + Returns + ------- + hsv: numpy.ndarray, (H, W, 3), np.uint8 + Output hsv image. + + """ + hsv = PIL.Image.fromarray(rgb, mode="RGB") + hsv = hsv.convert("HSV") + hsv = np.array(hsv) + return hsv + + +def hsv2rgb(hsv): + # type: (np.ndarray) -> np.ndarray + """Convert hsv to rgb. + + Parameters + ---------- + hsv: numpy.ndarray, (H, W, 3), np.uint8 + Input hsv image. + + Returns + ------- + rgb: numpy.ndarray, (H, W, 3), np.uint8 + Output rgb image. + + """ + rgb = PIL.Image.fromarray(hsv, mode="HSV") + rgb = rgb.convert("RGB") + rgb = np.array(rgb) + return rgb + + +def label_colormap(n_label=256, value=None): + """Label colormap. + + Parameters + ---------- + n_label: int + Number of labels (default: 256). + value: float or int + Value scale or value of label color in HSV space. + + Returns + ------- + cmap: numpy.ndarray, (N, 3), numpy.uint8 + Label id to colormap. + + """ + + def bitget(byteval, idx): + return (byteval & (1 << idx)) != 0 + + cmap = np.zeros((n_label, 3), dtype=np.uint8) + for i in range(0, n_label): + id = i + r, g, b = 0, 0, 0 + for j in range(0, 8): + r = np.bitwise_or(r, (bitget(id, 0) << 7 - j)) + g = np.bitwise_or(g, (bitget(id, 1) << 7 - j)) + b = np.bitwise_or(b, (bitget(id, 2) << 7 - j)) + id = id >> 3 + cmap[i, 0] = r + cmap[i, 1] = g + cmap[i, 2] = b + + if value is not None: + hsv = rgb2hsv(cmap.reshape(1, -1, 3)) + if isinstance(value, float): + hsv[:, 1:, 2] = hsv[:, 1:, 2].astype(float) * value + else: + assert isinstance(value, int) + hsv[:, 1:, 2] = value + cmap = hsv2rgb(hsv).reshape(-1, 3) + return cmap diff --git a/PPOCRLabel/libs/shape.py b/PPOCRLabel/libs/shape.py index 528b1102b010ceef8fa1057309e652010a91376d..fc8ab5ec4d7ff2836034d9c7e01acaf49dfe7aa0 100644 --- a/PPOCRLabel/libs/shape.py +++ b/PPOCRLabel/libs/shape.py @@ -46,12 +46,13 @@ class Shape(object): point_size = 8 scale = 1.0 - def __init__(self, label=None, line_color=None, difficult=False, paintLabel=False): + def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False): self.label = label self.points = [] self.fill = False self.selected = False self.difficult = difficult + self.key_cls = key_cls self.paintLabel = paintLabel self.locked = False self.direction = 0 @@ -224,6 +225,7 @@ class Shape(object): if self.fill_color != Shape.fill_color: shape.fill_color = self.fill_color shape.difficult = self.difficult + shape.key_cls = self.key_cls return shape def __len__(self): diff --git a/PPOCRLabel/libs/unique_label_qlist_widget.py b/PPOCRLabel/libs/unique_label_qlist_widget.py new file mode 100644 index 0000000000000000000000000000000000000000..f1eff7a172d3fecf9c18579ccead5f62ba65ecd5 --- /dev/null +++ b/PPOCRLabel/libs/unique_label_qlist_widget.py @@ -0,0 +1,45 @@ +# -*- encoding: utf-8 -*- + +from PyQt5.QtCore import Qt +from PyQt5 import QtWidgets + + +class EscapableQListWidget(QtWidgets.QListWidget): + def keyPressEvent(self, event): + super(EscapableQListWidget, self).keyPressEvent(event) + if event.key() == Qt.Key_Escape: + self.clearSelection() + + +class UniqueLabelQListWidget(EscapableQListWidget): + def mousePressEvent(self, event): + super(UniqueLabelQListWidget, 
self).mousePressEvent(event) + if not self.indexAt(event.pos()).isValid(): + self.clearSelection() + + def findItemsByLabel(self, label, get_row=False): + items = [] + for row in range(self.count()): + item = self.item(row) + if item.data(Qt.UserRole) == label: + items.append(item) + if get_row: + return row + return items + + def createItemFromLabel(self, label): + item = QtWidgets.QListWidgetItem() + item.setData(Qt.UserRole, label) + return item + + def setItemLabel(self, item, label, color=None): + qlabel = QtWidgets.QLabel() + if color is None: + qlabel.setText(f"{label}") + else: + qlabel.setText(' {} '.format(*color, label)) + qlabel.setAlignment(Qt.AlignBottom) + + item.setSizeHint(qlabel.sizeHint()) + + self.setItemWidget(item, qlabel) diff --git a/PPOCRLabel/libs/utils.py b/PPOCRLabel/libs/utils.py index 9fab41d3ffee33b8f86f9576507eb13b18806496..2510520caa8048d7787d7c8f65df2885d76026f7 100644 --- a/PPOCRLabel/libs/utils.py +++ b/PPOCRLabel/libs/utils.py @@ -10,30 +10,26 @@ # SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF # CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. -from math import sqrt -from libs.ustr import ustr import hashlib +import os import re import sys +from math import sqrt + import cv2 import numpy as np -import os +from PyQt5.QtCore import QRegExp, QT_VERSION_STR +from PyQt5.QtGui import QIcon, QRegExpValidator, QColor +from PyQt5.QtWidgets import QPushButton, QAction, QMenu +from libs.ustr import ustr -__dir__ = os.path.dirname(os.path.abspath(__file__)) # 获取本程序文件路径 +__dir__ = os.path.dirname(os.path.abspath(__file__)) # 获取本程序文件路径 __iconpath__ = os.path.abspath(os.path.join(__dir__, '../resources/icons')) -try: - from PyQt5.QtGui import * - from PyQt5.QtCore import * - from PyQt5.QtWidgets import * -except ImportError: - from PyQt4.QtGui import * - from PyQt4.QtCore import * - def newIcon(icon, iconSize=None): if iconSize is not None: - return QIcon(QIcon(__iconpath__ + "/" + icon + ".png").pixmap(iconSize,iconSize)) + return QIcon(QIcon(__iconpath__ + "/" + icon + ".png").pixmap(iconSize, iconSize)) else: return QIcon(__iconpath__ + "/" + icon + ".png") @@ -105,24 +101,25 @@ def generateColorByText(text): s = ustr(text) hashCode = int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16) r = int((hashCode / 255) % 255) - g = int((hashCode / 65025) % 255) - b = int((hashCode / 16581375) % 255) + g = int((hashCode / 65025) % 255) + b = int((hashCode / 16581375) % 255) return QColor(r, g, b, 100) + def have_qstring(): '''p3/qt5 get rid of QString wrapper as py3 has native unicode str type''' return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.')) -def util_qt_strlistclass(): - return QStringList if have_qstring() else list -def natural_sort(list, key=lambda s:s): +def natural_sort(list, key=lambda s: s): """ Sort the list into natural alphanumeric order. 
""" + def get_alphanum_key_func(key): convert = lambda text: int(text) if text.isdigit() else text return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))] + sort_key = get_alphanum_key_func(key) list.sort(key=sort_key) @@ -133,8 +130,8 @@ def get_rotate_crop_image(img, points): d = 0.0 for index in range(-1, 3): d += -0.5 * (points[index + 1][1] + points[index][1]) * ( - points[index + 1][0] - points[index][0]) - if d < 0: # counterclockwise + points[index + 1][0] - points[index][0]) + if d < 0: # counterclockwise tmp = np.array(points) points[1], points[3] = tmp[3], tmp[1] @@ -163,10 +160,11 @@ def get_rotate_crop_image(img, points): except Exception as e: print(e) + def stepsInfo(lang='en'): if lang == 'ch': msg = "1. 安装与运行:使用上述命令安装与运行程序。\n" \ - "2. 打开文件夹:在菜单栏点击 “文件” - 打开目录 选择待标记图片的文件夹.\n"\ + "2. 打开文件夹:在菜单栏点击 “文件” - 打开目录 选择待标记图片的文件夹.\n" \ "3. 自动标注:点击 ”自动标注“,使用PPOCR超轻量模型对图片文件名前图片状态为 “X” 的图片进行自动标注。\n" \ "4. 手动标注:点击 “矩形标注”(推荐直接在英文模式下点击键盘中的 “W”),用户可对当前图片中模型未检出的部分进行手动" \ "绘制标记框。点击键盘P,则使用四点标注模式(或点击“编辑” - “四点标注”),用户依次点击4个点后,双击左键表示标注完成。\n" \ @@ -181,25 +179,26 @@ def stepsInfo(lang='en'): else: msg = "1. Build and launch using the instructions above.\n" \ - "2. Click 'Open Dir' in Menu/File to select the folder of the picture.\n"\ - "3. Click 'Auto recognition', use PPOCR model to automatically annotate images which marked with 'X' before the file name."\ - "4. Create Box:\n"\ - "4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangle detection box. Click and release left mouse to select a region to annotate the text area.\n"\ - "4.2 Press 'P' to enter four-point labeling mode which enables you to create any four-point shape by clicking four points with the left mouse button in succession and DOUBLE CLICK the left mouse as the signal of labeling completion.\n"\ - "5. After the marking frame is drawn, the user clicks 'OK', and the detection frame will be pre-assigned a TEMPORARY label.\n"\ - "6. Click re-Recognition, model will rewrite ALL recognition results in ALL detection box.\n"\ - "7. Double click the result in 'recognition result' list to manually change inaccurate recognition results.\n"\ - "8. Click 'Save', the image status will switch to '√',then the program automatically jump to the next.\n"\ - "9. Click 'Delete Image' and the image will be deleted to the recycle bin.\n"\ - "10. Labeling result: After closing the application or switching the file path, the manually saved label will be stored in *Label.txt* under the opened picture folder.\n"\ - " Click PaddleOCR-Save Recognition Results in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*.\n" + "2. Click 'Open Dir' in Menu/File to select the folder of the picture.\n" \ + "3. Click 'Auto recognition', use PPOCR model to automatically annotate images which marked with 'X' before the file name." \ + "4. Create Box:\n" \ + "4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangle detection box. Click and release left mouse to select a region to annotate the text area.\n" \ + "4.2 Press 'P' to enter four-point labeling mode which enables you to create any four-point shape by clicking four points with the left mouse button in succession and DOUBLE CLICK the left mouse as the signal of labeling completion.\n" \ + "5. After the marking frame is drawn, the user clicks 'OK', and the detection frame will be pre-assigned a TEMPORARY label.\n" \ + "6. 
Click re-Recognition, model will rewrite ALL recognition results in ALL detection box.\n" \ + "7. Double click the result in 'recognition result' list to manually change inaccurate recognition results.\n" \ + "8. Click 'Save', the image status will switch to '√',then the program automatically jump to the next.\n" \ + "9. Click 'Delete Image' and the image will be deleted to the recycle bin.\n" \ + "10. Labeling result: After closing the application or switching the file path, the manually saved label will be stored in *Label.txt* under the opened picture folder.\n" \ + " Click PaddleOCR-Save Recognition Results in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*.\n" return msg + def keysInfo(lang='en'): if lang == 'ch': msg = "快捷键\t\t\t说明\n" \ - "———————————————————————\n"\ + "———————————————————————\n" \ "Ctrl + shift + R\t\t对当前图片的所有标记重新识别\n" \ "W\t\t\t新建矩形框\n" \ "Q\t\t\t新建四点框\n" \ @@ -223,17 +222,17 @@ def keysInfo(lang='en'): "———————————————————————\n" \ "Ctrl + shift + R\t\tRe-recognize all the labels\n" \ "\t\t\tof the current image\n" \ - "\n"\ + "\n" \ "W\t\t\tCreate a rect box\n" \ "Q\t\t\tCreate a four-points box\n" \ "Ctrl + E\t\tEdit label of the selected box\n" \ "Ctrl + R\t\tRe-recognize the selected box\n" \ "Ctrl + C\t\tCopy and paste the selected\n" \ "\t\t\tbox\n" \ - "\n"\ + "\n" \ "Ctrl + Left Mouse\tMulti select the label\n" \ "Button\t\t\tbox\n" \ - "\n"\ + "\n" \ "Backspace\t\tDelete the selected box\n" \ "Ctrl + V\t\tCheck image\n" \ "Ctrl + Shift + d\tDelete image\n" \ @@ -245,4 +244,4 @@ def keysInfo(lang='en'): "———————————————————————\n" \ "Notice:For Mac users, use the 'Command' key instead of the 'Ctrl' key" - return msg \ No newline at end of file + return msg diff --git a/PPOCRLabel/resources/strings/strings-en.properties b/PPOCRLabel/resources/strings/strings-en.properties index f59e43aa92ff9ccd04686e9c16db181983b57b2c..3c4eda65a32e1048405041667ba61bdb639bfd7b 100644 --- a/PPOCRLabel/resources/strings/strings-en.properties +++ b/PPOCRLabel/resources/strings/strings-en.properties @@ -106,4 +106,7 @@ undo=Undo undoLastPoint=Undo Last Point autoSaveMode=Auto Export Label Mode lockBox=Lock selected box/Unlock all box -lockBoxDetail=Lock selected box/Unlock all box \ No newline at end of file +lockBoxDetail=Lock selected box/Unlock all box +keyListTitle=Key List +keyDialogTip=Enter object label +keyChange=Change Box Key diff --git a/PPOCRLabel/resources/strings/strings-zh-CN.properties b/PPOCRLabel/resources/strings/strings-zh-CN.properties index d8bd9d4bff02748397d7a57a6205e67ff69779c2..a7c30368b87354cbae81b2cdead8ad31b2a8c1eb 100644 --- a/PPOCRLabel/resources/strings/strings-zh-CN.properties +++ b/PPOCRLabel/resources/strings/strings-zh-CN.properties @@ -107,3 +107,6 @@ undoLastPoint=撤销上个点 autoSaveMode=自动导出标记结果 lockBox=锁定框/解除锁定框 lockBoxDetail=若当前没有框处于锁定状态则锁定选中的框,若存在锁定框则解除所有锁定框的锁定状态 +keyListTitle=关键词列表 +keyDialogTip=请输入类型名称 +keyChange=更改Box关键字类别 \ No newline at end of file diff --git a/README.md b/README.md index b1d464879bdbe64c8812a7ce335023ba5cca9727..95f35277a1d634c87d5720c7151d066b09dbdae7 100644 --- a/README.md +++ b/README.md @@ -152,7 +152,7 @@ For a new language request, please refer to [Guideline for new language_requests [1] PP-OCR is a practical ultra-lightweight OCR system. It is mainly composed of three parts: DB text detection, detection frame correction and CRNN text recognition. 
The system adopts 19 effective strategies from 8 aspects including backbone network selection and adjustment, prediction head design, data augmentation, learning rate transformation strategy, regularization parameter selection, pre-training model use, and automatic model tailoring and quantization to optimize and slim down the models of each module (as shown in the green box above). The final results are an ultra-lightweight Chinese and English OCR model with an overall size of 3.5M and a 2.8M English digital OCR model. For more details, please refer to the PP-OCR technical article (https://arxiv.org/abs/2009.09941). -[2] On the basis of PP-OCR, PP-OCRv2 is further optimized in five aspects. The detection model adopts CML(Collaborative Mutual Learning) knowledge distillation strategy and CopyPaste data expansion strategy. The recognition model adopts LCNet lightweight backbone network, U-DML knowledge distillation strategy and enhanced CTC loss function improvement (as shown in the red box above), which further improves the inference speed and prediction effect. For more details, please refer to the technical report of PP-OCRv2 (arXiv link is coming soon). +[2] On the basis of PP-OCR, PP-OCRv2 is further optimized in five aspects. The detection model adopts CML(Collaborative Mutual Learning) knowledge distillation strategy and CopyPaste data expansion strategy. The recognition model adopts LCNet lightweight backbone network, U-DML knowledge distillation strategy and enhanced CTC loss function improvement (as shown in the red box above), which further improves the inference speed and prediction effect. For more details, please refer to the technical report of PP-OCRv2 (https://arxiv.org/abs/2109.03144). @@ -181,16 +181,11 @@ For a new language request, please refer to [Guideline for new language_requests ## Guideline for New Language Requests -If you want to request a new language support, a PR with 2 following files are needed: +If you want to request a new language support, a PR with 1 following files are needed: 1. In folder [ppocr/utils/dict](./ppocr/utils/dict), it is necessary to submit the dict text to this path and name it with `{language}_dict.txt` that contains a list of all characters. Please see the format example from other files in that folder. -2. In folder [ppocr/utils/corpus](./ppocr/utils/corpus), -it is necessary to submit the corpus to this path and name it with `{language}_corpus.txt` that contains a list of words in your language. -Maybe, 50000 words per language is necessary at least. -Of course, the more, the better. - If your language has unique elements, please tell me in advance within any way, such as useful links, wikipedia and so on. More details, please refer to [Multilingual OCR Development Plan](https://github.com/PaddlePaddle/PaddleOCR/issues/1048). 
diff --git a/benchmark/analysis.py b/benchmark/analysis.py index c4189b99d8ee082082a254718617a7e58bebe961..7322f00ace94ff25e8aba38106471d32a5e8223d 100644 --- a/benchmark/analysis.py +++ b/benchmark/analysis.py @@ -26,35 +26,57 @@ def parse_args(): parser.add_argument( "--filename", type=str, help="The name of log which need to analysis.") parser.add_argument( - "--log_with_profiler", type=str, help="The path of train log with profiler") + "--log_with_profiler", + type=str, + help="The path of train log with profiler") parser.add_argument( "--profiler_path", type=str, help="The path of profiler timeline log.") parser.add_argument( "--keyword", type=str, help="Keyword to specify analysis data") parser.add_argument( - "--separator", type=str, default=None, help="Separator of different field in log") + "--separator", + type=str, + default=None, + help="Separator of different field in log") parser.add_argument( '--position', type=int, default=None, help='The position of data field') parser.add_argument( - '--range', type=str, default="", help='The range of data field to intercept') + '--range', + type=str, + default="", + help='The range of data field to intercept') parser.add_argument( '--base_batch_size', type=int, help='base_batch size on gpu') parser.add_argument( - '--skip_steps', type=int, default=0, help='The number of steps to be skipped') + '--skip_steps', + type=int, + default=0, + help='The number of steps to be skipped') parser.add_argument( - '--model_mode', type=int, default=-1, help='Analysis mode, default value is -1') + '--model_mode', + type=int, + default=-1, + help='Analysis mode, default value is -1') + parser.add_argument('--ips_unit', type=str, default=None, help='IPS unit') parser.add_argument( - '--ips_unit', type=str, default=None, help='IPS unit') - parser.add_argument( - '--model_name', type=str, default=0, help='training model_name, transformer_base') + '--model_name', + type=str, + default=0, + help='training model_name, transformer_base') parser.add_argument( '--mission_name', type=str, default=0, help='training mission name') parser.add_argument( '--direction_id', type=int, default=0, help='training direction_id') parser.add_argument( - '--run_mode', type=str, default="sp", help='multi process or single process') + '--run_mode', + type=str, + default="sp", + help='multi process or single process') parser.add_argument( - '--index', type=int, default=1, help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}') + '--index', + type=int, + default=1, + help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}') parser.add_argument( '--gpu_num', type=int, default=1, help='nums of training gpus') args = parser.parse_args() @@ -72,7 +94,12 @@ def _is_number(num): class TimeAnalyzer(object): - def __init__(self, filename, keyword=None, separator=None, position=None, range="-1"): + def __init__(self, + filename, + keyword=None, + separator=None, + position=None, + range="-1"): if filename is None: raise Exception("Please specify the filename!") @@ -99,7 +126,8 @@ class TimeAnalyzer(object): # Distil the string from a line. line = line.strip() - line_words = line.split(self.separator) if self.separator else line.split() + line_words = line.split( + self.separator) if self.separator else line.split() if args.position: result = line_words[self.position] else: @@ -108,27 +136,36 @@ class TimeAnalyzer(object): if line_words[i] == self.keyword: result = line_words[i + 1] break - + # Distil the result from the picked string. 
if not self.range: result = result[0:] elif _is_number(self.range): - result = result[0: int(self.range)] + result = result[0:int(self.range)] else: - result = result[int(self.range.split(":")[0]): int(self.range.split(":")[1])] + result = result[int(self.range.split(":")[0]):int( + self.range.split(":")[1])] self.records.append(float(result)) except Exception as exc: - print("line is: {}; separator={}; position={}".format(line, self.separator, self.position)) + print("line is: {}; separator={}; position={}".format( + line, self.separator, self.position)) - print("Extract {} records: separator={}; position={}".format(len(self.records), self.separator, self.position)) + print("Extract {} records: separator={}; position={}".format( + len(self.records), self.separator, self.position)) - def _get_fps(self, mode, batch_size, gpu_num, avg_of_records, run_mode, unit=None): + def _get_fps(self, + mode, + batch_size, + gpu_num, + avg_of_records, + run_mode, + unit=None): if mode == -1 and run_mode == 'sp': assert unit, "Please set the unit when mode is -1." fps = gpu_num * avg_of_records elif mode == -1 and run_mode == 'mp': assert unit, "Please set the unit when mode is -1." - fps = gpu_num * avg_of_records #temporarily, not used now + fps = gpu_num * avg_of_records #temporarily, not used now print("------------this is mp") elif mode == 0: # s/step -> samples/s @@ -155,12 +192,20 @@ class TimeAnalyzer(object): return fps, unit - def analysis(self, batch_size, gpu_num=1, skip_steps=0, mode=-1, run_mode='sp', unit=None): + def analysis(self, + batch_size, + gpu_num=1, + skip_steps=0, + mode=-1, + run_mode='sp', + unit=None): if batch_size <= 0: print("base_batch_size should larger than 0.") return 0, '' - if len(self.records) <= skip_steps: # to address the condition which item of log equals to skip_steps + if len( + self.records + ) <= skip_steps: # to address the condition which item of log equals to skip_steps print("no records") return 0, '' @@ -180,16 +225,20 @@ class TimeAnalyzer(object): skip_max = self.records[i] avg_of_records = sum_of_records / float(count) - avg_of_records_skipped = sum_of_records_skipped / float(count - skip_steps) + avg_of_records_skipped = sum_of_records_skipped / float(count - + skip_steps) - fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records, run_mode, unit) - fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num, avg_of_records_skipped, run_mode, unit) + fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records, + run_mode, unit) + fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num, + avg_of_records_skipped, run_mode, unit) if mode == -1: print("average ips of %d steps, skip 0 step:" % count) print("\tAvg: %.3f %s" % (avg_of_records, fps_unit)) print("\tFPS: %.3f %s" % (fps, fps_unit)) if skip_steps > 0: - print("average ips of %d steps, skip %d steps:" % (count, skip_steps)) + print("average ips of %d steps, skip %d steps:" % + (count, skip_steps)) print("\tAvg: %.3f %s" % (avg_of_records_skipped, fps_unit)) print("\tMin: %.3f %s" % (skip_min, fps_unit)) print("\tMax: %.3f %s" % (skip_max, fps_unit)) @@ -199,7 +248,8 @@ class TimeAnalyzer(object): print("\tAvg: %.3f steps/s" % avg_of_records) print("\tFPS: %.3f %s" % (fps, fps_unit)) if skip_steps > 0: - print("average latency of %d steps, skip %d steps:" % (count, skip_steps)) + print("average latency of %d steps, skip %d steps:" % + (count, skip_steps)) print("\tAvg: %.3f steps/s" % avg_of_records_skipped) print("\tMin: %.3f steps/s" % skip_min) print("\tMax: %.3f 
steps/s" % skip_max) @@ -209,7 +259,8 @@ class TimeAnalyzer(object): print("\tAvg: %.3f s/step" % avg_of_records) print("\tFPS: %.3f %s" % (fps, fps_unit)) if skip_steps > 0: - print("average latency of %d steps, skip %d steps:" % (count, skip_steps)) + print("average latency of %d steps, skip %d steps:" % + (count, skip_steps)) print("\tAvg: %.3f s/step" % avg_of_records_skipped) print("\tMin: %.3f s/step" % skip_min) print("\tMax: %.3f s/step" % skip_max) @@ -236,7 +287,8 @@ if __name__ == "__main__": if args.gpu_num == 1: run_info["log_with_profiler"] = args.log_with_profiler run_info["profiler_path"] = args.profiler_path - analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator, args.position, args.range) + analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator, + args.position, args.range) run_info["FINAL_RESULT"], run_info["UNIT"] = analyzer.analysis( batch_size=args.base_batch_size, gpu_num=args.gpu_num, @@ -245,29 +297,50 @@ if __name__ == "__main__": run_mode=args.run_mode, unit=args.ips_unit) try: - if int(os.getenv('job_fail_flag')) == 1 or int(run_info["FINAL_RESULT"]) == 0: + if int(os.getenv('job_fail_flag')) == 1 or int(run_info[ + "FINAL_RESULT"]) == 0: run_info["JOB_FAIL_FLAG"] = 1 except: pass elif args.index == 3: run_info["FINAL_RESULT"] = {} - records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead', None, 3, '').records - records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead', None, 5).records - records_ct_total = TimeAnalyzer(args.filename, 'Computation time', None, 3, '').records - records_gm_total = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 4, '').records - records_gm_ratio = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 6).records - records_gmas_total = TimeAnalyzer(args.filename, 'GpuMemcpyAsync Calls', None, 4, '').records - records_gms_total = TimeAnalyzer(args.filename, 'GpuMemcpySync Calls', None, 4, '').records - run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[0] if records_fo_total else 0 - run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[0] if records_fo_ratio else 0 - run_info["FINAL_RESULT"]["ComputationTime_Total"] = records_ct_total[0] if records_ct_total else 0 - run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[0] if records_gm_total else 0 - run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[0] if records_gm_ratio else 0 - run_info["FINAL_RESULT"]["GpuMemcpyAsync_Total"] = records_gmas_total[0] if records_gmas_total else 0 - run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[0] if records_gms_total else 0 + records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead', + None, 3, '').records + records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead', + None, 5).records + records_ct_total = TimeAnalyzer(args.filename, 'Computation time', + None, 3, '').records + records_gm_total = TimeAnalyzer(args.filename, + 'GpuMemcpy Calls', + None, 4, '').records + records_gm_ratio = TimeAnalyzer(args.filename, + 'GpuMemcpy Calls', + None, 6).records + records_gmas_total = TimeAnalyzer(args.filename, + 'GpuMemcpyAsync Calls', + None, 4, '').records + records_gms_total = TimeAnalyzer(args.filename, + 'GpuMemcpySync Calls', + None, 4, '').records + run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[ + 0] if records_fo_total else 0 + run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[ + 0] if records_fo_ratio else 0 + run_info["FINAL_RESULT"][ + "ComputationTime_Total"] = records_ct_total[ 
+ 0] if records_ct_total else 0 + run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[ + 0] if records_gm_total else 0 + run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[ + 0] if records_gm_ratio else 0 + run_info["FINAL_RESULT"][ + "GpuMemcpyAsync_Total"] = records_gmas_total[ + 0] if records_gmas_total else 0 + run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[ + 0] if records_gms_total else 0 else: print("Not support!") except Exception: - traceback.print_exc() - print("{}".format(json.dumps(run_info))) # it's required, for the log file path insert to the database - + traceback.print_exc() + print("{}".format(json.dumps(run_info)) + ) # it's required, for the log file path insert to the database diff --git a/benchmark/run_benchmark_det.sh b/benchmark/run_benchmark_det.sh index 54263e953f3f758b318df147d34ee942a247ed18..818aa7e3e1fb342174a0cf5be4d45af0b0205a39 100644 --- a/benchmark/run_benchmark_det.sh +++ b/benchmark/run_benchmark_det.sh @@ -58,3 +58,4 @@ source ${BENCHMARK_ROOT}/scripts/run_model.sh # 在该脚本中会对符合 _set_params $@ #_train # 如果只想产出训练log,不解析,可取消注释 _run # 该函数在run_model.sh中,执行时会调用_train; 如果不联调只想要产出训练log可以注掉本行,提交时需打开 + diff --git a/benchmark/run_det.sh b/benchmark/run_det.sh index be0c141f7ee168d10eebb6efb57158d18ed02f72..981510c9ae80698dae7f4c8b342dc50442aa7913 100644 --- a/benchmark/run_det.sh +++ b/benchmark/run_det.sh @@ -36,3 +36,4 @@ for model_mode in ${model_mode_list[@]}; do done + diff --git a/configs/det/det_mv3_db.yml b/configs/det/det_mv3_db.yml index 1fab509d12167f0cfa3bb77cf21173c68af55737..6edf0b9194ee59143e287394f505b60010ec6644 100644 --- a/configs/det/det_mv3_db.yml +++ b/configs/det/det_mv3_db.yml @@ -1,5 +1,6 @@ Global: use_gpu: true + use_xpu: false epoch_num: 1200 log_smooth_window: 20 print_batch_step: 10 diff --git a/configs/vqa/re/layoutlmv2.yml b/configs/vqa/re/layoutlmv2.yml new file mode 100644 index 0000000000000000000000000000000000000000..2fa5fd1165c20bbfa8d8505bbb53d48744daebef --- /dev/null +++ b/configs/vqa/re/layoutlmv2.yml @@ -0,0 +1,123 @@ +Global: + use_gpu: True + epoch_num: &epoch_num 200 + log_smooth_window: 10 + print_batch_step: 10 + save_model_dir: ./output/re_layoutlmv2/ + save_epoch_step: 2000 + # evaluation is run every 10 iterations after the 0th iteration + eval_batch_step: [ 0, 19 ] + cal_metric_during_train: False + save_inference_dir: + use_visualdl: False + seed: 2048 + infer_img: doc/vqa/input/zh_val_21.jpg + save_res_path: ./output/re/ + +Architecture: + model_type: vqa + algorithm: &algorithm "LayoutLMv2" + Transform: + Backbone: + name: LayoutLMv2ForRe + pretrained: True + checkpoints: + +Loss: + name: LossFromOutput + key: loss + reduction: mean + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + clip_norm: 10 + lr: + learning_rate: 0.00005 + warmup_epoch: 10 + regularizer: + name: L2 + factor: 0.00000 + +PostProcess: + name: VQAReTokenLayoutLMPostProcess + +Metric: + name: VQAReTokenMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: train_data/XFUND/zh_train/image + label_file_list: + - train_data/XFUND/zh_train/xfun_normalize_train.json + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - VQATokenLabelEncode: # Class handling label + contains_re: True + algorithm: *algorithm + class_path: &class_path ppstructure/vqa/labels/labels_ser.txt + - VQATokenPad: + max_seq_len: &max_seq_len 512 + return_attention_mask: True + - VQAReTokenRelation: + - VQAReTokenChunk: + max_seq_len: 
*max_seq_len + - Resize: + size: [224,224] + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'input_ids', 'bbox', 'image', 'attention_mask', 'token_type_ids','entities', 'relations'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 8 + collate_fn: ListCollator + +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/XFUND/zh_val/image + label_file_list: + - train_data/XFUND/zh_val/xfun_normalize_val.json + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - VQATokenLabelEncode: # Class handling label + contains_re: True + algorithm: *algorithm + class_path: *class_path + - VQATokenPad: + max_seq_len: *max_seq_len + return_attention_mask: True + - VQAReTokenRelation: + - VQAReTokenChunk: + max_seq_len: *max_seq_len + - Resize: + size: [224,224] + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'input_ids', 'bbox', 'image', 'attention_mask', 'token_type_ids','entities', 'relations'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 8 + num_workers: 8 + collate_fn: ListCollator diff --git a/configs/vqa/re/layoutxlm.yml b/configs/vqa/re/layoutxlm.yml index ca6b0d29db534eb1189e305d1f033ece24c368b9..ff16120ac1be92e989ebfda6af3ccf346dde89cd 100644 --- a/configs/vqa/re/layoutxlm.yml +++ b/configs/vqa/re/layoutxlm.yml @@ -21,7 +21,7 @@ Architecture: Backbone: name: LayoutXLMForRe pretrained: True - checkpoints: + checkpoints: Loss: name: LossFromOutput @@ -35,6 +35,7 @@ Optimizer: clip_norm: 10 lr: learning_rate: 0.00005 + warmup_epoch: 10 regularizer: name: L2 factor: 0.00000 @@ -81,7 +82,7 @@ Train: shuffle: True drop_last: False batch_size_per_card: 8 - num_workers: 4 + num_workers: 8 collate_fn: ListCollator Eval: @@ -118,5 +119,5 @@ Eval: shuffle: False drop_last: False batch_size_per_card: 8 - num_workers: 4 + num_workers: 8 collate_fn: ListCollator diff --git a/configs/vqa/ser/layoutlmv2.yml b/configs/vqa/ser/layoutlmv2.yml new file mode 100644 index 0000000000000000000000000000000000000000..33406252b31adf4175d7ea2f57772b0faf33cdab --- /dev/null +++ b/configs/vqa/ser/layoutlmv2.yml @@ -0,0 +1,121 @@ +Global: + use_gpu: True + epoch_num: &epoch_num 200 + log_smooth_window: 10 + print_batch_step: 10 + save_model_dir: ./output/ser_layoutlmv2/ + save_epoch_step: 2000 + # evaluation is run every 10 iterations after the 0th iteration + eval_batch_step: [ 0, 19 ] + cal_metric_during_train: False + save_inference_dir: + use_visualdl: False + seed: 2022 + infer_img: doc/vqa/input/zh_val_0.jpg + save_res_path: ./output/ser/ + +Architecture: + model_type: vqa + algorithm: &algorithm "LayoutLMv2" + Transform: + Backbone: + name: LayoutLMv2ForSer + pretrained: True + checkpoints: + num_classes: &num_classes 7 + +Loss: + name: VQASerTokenLayoutLMLoss + num_classes: *num_classes + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + lr: + name: Linear + learning_rate: 0.00005 + epochs: *epoch_num + warmup_epoch: 2 + regularizer: + + name: L2 + factor: 0.00000 + +PostProcess: + name: VQASerTokenLayoutLMPostProcess + class_path: &class_path ppstructure/vqa/labels/labels_ser.txt + +Metric: + name: VQASerTokenMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: 
train_data/XFUND/zh_train/image + label_file_list: + - train_data/XFUND/zh_train/xfun_normalize_train.json + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - VQATokenLabelEncode: # Class handling label + contains_re: False + algorithm: *algorithm + class_path: *class_path + - VQATokenPad: + max_seq_len: &max_seq_len 512 + return_attention_mask: True + - VQASerTokenChunk: + max_seq_len: *max_seq_len + - Resize: + size: [224,224] + - NormalizeImage: + scale: 1 + mean: [ 123.675, 116.28, 103.53 ] + std: [ 58.395, 57.12, 57.375 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'input_ids','labels', 'bbox', 'image', 'attention_mask', 'token_type_ids'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/XFUND/zh_val/image + label_file_list: + - train_data/XFUND/zh_val/xfun_normalize_val.json + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - VQATokenLabelEncode: # Class handling label + contains_re: False + algorithm: *algorithm + class_path: *class_path + - VQATokenPad: + max_seq_len: *max_seq_len + return_attention_mask: True + - VQASerTokenChunk: + max_seq_len: *max_seq_len + - Resize: + size: [224,224] + - NormalizeImage: + scale: 1 + mean: [ 123.675, 116.28, 103.53 ] + std: [ 58.395, 57.12, 57.375 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'input_ids', 'labels', 'bbox', 'image', 'attention_mask', 'token_type_ids'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 8 + num_workers: 4 diff --git a/deploy/slim/prune/README.md b/deploy/slim/prune/README.md index 7b8dd169c5fa9d01421070f1ccc2bd4e8ed543a2..c438572318f57fdfe9066ff2135156d7129bee4c 100644 --- a/deploy/slim/prune/README.md +++ b/deploy/slim/prune/README.md @@ -45,7 +45,7 @@ python3 setup.py install 'conv10_expand_weights': {0.1: 0.006509952684312718, 0.2: 0.01827734339798862, 0.3: 0.014528405644659832, 0.6: 0.06536008804270439, 0.8: 0.11798612250664964, 0.7: 0.12391408417493704, 0.4: 0.030615754498018757, 0.5: 0.047105205602406594} 'conv10_linear_weights': {0.1: 0.05113190831455035, 0.2: 0.07705573833558801, 0.3: 0.12096721757739311, 0.6: 0.5135061352930738, 0.8: 0.7908166677143281, 0.7: 0.7272187676899062, 0.4: 0.1819252083008504, 0.5: 0.3728054727792405} } -加载敏感度文件后会返回一个字典,字典中的keys为网络模型参数模型的名字,values为一个字典,里面保存了相应网络层的裁剪敏感度信息。例如在例子中,conv10_expand_weights所对应的网络层在裁掉10%的卷积核后模型性能相较原模型会下降0.65%,详细信息可见[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/algo/algo.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86) +加载敏感度文件后会返回一个字典,字典中的keys为网络模型参数模型的名字,values为一个字典,里面保存了相应网络层的裁剪敏感度信息。例如在例子中,conv10_expand_weights所对应的网络层在裁掉10%的卷积核后模型性能相较原模型会下降0.65%,详细信息可见[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/release/2.0-alpha/docs/zh_cn/algo/algo.md) 进入PaddleOCR根目录,通过以下命令对模型进行敏感度分析训练: ```bash diff --git a/deploy/slim/prune/README_en.md b/deploy/slim/prune/README_en.md index f0d652f249686c1d462cd2aa71f4766cf39e763e..f8fbed47ca1c788ea816cc76f1092b17f0ea5219 100644 --- a/deploy/slim/prune/README_en.md +++ b/deploy/slim/prune/README_en.md @@ -3,7 +3,7 @@ Generally, a more complex model would achive better performance in the task, but it also leads to some redundancy in the model. 
Model Pruning is a technique that reduces this redundancy by removing the sub-models in the neural network model, so as to reduce model calculation complexity and improve model inference performance. -This example uses PaddleSlim provided[APIs of Pruning](https://paddlepaddle.github.io/PaddleSlim/api/prune_api/) to compress the OCR model. +This example uses PaddleSlim provided[APIs of Pruning](https://github.com/PaddlePaddle/PaddleSlim/tree/develop/docs/zh_cn/api_cn/dygraph/pruners) to compress the OCR model. [PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim), an open source library which integrates model pruning, quantization (including quantization training and offline quantization), distillation, neural network architecture search, and many other commonly used and leading model compression technique in the industry. It is recommended that you could understand following pages before reading this example: @@ -35,7 +35,7 @@ PaddleOCR also provides a series of [models](../../../doc/doc_en/models_list_en. ### 3. Pruning sensitivity analysis - After the pre-trained model is loaded, sensitivity analysis is performed on each network layer of the model to understand the redundancy of each network layer, and save a sensitivity file which named: sen.pickle. After that, user could load the sensitivity file via the [methods provided by PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L221) and determining the pruning ratio of each network layer automatically. For specific details of sensitivity analysis, see:[Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md) + After the pre-trained model is loaded, sensitivity analysis is performed on each network layer of the model to understand the redundancy of each network layer, and save a sensitivity file which named: sen.pickle. After that, user could load the sensitivity file via the [methods provided by PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L221) and determining the pruning ratio of each network layer automatically. For specific details of sensitivity analysis, see:[Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/en/tutorials/image_classification_sensitivity_analysis_tutorial_en.md) The data format of sensitivity file: sen.pickle(Dict){ 'layer_weight_name_0': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss} @@ -47,7 +47,7 @@ PaddleOCR also provides a series of [models](../../../doc/doc_en/models_list_en. 'conv10_expand_weights': {0.1: 0.006509952684312718, 0.2: 0.01827734339798862, 0.3: 0.014528405644659832, 0.6: 0.06536008804270439, 0.8: 0.11798612250664964, 0.7: 0.12391408417493704, 0.4: 0.030615754498018757, 0.5: 0.047105205602406594} 'conv10_linear_weights': {0.1: 0.05113190831455035, 0.2: 0.07705573833558801, 0.3: 0.12096721757739311, 0.6: 0.5135061352930738, 0.8: 0.7908166677143281, 0.7: 0.7272187676899062, 0.4: 0.1819252083008504, 0.5: 0.3728054727792405} } - The function would return a dict after loading the sensitivity file. The keys of the dict are name of parameters in each layer. And the value of key is the information about pruning sensitivity of corresponding layer. In example, pruning 10% filter of the layer corresponding to conv10_expand_weights would lead to 0.65% degradation of model performance. 
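Note (not part of the patch): since sen.pickle is a plain pickled dict in the format quoted above, it can be inspected with a few lines of standalone Python; the file name and the example key below simply follow the values shown in this section.

```python
# Minimal sketch for reading the sensitivity dict described above. Assumes a
# sen.pickle produced by the sensitivity analysis step sits in the current
# directory; layer names and ratios follow the quoted example.
import pickle

with open("sen.pickle", "rb") as f:
    sensitivities = pickle.load(f)

# e.g. sensitivities['conv10_expand_weights'][0.1] is about 0.0065, i.e.
# pruning 10% of that layer's filters costs roughly 0.65% of model accuracy.
for layer_name, ratio_to_loss in sensitivities.items():
    for ratio, acc_loss in sorted(ratio_to_loss.items()):
        print("%s: prune %.0f%% -> acc loss %.4f" % (layer_name, ratio * 100, acc_loss))
```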
The details could be seen at: [Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/algo/algo.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86) + The function would return a dict after loading the sensitivity file. The keys of the dict are name of parameters in each layer. And the value of key is the information about pruning sensitivity of corresponding layer. In example, pruning 10% filter of the layer corresponding to conv10_expand_weights would lead to 0.65% degradation of model performance. The details could be seen at: [Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/release/2.0-alpha/docs/zh_cn/algo/algo.md) Enter the PaddleOCR root directory,perform sensitivity analysis on the model with the following command: diff --git a/deploy/slim/quantization/README_en.md b/deploy/slim/quantization/README_en.md index 4cafe5f44e48a479cf5b0e4209b8e335a7e4917d..d3bf12d625b076c7bc18016bc9973d1212b3d70b 100644 --- a/deploy/slim/quantization/README_en.md +++ b/deploy/slim/quantization/README_en.md @@ -5,11 +5,11 @@ Generally, a more complex model would achieve better performance in the task, bu Quantization is a technique that reduces this redundancy by reducing the full precision data to a fixed number, so as to reduce model calculation complexity and improve model inference performance. -This example uses PaddleSlim provided [APIs of Quantization](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/) to compress the OCR model. +This example uses PaddleSlim provided [APIs of Quantization](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/api_cn/dygraph/quanter/qat.rst) to compress the OCR model. It is recommended that you could understand following pages before reading this example: - [The training strategy of OCR model](../../../doc/doc_en/quickstart_en.md) -- [PaddleSlim Document](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/) +- [PaddleSlim Document](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/api_cn/dygraph/quanter/qat.rst) ## Quick Start Quantization is mostly suitable for the deployment of lightweight models on mobile terminals. diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index cd5369f64bbfcf8584f3b3af30d65568770b6033..22e7ad7fc1838008be4e5a6daa6b9d273ea0ea78 100644 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -11,7 +11,7 @@ PaddleOCR收集整理了自从开源以来在issues和用户群中的常见问 OCR领域大佬众多,本文档回答主要依赖有限的项目实践,难免挂一漏万,如有遗漏和不足,也**希望有识之士帮忙补充和修正**,万分感谢。 - [FAQ](#faq) - + * [1. 通用问题](#1) + [1.1 检测](#11) + [1.2 识别](#12) @@ -20,7 +20,7 @@ OCR领域大佬众多,本文档回答主要依赖有限的项目实践,难 + [1.5 垂类场景实现思路](#15) + [1.6 训练过程与模型调优](#16) + [1.7 补充资料](#17) - + * [2. PaddleOCR实战问题](#2) + [2.1 PaddleOCR repo](#21) + [2.2 安装环境](#22) @@ -734,7 +734,7 @@ C++TensorRT预测需要使用支持TRT的预测库并在编译时打开[-DWITH_T #### Q:PaddleOCR中,对于模型预测加速,CPU加速的途径有哪些?基于TenorRT加速GPU对输入有什么要求? 
-**A**:(1)CPU可以使用mkldnn进行加速;对于python inference的话,可以把enable_mkldnn改为true,[参考代码](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/tools/infer/utility.py#L99),对于cpp inference的话,在配置文件里面配置use_mkldnn 1即可,[参考代码](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/deploy/cpp_infer/tools/config.txt#L6) +**A**:(1)CPU可以使用mkldnn进行加速;对于python inference的话,可以把enable_mkldnn改为true,[参考代码](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/tools/infer/utility.py#L99),对于cpp inference的话,可参考[文档](https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/deploy/cpp_infer) (2)GPU需要注意变长输入问题等,TRT6 之后才支持变长输入 @@ -838,4 +838,4 @@ nvidia-smi --lock-gpu-clocks=1590 -i 0 #### Q: 预测时显存爆炸、内存泄漏问题? -**A**: 打开显存/内存优化开关`enable_memory_optim`可以解决该问题,相关代码已合入,[查看详情](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/tools/infer/utility.py#L153)。 \ No newline at end of file +**A**: 打开显存/内存优化开关`enable_memory_optim`可以解决该问题,相关代码已合入,[查看详情](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/tools/infer/utility.py#L153)。 diff --git a/doc/doc_ch/config.md b/doc/doc_ch/config.md index 40c63905c3f03070a9dcbf0176ada31378b14fee..1668eba19eb0bcec6bfe3abd39bb6ca73b8f6c14 100644 --- a/doc/doc_ch/config.md +++ b/doc/doc_ch/config.md @@ -66,7 +66,7 @@ | :---------------------: | :---------------------: | :--------------: | :--------------------: | | model_type | 网络类型 | rec | 目前支持`rec`,`det`,`cls` | | algorithm | 模型名称 | CRNN | 支持列表见[algorithm_overview](./algorithm_overview.md) | -| **Transform** | 设置变换方式 | - | 目前仅rec类型的算法支持, 具体见[ppocr/modeling/transform](../../ppocr/modeling/transform) | +| **Transform** | 设置变换方式 | - | 目前仅rec类型的算法支持, 具体见[ppocr/modeling/transforms](../../ppocr/modeling/transforms) | | name | 变换方式类名 | TPS | 目前支持`TPS` | | num_fiducial | TPS控制点数 | 20 | 上下边各十个 | | loc_lr | 定位网络学习率 | 0.1 | \ | @@ -176,7 +176,7 @@ PaddleOCR目前已支持80种(除中文外)语种识别,`configs/rec/multi --dict {path/of/dict} \ # 字典文件路径 -o Global.use_gpu=False # 是否使用gpu ... - + ``` 意大利文由拉丁字母组成,因此执行完命令后会得到名为 rec_latin_lite_train.yml 的配置文件。 @@ -191,21 +191,21 @@ PaddleOCR目前已支持80种(除中文外)语种识别,`configs/rec/multi epoch_num: 500 ... character_dict_path: {path/of/dict} # 字典文件所在路径 - + Train: dataset: name: SimpleDataSet data_dir: train_data/ # 数据存放根目录 label_file_list: ["./train_data/train_list.txt"] # 训练集label路径 ... - + Eval: dataset: name: SimpleDataSet data_dir: train_data/ # 数据存放根目录 label_file_list: ["./train_data/val_list.txt"] # 验证集label路径 ... 
- + ``` 目前PaddleOCR支持的多语言算法有: diff --git a/doc/doc_ch/serving_inference.md b/doc/doc_ch/serving_inference.md index 7a53628e2f93d4d0ec944ec18ec5f06452698512..fea5a24546ddd2141085f56eeb99cdf72577bff3 100644 --- a/doc/doc_ch/serving_inference.md +++ b/doc/doc_ch/serving_inference.md @@ -20,7 +20,7 @@ **Python操作指南:** -目前Serving用于OCR的部分功能还在测试当中,因此在这里我们给出[Servnig latest package](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) +目前Serving用于OCR的部分功能还在测试当中,因此在这里我们给出[Servnig latest package](https://github.com/PaddlePaddle/Serving/blob/develop/doc/Latest_Packages_CN.md) 大家根据自己的环境选择需要安装的whl包即可,例如以Python 3.5为例,执行下列命令 ``` #CPU/GPU版本选择一个 diff --git a/doc/doc_ch/thirdparty.md b/doc/doc_ch/thirdparty.md index 2be6a3e9b797e9f324f2afd1b07e8086f1759295..ff9059cdf698938fcd04de852ecef2419b23ee85 100644 --- a/doc/doc_ch/thirdparty.md +++ b/doc/doc_ch/thirdparty.md @@ -24,6 +24,7 @@ PaddleOCR希望可以通过AI的力量助力任何一位有梦想的开发者实 | 通用工具 | [ocr_sdk](https://github.com/mymagicpower/AIAS/blob/main/1_image_sdks/text_recognition/ocr_sdk) | OCR java SDK工具箱 | [Calvin](https://github.com/mymagicpower) | | 通用工具 | [iocr](https://github.com/mymagicpower/AIAS/blob/main/8_suite_hub/iocr) | IOCR 自定义模板识别(支持表格识别) | [Calvin](https://github.com/mymagicpower) | | 通用工具 | [Lmdb Dataset Format Conversion Tool](https://github.com/OneYearIsEnough/PaddleOCR-Recog-LmdbDataset-Conversion) | 文本识别任务中lmdb数据格式转换工具 | [OneYearIsEnough](https://github.com/OneYearIsEnough) | +| 通用工具 | [用paddleocr打造一款“盗幕笔记”](https://github.com/kjf4096/paddleocr_dmbj) | 用PaddleOCR记笔记 | [kjf4096](https://github.com/kjf4096) | | 垂类工具 | [AI Studio项目](https://aistudio.baidu.com/aistudio/projectdetail/1054614?channelType=0&channel=0) | 英文视频自动生成字幕 | [叶月水狐](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/322052) | | 垂类工具 | [id_card_ocr](https://github.com/baseli/id_card_ocr) | 身份证复印件识别 | [baseli](https://github.com/baseli) | | 垂类工具 | [Paddle_Table_Image_Reader](https://github.com/thunder95/Paddle_Table_Image_Reader) | 能看懂表格图片的数据助手 | [thunder95](https://github.com/thunder95]) | @@ -39,6 +40,7 @@ PaddleOCR希望可以通过AI的力量助力任何一位有梦想的开发者实 | 应用部署 | [PaddleOCR-Paddlejs-Vue-Demo](https://github.com/Lovely-Pig/PaddleOCR-Paddlejs-Vue-Demo) | 使用Paddle.js和Vue部署PaddleOCR | [Lovely-Pig](https://github.com/Lovely-Pig) | | 应用部署 | [PaddleOCR-Paddlejs-React-Demo](https://github.com/Lovely-Pig/PaddleOCR-Paddlejs-React-Demo) | 使用Paddle.js和React部署PaddleOCR | [Lovely-Pig](https://github.com/Lovely-Pig) | | 学术前沿模型训练与推理 | [AI Studio项目](https://aistudio.baidu.com/aistudio/projectdetail/3397137) | StarNet-MobileNetV3算法–中文训练 | [xiaoyangyang2](https://github.com/xiaoyangyang2) | +| 学术前沿模型训练与推理 | [ABINet-paddle](https://github.com/Huntersdeng/abinet-paddle) | ABINet算法前向运算的paddle实现以及模型各部分的实现细节分析 | [Huntersdeng](https://github.com/Huntersdeng) | ### 1.2 为PaddleOCR新增功能 @@ -46,6 +48,7 @@ PaddleOCR希望可以通过AI的力量助力任何一位有梦想的开发者实 - 非常感谢 [tangmq](https://gitee.com/tangmq) 给PaddleOCR增加Docker化部署服务,支持快速发布可调用的Restful API服务([#507](https://github.com/PaddlePaddle/PaddleOCR/pull/507))。 - 非常感谢 [lijinhan](https://github.com/lijinhan) 给PaddleOCR增加java SpringBoot 调用OCR Hubserving接口完成对OCR服务化部署的使用([#1027](https://github.com/PaddlePaddle/PaddleOCR/pull/1027))。 - 非常感谢 [Evezerest](https://github.com/Evezerest), [ninetailskim](https://github.com/ninetailskim), [edencfc](https://github.com/edencfc), [BeyondYourself](https://github.com/BeyondYourself), [1084667371](https://github.com/1084667371) 贡献了[PPOCRLabel](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/PPOCRLabel/README_ch.md) 的完整代码。 +- 非常感谢 
[bupt906](https://github.com/bupt906) 贡献MicroNet结构代码([#5251](https://github.com/PaddlePaddle/PaddleOCR/pull/5251))和贡献OneCycle学习率策略代码([#5252](https://github.com/PaddlePaddle/PaddleOCR/pull/5252)) ### 1.3 代码修复 @@ -55,7 +58,7 @@ PaddleOCR希望可以通过AI的力量助力任何一位有梦想的开发者实 ### 1.4 文档优化与翻译 -- 非常感谢 **[RangeKing](https://github.com/RangeKing)** 贡献翻译《动手学OCR》notebook[电子书英文版](https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/notebook/notebook_en)。 +- 非常感谢 **[RangeKing](https://github.com/RangeKing),[HustBestCat](https://github.com/HustBestCat),[v3fc](https://github.com/v3fc),[1084667371](https://github.com/1084667371)** 贡献翻译《动手学OCR》notebook[电子书英文版](https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/notebook/notebook_en)。 - 非常感谢 [thunderstudying](https://github.com/thunderstudying),[RangeKing](https://github.com/RangeKing),[livingbody](https://github.com/livingbody), [WZMIAOMIAO](https://github.com/WZMIAOMIAO),[haigang1975](https://github.com/haigang1975) 补充多个英文markdown文档。 - 非常感谢 **[fanruinet](https://github.com/fanruinet)** 润色和修复35篇英文文档([#5205](https://github.com/PaddlePaddle/PaddleOCR/pull/5205))。 - 非常感谢 [Khanh Tran](https://github.com/xxxpsyduck) 和 [Karl Horky](https://github.com/karlhorky) 贡献修改英文文档。 diff --git a/doc/doc_en/config_en.md b/doc/doc_en/config_en.md index eda1e13da956ab1eede72b97e62d76b915e02169..d7bf5eaddd7b10d178cd472caf8081c4706f15b6 100644 --- a/doc/doc_en/config_en.md +++ b/doc/doc_en/config_en.md @@ -66,7 +66,7 @@ In PaddleOCR, the network is divided into four stages: Transform, Backbone, Neck | :---------------------: | :---------------------: | :--------------: | :--------------------: | | model_type | Network Type | rec | Currently support`rec`,`det`,`cls` | | algorithm | Model name | CRNN | See [algorithm_overview](./algorithm_overview_en.md) for the support list | -| **Transform** | Set the transformation method | - | Currently only recognition algorithms are supported, see [ppocr/modeling/transform](../../ppocr/modeling/transform) for details | +| **Transform** | Set the transformation method | - | Currently only recognition algorithms are supported, see [ppocr/modeling/transforms](../../ppocr/modeling/transforms) for details | | name | Transformation class name | TPS | Currently supports `TPS` | | num_fiducial | Number of TPS control points | 20 | Ten on the top and bottom | | loc_lr | Localization network learning rate | 0.1 | \ | diff --git a/doc/doc_en/training_en.md b/doc/doc_en/training_en.md index 1a3165d0ab226d7cbeef356ee750594c759cfe23..89992ff905426faaf7d22707a76dd9daaa8bcbb7 100644 --- a/doc/doc_en/training_en.md +++ b/doc/doc_en/training_en.md @@ -94,14 +94,14 @@ The current open source models, data sets and magnitudes are as follows: - Chinese data set, LSVT street view data set crops the image according to the truth value, and performs position calibration, a total of 30w images. In addition, based on the LSVT corpus, 500w of synthesized data. - Small language data set, using different corpora and fonts, respectively generated 100w synthetic data set, and using ICDAR-MLT as the verification set. -Among them, the public data sets are all open source, users can search and download by themselves, or refer to [Chinese data set](../doc_ch/datasets.md), synthetic data is not open source, users can use open source synthesis tools to synthesize by themselves. 
Synthesis tools include [text_renderer](https://github.com/Sanster/text_renderer), [SynthText](https://github.com/ankush-me/SynthText), [TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator) etc. +Among them, the public data sets are all open source, users can search and download by themselves, or refer to [Chinese data set](./datasets_en.md), synthetic data is not open source, users can use open source synthesis tools to synthesize by themselves. Synthesis tools include [text_renderer](https://github.com/Sanster/text_renderer), [SynthText](https://github.com/ankush-me/SynthText), [TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator) etc. ### 3.2 Vertical Scene PaddleOCR mainly focuses on general OCR. If you have vertical requirements, you can use PaddleOCR + vertical data to train yourself; -If there is a lack of labeled data, or if you do not want to invest in research and development costs, it is recommended to directly call the open API, which covers some of the more common vertical categories. +If there is a lack of labeled data, or if you do not want to invest in research and development costs, it is recommended to directly call the open API, which covers some of the more common vertical categories. @@ -147,8 +147,8 @@ There are several experiences for reference when constructing the data set: *** -Click the following links for detailed training tutorial: +Click the following links for detailed training tutorial: -- [text detection model training](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/doc/doc_ch/detection.md) -- [text recognition model training](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/doc/doc_ch/recognition.md) -- [text direction classification model training](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/doc/doc_ch/angle_class.md) +- [text detection model training](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/doc/doc_ch/detection.md) +- [text recognition model training](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/doc/doc_ch/recognition.md) +- [text direction classification model training](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/doc/doc_ch/angle_class.md) diff --git a/doc/doc_en/tricks_en.md b/doc/doc_en/tricks_en.md index eab9c89236ca86d4e473fbb2776941fdd3e7567d..4d59857a04f3985c9f8c189e6b0fc54a6cc1cc0f 100644 --- a/doc/doc_en/tricks_en.md +++ b/doc/doc_en/tricks_en.md @@ -12,25 +12,25 @@ Here we have sorted out some Chinese OCR training and prediction tricks, which a At present, ResNet_vd series and MobileNetV3 series are the backbone networks used in PaddleOCR, whether replacing the other backbone networks will help to improve the accuracy? What should be paid attention to when replacing? - **Tips** - - Whether text detection or text recognition, the choice of backbone network is a trade-off between prediction effect and prediction efficiency. Generally, a larger backbone network is selected, e.g. ResNet101_vd, then the performance of the detection or recognition is more accurate, but the time cost will increase accordingly. And a smaller backbone network is selected, e.g. MobileNetV3_small_x0_35, the prediction speed is faster, but the accuracy of detection or recognition will be reduced. Fortunately, the detection or recognition effect of different backbone networks is positively correlated with the performance of ImageNet 1000 classification task. 
[**PaddleClas**](https://github.com/PaddlePaddle/PaddleClas/blob/master/README_en.md) have sorted out the 23 series of classification network structures, such as ResNet_vd、Res2Net、HRNet、MobileNetV3、GhostNet. It provides the top1 accuracy of classification, the time cost of GPU(V100 and T4) and CPU(SD 855), and the 117 pretrained models [**download addresses**](https://paddleclas-en.readthedocs.io/en/latest/models/models_intro_en.html). - + - Whether text detection or text recognition, the choice of backbone network is a trade-off between prediction effect and prediction efficiency. Generally, a larger backbone network is selected, e.g. ResNet101_vd, then the performance of the detection or recognition is more accurate, but the time cost will increase accordingly. And a smaller backbone network is selected, e.g. MobileNetV3_small_x0_35, the prediction speed is faster, but the accuracy of detection or recognition will be reduced. Fortunately, the detection or recognition effect of different backbone networks is positively correlated with the performance of ImageNet 1000 classification task. [**PaddleClas**](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.3/docs/en/models/models_intro_en.md) have sorted out the 23 series of classification network structures, such as ResNet_vd、Res2Net、HRNet、MobileNetV3、GhostNet. It provides the top1 accuracy of classification, the time cost of GPU(V100 and T4) and CPU(SD 855), and the 117 pretrained models [**download addresses**](https://paddleclas-en.readthedocs.io/en/latest/models/models_intro_en.html). + - Similar as the 4 stages of ResNet, the replacement of text detection backbone network is to determine those four stages to facilitate the integration of FPN like the object detection heads. In addition, for the text detection problem, the pre trained model in ImageNet1000 can accelerate the convergence and improve the accuracy. - + - In order to replace the backbone network of text recognition, we need to pay attention to the descending position of network width and height stride. Since the ratio between width and height is large in chinese text recognition, the frequency of height decrease is less and the frequency of width decrease is more. You can refer the [modifies of MobileNetV3](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/ppocr/modeling/backbones/rec_mobilenet_v3.py) in PaddleOCR. #### 2、Long Chinese Text Recognition -- **Problem Description** +- **Problem Description** The maximum resolution of Chinese recognition model during training is [3,32,320], if the text image to be recognized is too long, as shown in the figure below, how to adapt? - +
- + - **Tips** During the training, the training samples are not directly resized to [3,32,320]. At first, the height of samples are resized to 32 and keep the ratio between the width and the height. When the width is less than 320, the excess parts are padding 0. Besides, when the ratio between the width and the height of the samples is larger than 10, these samples will be ignored. When the prediction for one image, do as above, but do not limit the max ratio between the width and the height. When the prediction for an images batch, do as training, but the resized target width is the longest width of the images in the batch. [Code as following](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/tools/infer/predict_rec.py): - + ``` def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape @@ -58,11 +58,11 @@ Here we have sorted out some Chinese OCR training and prediction tricks, which a - **Problem Description** As shown in the figure below, for Chinese and English mixed scenes, in order to facilitate reading and using the recognition results, it is often necessary to recognize the spaces between words. How can this situation be adapted? - +
- + - **Tips** - + There are two possible methods for space recognition. (1) Optimize the text detection. For spliting the text at the space in detection results, it needs to divide the text line with space into many segments when label the data for detection. (2) Optimize the text recognition. The space character is introduced into the recognition dictionary. Label the blank line in the training data for text recognition. In addition, we can also concat multiple word lines to synthesize the training data with spaces. PaddleOCR currently uses the second method. diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 786647f1f655dd40be1117df912f59c42108539e..ef962b17850b17517b37a754c63a77feb412c45a 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -799,7 +799,7 @@ class VQATokenLabelEncode(object): ocr_engine=None, **kwargs): super(VQATokenLabelEncode, self).__init__() - from paddlenlp.transformers import LayoutXLMTokenizer, LayoutLMTokenizer + from paddlenlp.transformers import LayoutXLMTokenizer, LayoutLMTokenizer, LayoutLMv2Tokenizer from ppocr.utils.utility import load_vqa_bio_label_maps tokenizer_dict = { 'LayoutXLM': { @@ -809,6 +809,10 @@ class VQATokenLabelEncode(object): 'LayoutLM': { 'class': LayoutLMTokenizer, 'pretrained_model': 'layoutlm-base-uncased' + }, + 'LayoutLMv2': { + 'class': LayoutLMv2Tokenizer, + 'pretrained_model': 'layoutlmv2-base-uncased' } } self.contains_re = contains_re diff --git a/ppocr/data/imaug/vqa/token/vqa_token_chunk.py b/ppocr/data/imaug/vqa/token/vqa_token_chunk.py index deb55b4d55b81d5949ed834693e45c3b40c4b762..1fa949e688289b320c6a7c121c944708febe2c9d 100644 --- a/ppocr/data/imaug/vqa/token/vqa_token_chunk.py +++ b/ppocr/data/imaug/vqa/token/vqa_token_chunk.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from collections import defaultdict + class VQASerTokenChunk(object): def __init__(self, max_seq_len=512, infer_mode=False, **kwargs): @@ -39,6 +41,8 @@ class VQASerTokenChunk(object): encoded_inputs_example[key] = data[key] encoded_inputs_all.append(encoded_inputs_example) + if len(encoded_inputs_all) == 0: + return None return encoded_inputs_all[0] @@ -101,17 +105,18 @@ class VQAReTokenChunk(object): "entities": self.reformat(entities_in_this_span), "relations": self.reformat(relations_in_this_span), }) - item['entities']['label'] = [ - self.entities_labels[x] for x in item['entities']['label'] - ] - encoded_inputs_all.append(item) + if len(item['entities']) > 0: + item['entities']['label'] = [ + self.entities_labels[x] for x in item['entities']['label'] + ] + encoded_inputs_all.append(item) + if len(encoded_inputs_all) == 0: + return None return encoded_inputs_all[0] def reformat(self, data): - new_data = {} + new_data = defaultdict(list) for item in data: for k, v in item.items(): - if k not in new_data: - new_data[k] = [] new_data[k].append(v) return new_data diff --git a/ppocr/modeling/backbones/__init__.py b/ppocr/modeling/backbones/__init__.py index a7db52d26704e0c8426e313b8788b656085983d6..b34b75507cbf047e9adb5f79a2cc2c061ffdab0e 100755 --- a/ppocr/modeling/backbones/__init__.py +++ b/ppocr/modeling/backbones/__init__.py @@ -45,8 +45,11 @@ def build_backbone(config, model_type): from .table_mobilenet_v3 import MobileNetV3 support_dict = ["ResNet", "MobileNetV3"] elif model_type == 'vqa': - from .vqa_layoutlm import LayoutLMForSer, LayoutXLMForSer, LayoutXLMForRe - support_dict = ["LayoutLMForSer", "LayoutXLMForSer", 'LayoutXLMForRe'] + from .vqa_layoutlm import LayoutLMForSer, LayoutLMv2ForSer, LayoutLMv2ForRe, LayoutXLMForSer, LayoutXLMForRe + support_dict = [ + "LayoutLMForSer", "LayoutLMv2ForSer", 'LayoutLMv2ForRe', + "LayoutXLMForSer", 'LayoutXLMForRe' + ] else: raise NotImplementedError diff --git a/ppocr/modeling/backbones/vqa_layoutlm.py b/ppocr/modeling/backbones/vqa_layoutlm.py index 0e98155514cdd055680f32b529fdce631384a37f..ede5b7a35af65fac351277cefccd89b251f5cdb7 100644 --- a/ppocr/modeling/backbones/vqa_layoutlm.py +++ b/ppocr/modeling/backbones/vqa_layoutlm.py @@ -21,12 +21,14 @@ from paddle import nn from paddlenlp.transformers import LayoutXLMModel, LayoutXLMForTokenClassification, LayoutXLMForRelationExtraction from paddlenlp.transformers import LayoutLMModel, LayoutLMForTokenClassification +from paddlenlp.transformers import LayoutLMv2Model, LayoutLMv2ForTokenClassification, LayoutLMv2ForRelationExtraction __all__ = ["LayoutXLMForSer", 'LayoutLMForSer'] pretrained_model_dict = { LayoutXLMModel: 'layoutxlm-base-uncased', - LayoutLMModel: 'layoutlm-base-uncased' + LayoutLMModel: 'layoutlm-base-uncased', + LayoutLMv2Model: 'layoutlmv2-base-uncased' } @@ -58,12 +60,34 @@ class NLPBaseModel(nn.Layer): self.out_channels = 1 -class LayoutXLMForSer(NLPBaseModel): +class LayoutLMForSer(NLPBaseModel): def __init__(self, num_classes, pretrained=True, checkpoints=None, **kwargs): - super(LayoutXLMForSer, self).__init__( - LayoutXLMModel, - LayoutXLMForTokenClassification, + super(LayoutLMForSer, self).__init__( + LayoutLMModel, + LayoutLMForTokenClassification, + 'ser', + pretrained, + checkpoints, + num_classes=num_classes) + + def forward(self, x): + x = self.model( + input_ids=x[0], + bbox=x[2], + attention_mask=x[4], + token_type_ids=x[5], + position_ids=None, + output_hidden_states=False) + return x + + +class LayoutLMv2ForSer(NLPBaseModel): + def __init__(self, 
num_classes, pretrained=True, checkpoints=None, + **kwargs): + super(LayoutLMv2ForSer, self).__init__( + LayoutLMv2Model, + LayoutLMv2ForTokenClassification, 'ser', pretrained, checkpoints, @@ -82,12 +106,12 @@ class LayoutXLMForSer(NLPBaseModel): return x[0] -class LayoutLMForSer(NLPBaseModel): +class LayoutXLMForSer(NLPBaseModel): def __init__(self, num_classes, pretrained=True, checkpoints=None, **kwargs): - super(LayoutLMForSer, self).__init__( - LayoutLMModel, - LayoutLMForTokenClassification, + super(LayoutXLMForSer, self).__init__( + LayoutXLMModel, + LayoutXLMForTokenClassification, 'ser', pretrained, checkpoints, @@ -97,10 +121,33 @@ class LayoutLMForSer(NLPBaseModel): x = self.model( input_ids=x[0], bbox=x[2], + image=x[3], attention_mask=x[4], token_type_ids=x[5], position_ids=None, - output_hidden_states=False) + head_mask=None, + labels=None) + return x[0] + + +class LayoutLMv2ForRe(NLPBaseModel): + def __init__(self, pretrained=True, checkpoints=None, **kwargs): + super(LayoutLMv2ForRe, self).__init__(LayoutLMv2Model, + LayoutLMv2ForRelationExtraction, + 're', pretrained, checkpoints) + + def forward(self, x): + x = self.model( + input_ids=x[0], + bbox=x[1], + labels=None, + image=x[2], + attention_mask=x[3], + token_type_ids=x[4], + position_ids=None, + head_mask=None, + entities=x[5], + relations=x[6]) return x diff --git a/ppocr/optimizer/__init__.py b/ppocr/optimizer/__init__.py index e0c6b90371cb4b09fb894ceeaeb8595e51c6c557..4110fb47678583cff826a9bc855b3fb378a533f9 100644 --- a/ppocr/optimizer/__init__.py +++ b/ppocr/optimizer/__init__.py @@ -25,11 +25,8 @@ __all__ = ['build_optimizer'] def build_lr_scheduler(lr_config, epochs, step_each_epoch): from . import learning_rate lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch}) - if 'name' in lr_config: - lr_name = lr_config.pop('name') - lr = getattr(learning_rate, lr_name)(**lr_config)() - else: - lr = lr_config['learning_rate'] + lr_name = lr_config.pop('name', 'Const') + lr = getattr(learning_rate, lr_name)(**lr_config)() return lr diff --git a/ppocr/optimizer/learning_rate.py b/ppocr/optimizer/learning_rate.py index b1879f3ee509761043c1797d8b67e4e0988af130..fe251f36e736bb1eac8a71a8115c941cbd7443e6 100644 --- a/ppocr/optimizer/learning_rate.py +++ b/ppocr/optimizer/learning_rate.py @@ -275,4 +275,36 @@ class OneCycle(object): start_lr=0.0, end_lr=self.max_lr, last_epoch=self.last_epoch) - return learning_rate \ No newline at end of file + return learning_rate + + +class Const(object): + """ + Const learning rate decay + Args: + learning_rate(float): initial learning rate + step_each_epoch(int): steps each epoch + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. 
+ """ + + def __init__(self, + learning_rate, + step_each_epoch, + warmup_epoch=0, + last_epoch=-1, + **kwargs): + super(Const, self).__init__() + self.learning_rate = learning_rate + self.last_epoch = last_epoch + self.warmup_epoch = round(warmup_epoch * step_each_epoch) + + def __call__(self): + learning_rate = self.learning_rate + if self.warmup_epoch > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_epoch, + start_lr=0.0, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate diff --git a/ppstructure/README.md b/ppstructure/README.md index b4c1ec8d828fd521601c97f9f5d0754eecd13152..236b6a39045d814b1ad3a00f658b5f778ac207c5 100644 --- a/ppstructure/README.md +++ b/ppstructure/README.md @@ -13,20 +13,18 @@ English | [简体中文](README_ch.md) - [6.1.2 Table recognition](#612-table-recognition) - [6.2 DOC-VQA](#62-doc-vqa) - [7. Model List](#7-model-list) - - + - [7.1 Layout analysis model](#71-layout-analysis-model) + - [7.2 OCR and table recognition model](#72-ocr-and-table-recognition-model) + - [7.3 DOC-VQA model](#73-doc-vqa-model) ## 1. Introduction PP-Structure is an OCR toolkit that can be used for document analysis and processing with complex structures, designed to help developers better complete document understanding tasks - - ## 2. Update log +* 2022.02.12 DOC-VQA add LayoutLMv2 model。 * 2021.12.07 add [DOC-VQA SER and RE tasks](vqa/README.md)。 - - ## 3. Features The main features of PP-Structure are as follows: @@ -38,21 +36,14 @@ The main features of PP-Structure are as follows: - Support custom training for layout analysis and table structure tasks - Support Document Visual Question Answering (DOC-VQA) tasks: Semantic Entity Recognition (SER) and Relation Extraction (RE) - - - ## 4. Results - - ### 4.1 Layout analysis and table recognition The figure shows the pipeline of layout analysis + table recognition. The image is first divided into four areas of image, text, title and table by layout analysis, and then OCR detection and recognition is performed on the three areas of image, text and title, and the table is performed table recognition, where the image will also be stored for use. - - ### 4.2 DOC-VQA * SER @@ -77,19 +68,12 @@ The corresponding category and OCR recognition results are also marked at the to In the figure, the red box represents the question, the blue box represents the answer, and the question and answer are connected by green lines. The corresponding category and OCR recognition results are also marked at the top left of the OCR detection box. - - - ## 5. Quick start Start from [Quick Installation](./docs/quickstart.md) - - ## 6. PP-Structure System - - ### 6.1 Layout analysis and table recognition ![pipeline](../doc/table/pipeline.jpg) @@ -104,39 +88,33 @@ Layout analysis classifies image by region, including the use of Python scripts Table recognition converts table images into excel documents, which include the detection and recognition of table text and the prediction of table structure and cell coordinates. For detailed instructions, please refer to [document](table/README.md) - - ### 6.2 DOC-VQA Document Visual Question Answering (DOC-VQA) if a type of Visual Question Answering (VQA), which includes Semantic Entity Recognition (SER) and Relation Extraction (RE) tasks. Based on SER task, text recognition and classification in images can be completed. Based on THE RE task, we can extract the relation of the text content in the image, such as judge the problem pair. 
For details, please refer to [document](vqa/README.md) - - - ## 7. Model List -PP-Structure系列模型列表(更新中) +PP-Structure Series Model List (Updating) -* Layout analysis model +### 7.1 Layout analysis model |model name|description|download| | --- | --- | --- | | ppyolov2_r50vd_dcn_365e_publaynet | The layout analysis model trained on the PubLayNet dataset can divide image into 5 types of areas **text, title, table, picture, and list** | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) | - -* OCR and table recognition model +### 7.2 OCR and table recognition model |model name|description|model size|download| | --- | --- | --- | --- | -|ch_ppocr_mobile_slim_v2.0_det|Slim pruned lightweight model, supporting Chinese, English, multilingual text detection|2.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | -|ch_ppocr_mobile_slim_v2.0_rec|Slim pruned and quantized lightweight model, supporting Chinese, English and number recognition|6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | -|en_ppocr_mobile_v2.0_table_structure|Table structure prediction of English table scene trained on PubLayNet dataset|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | +|ch_PP-OCRv2_det_slim|[New] Slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text detection| 3M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_slim_quant_infer.tar)| +|ch_PP-OCRv2_rec_slim|[New] Slim quantization with distillation lightweight model, supporting Chinese, English, multilingual text recognition| 9M |[inference model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_train.tar) | +|en_ppocr_mobile_v2.0_table_structure|Table structure prediction of English table scene trained on PubLayNet dataset| 18.6M |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | -* DOC-VQA model +### 7.3 DOC-VQA model |model name|description|model size|download| | --- | --- | --- | --- | -|PP-Layout_v1.0_ser_pretrained|SER model trained on xfun Chinese dataset based on LayoutXLM|1.4G|[inference model coming soon]() / [trained model](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar) | -|PP-Layout_v1.0_re_pretrained|RE model trained on xfun Chinese dataset based on LayoutXLM|1.4G|[inference model coming soon]() / [trained model](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar) | +|ser_LayoutXLM_xfun_zh|SER model trained on xfun Chinese dataset based on LayoutXLM|1.4G|[inference model coming soon]() / [trained model](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar) | +|re_LayoutXLM_xfun_zh|RE model trained on xfun Chinese
dataset based on LayoutXLM|1.4G|[inference model coming soon]() / [trained model](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar) | -If you need to use other models, you can download the model in [PPOCR model_list](../doc/doc_en/models_list_en.md) and [PPStructure model_list](./docs/model_list.md) +If you need to use other models, you can download the model in [PPOCR model_list](../doc/doc_en/models_list_en.md) and [PPStructure model_list](./docs/models_list.md) diff --git a/ppstructure/README_ch.md b/ppstructure/README_ch.md index a449028dff29739e621bfa2aa77eac63b43e6c84..71456fd03196adec2e4dcff196f084411bb69af6 100644 --- a/ppstructure/README_ch.md +++ b/ppstructure/README_ch.md @@ -13,18 +13,17 @@ - [6.1.2 表格识别](#612-表格识别) - [6.2 DOC-VQA](#62-doc-vqa) - [7. 模型库](#7-模型库) + - [7.1 版面分析模型](#71-版面分析模型) + - [7.2 OCR和表格识别模型](#72-ocr和表格识别模型) + - [7.2 DOC-VQA 模型](#72-doc-vqa-模型) - ## 1. 简介 PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包,旨在帮助开发者更好的完成文档理解相关任务。 - - ## 2. 近期更新 -* 2021.12.07 新增DOC-[VQA任务SER和RE](vqa/README.md)。 - - +* 2022.02.12 DOC-VQA增加LayoutLMv2模型。 +* 2021.12.07 新增[DOC-VQA任务SER和RE](vqa/README.md)。 ## 3. 特性 @@ -36,22 +35,14 @@ PP-Structure的主要特性如下: - 支持版面分析和表格结构化两类任务自定义训练 - 支持文档视觉问答(Document Visual Question Answering,DOC-VQA)任务-语义实体识别(Semantic Entity Recognition,SER)和关系抽取(Relation Extraction,RE) - - - ## 4. 效果展示 - - ### 4.1 版面分析和表格识别 图中展示了版面分析+表格识别的整体流程,图片先有版面分析划分为图像、文本、标题和表格四种区域,然后对图像、文本和标题三种区域进行OCR的检测识别,对表格进行表格识别,其中图像还会被存储下来以便使用。 - - - ### 4.2 DOC-VQA * SER @@ -75,18 +66,12 @@ PP-Structure的主要特性如下: 图中红色框表示问题,蓝色框表示答案,问题和答案之间使用绿色线连接。在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 - - ## 5. 快速体验 请参考[快速安装](./docs/quickstart.md)教程。 - - ## 6. PP-Structure 介绍 - - ### 6.1 版面分析+表格识别 ![pipeline](../doc/table/pipeline.jpg) @@ -101,39 +86,34 @@ PP-Structure的主要特性如下: 表格识别将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md)。 - - ### 6.2 DOC-VQA DOC-VQA指文档视觉问答,其中包括语义实体识别 (Semantic Entity Recognition, SER) 和关系抽取 (Relation Extraction, RE) 任务。基于 SER 任务,可以完成对图像中的文本识别与分类;基于 RE 任务,可以完成对图象中的文本内容的关系提取,如判断问题对(pair),详细说明参考[文档](vqa/README.md)。 - - ## 7. 
模型库 PP-Structure系列模型列表(更新中) -* 版面分析模型 +### 7.1 版面分析模型 |模型名称|模型简介|下载地址| | --- | --- | --- | | ppyolov2_r50vd_dcn_365e_publaynet | PubLayNet 数据集训练的版面分析模型,可以划分**文字、标题、表格、图片以及列表**5类区域 | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) | - -* OCR和表格识别模型 +### 7.2 OCR和表格识别模型 |模型名称|模型简介|模型大小|下载地址| | --- | --- | --- | --- | -|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | -|ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | +|ch_PP-OCRv2_det_slim|【最新】slim量化+蒸馏版超轻量模型,支持中英文、多语种文本检测| 3M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_slim_quant_infer.tar)| +|ch_PP-OCRv2_rec_slim|【最新】slim量化版超轻量模型,支持中英文、数字识别| 9M |[推理模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_train.tar) | |en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | -* DOC-VQA 模型 +### 7.2 DOC-VQA 模型 |模型名称|模型简介|模型大小|下载地址| | --- | --- | --- | --- | -|PP-Layout_v1.0_ser_pretrained|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar) | -|PP-Layout_v1.0_re_pretrained|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar) | +|ser_LayoutXLM_xfun_zhd|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar) | +|re_LayoutXLM_xfun_zh|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar) | -更多模型下载,可以参考 [PPOCR model_list](../doc/doc_en/models_list.md) and [PPStructure model_list](./docs/model_list.md) +更多模型下载,可以参考 [PP-OCR model_list](../doc/doc_ch/models_list.md) and [PP-Structure model_list](./docs/models_list.md) diff --git a/ppstructure/docs/installation.md b/ppstructure/docs/installation.md index 30c25d5dc92f6ccdb0d93dafe9707f30eca0c0a9..155baf29de5701b58c9342cf82897b23f4ab7e45 100644 --- a/ppstructure/docs/installation.md +++ b/ppstructure/docs/installation.md @@ -1,3 +1,9 @@ +- [快速安装](#快速安装) + - [1. PaddlePaddle 和 PaddleOCR](#1-paddlepaddle-和-paddleocr) + - [2. 安装其他依赖](#2-安装其他依赖) + - [2.1 版面分析所需 Layout-Parser](#21-版面分析所需--layout-parser) + - [2.2 VQA所需依赖](#22--vqa所需依赖) + # 快速安装 ## 1. PaddlePaddle 和 PaddleOCR diff --git a/ppstructure/docs/kie.md b/ppstructure/docs/kie.md index 21854b0d24b0b2bbe6a4612b1112b201c5df255d..35498b33478d1010fd2548dfcb8586b4710723a1 100644 --- a/ppstructure/docs/kie.md +++ b/ppstructure/docs/kie.md @@ -1,4 +1,8 @@ - +- [关键信息提取(Key Information Extraction)](#关键信息提取key-information-extraction) + - [1. 快速使用](#1-快速使用) + - [2. 执行训练](#2-执行训练) + - [3. 执行评估](#3-执行评估) + - [4. 
参考文献](#4-参考文献) # 关键信息提取(Key Information Extraction) @@ -7,11 +11,6 @@ SDMGR是一个关键信息提取算法,将每个检测到的文本区域分类为预定义的类别,如订单ID、发票号码,金额等。 -* [1. 快速使用](#1-----) -* [2. 执行训练](#2-----) -* [3. 执行评估](#3-----) - - ## 1. 快速使用 训练和测试的数据采用wildreceipt数据集,通过如下指令下载数据集: @@ -36,7 +35,6 @@ python3.7 tools/infer_kie.py -c configs/kie/kie_unet_sdmgr.yml -o Global.checkpo - ## 2. 执行训练 创建数据集软链到PaddleOCR/train_data目录下: @@ -50,7 +48,6 @@ ln -s ../../wildreceipt ./ ``` python3.7 tools/train.py -c configs/kie/kie_unet_sdmgr.yml -o Global.save_model_dir=./output/kie/ ``` - ## 3. 执行评估 ``` @@ -58,7 +55,7 @@ python3.7 tools/eval.py -c configs/kie/kie_unet_sdmgr.yml -o Global.checkpoints= ``` -**参考文献:** +## 4. 参考文献 diff --git a/ppstructure/docs/kie_en.md b/ppstructure/docs/kie_en.md index a424968a9b5a33132afe52a4850cfe541919ae1c..1fe38b0b399e9290526dafa5409673dc87026db7 100644 --- a/ppstructure/docs/kie_en.md +++ b/ppstructure/docs/kie_en.md @@ -1,4 +1,8 @@ - +- [Key Information Extraction(KIE)](#key-information-extractionkie) + - [1. Quick Use](#1-quick-use) + - [2. Model Training](#2-model-training) + - [3. Model Evaluation](#3-model-evaluation) + - [4. Reference](#4-reference) # Key Information Extraction(KIE) @@ -6,13 +10,6 @@ This section provides a tutorial example on how to quickly use, train, and evalu [SDMGR(Spatial Dual-Modality Graph Reasoning)](https://arxiv.org/abs/2103.14470) is a KIE algorithm that classifies each detected text region into predefined categories, such as order ID, invoice number, amount, and etc. - -* [1. Quick Use](#1-----) -* [2. Model Training](#2-----) -* [3. Model Evaluation](#3-----) - - - ## 1. Quick Use [Wildreceipt dataset](https://paperswithcode.com/dataset/wildreceipt) is used for this tutorial. It contains 1765 photos, with 25 classes, and 50000 text boxes, which can be downloaded by wget: @@ -37,7 +34,6 @@ The visualization results are shown in the figure below: - ## 2. Model Training Create a softlink to the folder, `PaddleOCR/train_data`: @@ -51,7 +47,6 @@ The configuration file used for training is `configs/kie/kie_unet_sdmgr.yml`. Th ```shell python3.7 tools/train.py -c configs/kie/kie_unet_sdmgr.yml -o Global.save_model_dir=./output/kie/ ``` - ## 3. Model Evaluation @@ -61,7 +56,7 @@ After training, you can execute the model evaluation with the following command: python3.7 tools/eval.py -c configs/kie/kie_unet_sdmgr.yml -o Global.checkpoints=./output/kie/best_accuracy ``` -**Reference:** +## 4. Reference diff --git a/ppstructure/docs/model_list.md b/ppstructure/docs/models_list.md similarity index 53% rename from ppstructure/docs/model_list.md rename to ppstructure/docs/models_list.md index baec2a2fd08a5b8d51e4c68bc62902feb04de977..d966e18f2a7fd6d76a0fd491058539173b5d9690 100644 --- a/ppstructure/docs/model_list.md +++ b/ppstructure/docs/models_list.md @@ -1,4 +1,13 @@ -# Model List +- [PP-Structure 系列模型列表](#pp-structure-系列模型列表) + - [1. LayoutParser 模型](#1-layoutparser-模型) + - [2. OCR和表格识别模型](#2-ocr和表格识别模型) + - [2.1 OCR](#21-ocr) + - [2.2 表格识别模型](#22-表格识别模型) + - [3. VQA模型](#3-vqa模型) + - [4. KIE模型](#4-kie模型) + +# PP-Structure 系列模型列表 + ## 1. LayoutParser 模型 @@ -10,25 +19,33 @@ ## 2. 
OCR和表格识别模型 +### 2.1 OCR + |模型名称|模型简介|推理模型大小|下载地址| | --- | --- | --- | --- | -|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | -|ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | |en_ppocr_mobile_v2.0_table_det|PubLayNet数据集训练的英文表格场景的文字检测|4.7M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_det_train.tar) | |en_ppocr_mobile_v2.0_table_rec|PubLayNet数据集训练的英文表格场景的文字识别|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_rec_train.tar) | -|en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | -如需要使用其他OCR模型,可以在 [model_list](../../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到`det_model_dir`,`rec_model_dir`两个字段即可。 +如需要使用其他OCR模型,可以在 [PP-OCR model_list](../../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到 `det_model_dir`, `rec_model_dir`两个字段即可。 + +### 2.2 表格识别模型 + +|模型名称|模型简介|推理模型大小|下载地址| +| --- | --- | --- | --- | +|en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | ## 3. VQA模型 |模型名称|模型简介|推理模型大小|下载地址| | --- | --- | --- | --- | -|PP-Layout_v1.0_ser_pretrained|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar) | -|PP-Layout_v1.0_re_pretrained|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar) | +|ser_LayoutXLM_xfun_zh|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar) | +|re_LayoutXLM_xfun_zh|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar) | +|ser_LayoutLMv2_xfun_zh|基于LayoutLMv2在xfun中文数据集上训练的SER模型|778M|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutLMv2_xfun_zh.tar) | +|re_LayoutLMv2_xfun_zh|基于LayoutLMv2在xfun中文数据集上训练的RE模型|765M|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutLMv2_xfun_zh.tar) | +|ser_LayoutLM_xfun_zh|基于LayoutLM在xfun中文数据集上训练的SER模型|430M|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutLM_xfun_zh.tar) | -## 3. KIE模型 +## 4. 
KIE模型 |模型名称|模型简介|模型大小|下载地址| | --- | --- | --- | --- | -|SDMGR|关键信息提取模型|-|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/kie/kie_vgg16.tar)| +|SDMGR|关键信息提取模型|78M|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/kie/kie_vgg16.tar)| diff --git a/ppstructure/docs/quickstart.md b/ppstructure/docs/quickstart.md index 668775c6da2b06d973f69a9ce81a37396460cbdf..7016f0fcb6c10176cf6f9d30457a5ff98d2b06e1 100644 --- a/ppstructure/docs/quickstart.md +++ b/ppstructure/docs/quickstart.md @@ -1,15 +1,13 @@ # PP-Structure 快速开始 -* [1. 安装PaddleOCR whl包](#1) -* [2. 便捷使用](#2) - + [2.1 命令行使用](#21) - + [2.2 Python脚本使用](#22) - + [2.3 返回结果说明](#23) - + [2.4 参数说明](#24) -* [3. Python脚本使用](#3) - - - +- [PP-Structure 快速开始](#pp-structure-快速开始) + - [1. 安装依赖包](#1-安装依赖包) + - [2. 便捷使用](#2-便捷使用) + - [2.1 命令行使用](#21-命令行使用) + - [2.2 Python脚本使用](#22-python脚本使用) + - [2.3 返回结果说明](#23-返回结果说明) + - [2.4 参数说明](#24-参数说明) + - [3. Python脚本使用](#3-python脚本使用) ## 1. 安装依赖包 @@ -24,12 +22,8 @@ pip3 install -e . ``` - - ## 2. 便捷使用 - - ### 2.1 命令行使用 * 版面分析+表格识别 @@ -41,8 +35,6 @@ paddleocr --image_dir=../doc/table/1.png --type=structure 请参考:[文档视觉问答](../vqa/README.md)。 - - ### 2.2 Python脚本使用 * 版面分析+表格识别 @@ -76,8 +68,6 @@ im_show.save('result.jpg') 请参考:[文档视觉问答](../vqa/README.md)。 - - ### 2.3 返回结果说明 PP-Structure的返回结果为一个dict组成的list,示例如下 @@ -103,8 +93,6 @@ dict 里各个字段说明如下 请参考:[文档视觉问答](../vqa/README.md)。 - - ### 2.4 参数说明 | 字段 | 说明 | 默认值 | @@ -122,8 +110,6 @@ dict 里各个字段说明如下 运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名名为表格在图片里的坐标。 - - ## 3. Python脚本使用 * 版面分析+表格识别 diff --git a/ppstructure/layout/README.md b/ppstructure/layout/README.md index 74cb928e30c012d5b469d685fd63b443a7d22613..0931702a7cf411e6589a1375e014a7374442f9f0 100644 --- a/ppstructure/layout/README.md +++ b/ppstructure/layout/README.md @@ -1,28 +1,19 @@ English | [简体中文](README_ch.md) - +- [Getting Started](#getting-started) + - [1. Install whl package](#1--install-whl-package) + - [2. Quick Start](#2-quick-start) + - [3. PostProcess](#3-postprocess) + - [4. Results](#4-results) + - [5. Training](#5-training) # Getting Started -[1. Install whl package](#Install) - -[2. Quick Start](#QuickStart) - -[3. PostProcess](#PostProcess) - -[4. Results](#Results) - -[5. Training](#Training) - - - ## 1. Install whl package ```bash wget https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl pip install -U layoutparser-0.0.0-py3-none-any.whl ``` - - ## 2. Quick Start Use LayoutParser to identify the layout of a document: @@ -77,8 +68,6 @@ The following model configurations and label maps are currently supported, which * TableBank word and TableBank latex are trained on datasets of word documents and latex documents respectively; * Download TableBank dataset contains both word and latex。 - - ## 3. PostProcess Layout parser contains multiple categories, if you only want to get the detection box for a specific category (such as the "Text" category), you can use the following code: @@ -119,7 +108,6 @@ Displays results with only the "Text" category:
- ## 4. Results @@ -134,8 +122,6 @@ Displays results with only the "Text" category: ​ **GPU:** a single NVIDIA Tesla P40 - - ## 5. Training The above model is based on [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection). If you want to train your own layout parser model,please refer to:[train_layoutparser_model](train_layoutparser_model.md) diff --git a/ppstructure/layout/README_ch.md b/ppstructure/layout/README_ch.md index c722e0bd88f40ff6b711edecff0433029e101f87..6fec748b7683264f5b4a7d29c0e51c84773425ba 100644 --- a/ppstructure/layout/README_ch.md +++ b/ppstructure/layout/README_ch.md @@ -1,26 +1,18 @@ [English](README.md) | 简体中文 +- [版面分析使用说明](#版面分析使用说明) + - [1. 安装whl包](#1--安装whl包) + - [2. 使用](#2-使用) + - [3. 后处理](#3-后处理) + - [4. 指标](#4-指标) + - [5. 训练版面分析模型](#5-训练版面分析模型) # 版面分析使用说明 -[1. 安装whl包](#安装whl包) - -[2. 使用](#使用) - -[3. 后处理](#后处理) - -[4. 指标](#指标) - -[5. 训练版面分析模型](#训练版面分析模型) - - - ## 1. 安装whl包 ```bash pip install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl ``` - - ## 2. 使用 使用layoutparser识别给定文档的布局: @@ -76,8 +68,6 @@ show_img.show() * TableBank word和TableBank latex分别在word文档、latex文档数据集训练; * 下载的TableBank数据集里同时包含word和latex。 - - ## 3. 后处理 版面分析检测包含多个类别,如果只想获取指定类别(如"Text"类别)的检测框、可以使用下述代码: @@ -119,8 +109,6 @@ show_img.show() - - ## 4. 指标 | Dataset | mAP | CPU time cost | GPU time cost | @@ -134,8 +122,6 @@ show_img.show() ​ **GPU:** a single NVIDIA Tesla P40 - - ## 5. 训练版面分析模型 上述模型基于[PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) 训练,如果您想训练自己的版面分析模型,请参考:[train_layoutparser_model](train_layoutparser_model_ch.md) diff --git a/ppstructure/layout/train_layoutparser_model.md b/ppstructure/layout/train_layoutparser_model.md index 58975d71606e45b2f68a7f68565459042ef32775..e877c9c0c901e8be8299101daa5ce6248de0a1dc 100644 --- a/ppstructure/layout/train_layoutparser_model.md +++ b/ppstructure/layout/train_layoutparser_model.md @@ -1,31 +1,20 @@ -# Training layout-parse - -[1. Installation](#Installation) - -​ [1.1 Requirements](#Requirements) - -​ [1.2 Install PaddleDetection](#Install_PaddleDetection) - -[2. Data preparation](#Data_reparation) - -[3. Configuration](#Configuration) +English | [简体中文](train_layoutparser_model_ch.md) +- [Training layout-parse](#training-layout-parse) + - [1. Installation](#1--installation) + - [1.1 Requirements](#11-requirements) + - [1.2 Install PaddleDetection](#12-install-paddledetection) + - [2. Data preparation](#2-data-preparation) + - [3. Configuration](#3-configuration) + - [4. Training](#4-training) + - [5. Prediction](#5-prediction) + - [6. Deployment](#6-deployment) + - [6.1 Export model](#61-export-model) + - [6.2 Inference](#62-inference) -[4. Training](#Training) - -[5. Prediction](#Prediction) - -[6. Deployment](#Deployment) - -​ [6.1 Export model](#Export_model) - -​ [6.2 Inference](#Inference) - - +# Training layout-parse ## 1. Installation - - ### 1.1 Requirements - PaddlePaddle 2.1 @@ -35,8 +24,6 @@ - CUDA >= 10.1 - cuDNN >= 7.6 - - ### 1.2 Install PaddleDetection ```bash @@ -51,8 +38,6 @@ pip install -r requirements.txt For more installation tutorials, please refer to: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) - - ## 2. 
Data preparation Download the [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) dataset @@ -80,8 +65,6 @@ PubLayNet directory structure after decompressing : For other datasets,please refer to [the PrepareDataSet]((https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md) ) - - ## 3. Configuration We use the `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml` configuration for training,the configuration file is as follows @@ -113,8 +96,6 @@ The `ppyolov2_r50vd_dcn_365e_coco.yml` configuration depends on other configurat Modify the preceding files, such as the dataset path and batch size etc. - - ## 4. Training PaddleDetection provides single-card/multi-card training mode to meet various training needs of users: @@ -146,8 +127,6 @@ python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppy Note: If you encounter "`Out of memory error`" , try reducing `batch_size` in the `ppyolov2_reader.yml` file -prediction - ## 5. Prediction Set parameters and use PaddleDetection to predict: @@ -159,14 +138,10 @@ python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer `--draw_threshold` is an optional parameter. According to the calculation of [NMS](https://ieeexplore.ieee.org/document/1699659), different threshold will produce different results, ` keep_top_k ` represent the maximum amount of output target, the default value is 10. You can set different value according to your own actual situation。 - - ## 6. Deployment Use your trained model in Layout Parser - - ### 6.1 Export model n the process of model training, the model file saved contains the process of forward prediction and back propagation. In the actual industrial deployment, there is no need for back propagation. Therefore, the model should be translated into the model format required by the deployment. The `tools/export_model.py` script is provided in PaddleDetection to export the model. @@ -183,8 +158,6 @@ The prediction model is exported to `inference/ppyolov2_r50vd_dcn_365e_coco` ,in More model export tutorials, please refer to:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) - - ### 6.2 Inference `model_path` represent the trained model path, and layoutparser is used to predict: @@ -194,8 +167,6 @@ import layoutparser as lp model = lp.PaddleDetectionLayoutModel(model_path="inference/ppyolov2_r50vd_dcn_365e_coco", threshold=0.5,label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"},enforce_cpu=True,enable_mkldnn=True) ``` - - *** More PaddleDetection training tutorials,please reference:[PaddleDetection Training](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/GETTING_STARTED_cn.md) diff --git a/ppstructure/layout/train_layoutparser_model_ch.md b/ppstructure/layout/train_layoutparser_model_ch.md index 2f73c63adcea3f82ae579222e658291224f46237..a89b0f3819b52c79b86d2ada13bac23e3d1656ed 100644 --- a/ppstructure/layout/train_layoutparser_model_ch.md +++ b/ppstructure/layout/train_layoutparser_model_ch.md @@ -1,31 +1,20 @@ -# 训练版面分析 - -[1. 安装](#安装) - -​ [1.1 环境要求](#环境要求) - -​ [1.2 安装PaddleDetection](#安装PaddleDetection) - -[2. 准备数据](#准备数据) - -[3. 配置文件改动和说明](#配置文件改动和说明) - -[4. PaddleDetection训练](#训练) - -[5. PaddleDetection预测](#预测) - -[6. 预测部署](#预测部署) - -​ [6.1 模型导出](#模型导出) - -​ [6.2 layout parser预测](#layout_parser预测) +[English](train_layoutparser_model.md) | 简体中文 +- [训练版面分析](#训练版面分析) + - [1. 
安装](#1-安装) + - [1.1 环境要求](#11-环境要求) + - [1.2 安装PaddleDetection](#12-安装paddledetection) + - [2. 准备数据](#2-准备数据) + - [3. 配置文件改动和说明](#3-配置文件改动和说明) + - [4. PaddleDetection训练](#4-paddledetection训练) + - [5. PaddleDetection预测](#5-paddledetection预测) + - [6. 预测部署](#6-预测部署) + - [6.1 模型导出](#61-模型导出) + - [6.2 layout_parser预测](#62-layout_parser预测) - +# 训练版面分析 ## 1. 安装 - - ### 1.1 环境要求 - PaddlePaddle 2.1 @@ -35,8 +24,6 @@ - CUDA >= 10.1 - cuDNN >= 7.6 - - ### 1.2 安装PaddleDetection ```bash @@ -51,8 +38,6 @@ pip install -r requirements.txt 更多安装教程,请参考: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) - - ## 2. 准备数据 下载 [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) 数据集: @@ -80,8 +65,6 @@ tar -xvf publaynet.tar.gz 如果使用其它数据集,请参考[准备训练数据](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md) - - ## 3. 配置文件改动和说明 我们使用 `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml`配置进行训练,配置文件摘要如下: @@ -113,8 +96,6 @@ weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final 根据实际情况,修改上述文件,比如数据集路径、batch size等。 - - ## 4. PaddleDetection训练 PaddleDetection提供了单卡/多卡训练模式,满足用户多种训练需求 @@ -146,8 +127,6 @@ python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppy 注意:如果遇到 "`Out of memory error`" 问题, 尝试在 `ppyolov2_reader.yml` 文件中调小`batch_size` - - ## 5. PaddleDetection预测 设置参数,使用PaddleDetection预测: @@ -159,14 +138,10 @@ python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer `--draw_threshold` 是个可选参数. 根据 [NMS](https://ieeexplore.ieee.org/document/1699659) 的计算,不同阈值会产生不同的结果 `keep_top_k`表示设置输出目标的最大数量,默认值为100,用户可以根据自己的实际情况进行设定。 - - ## 6. 预测部署 在layout parser中使用自己训练好的模型。 - - ### 6.1 模型导出 在模型训练过程中保存的模型文件是包含前向预测和反向传播的过程,在实际的工业部署则不需要反向传播,因此需要将模型进行导成部署需要的模型格式。 在PaddleDetection中提供了 `tools/export_model.py`脚本来导出模型。 @@ -183,8 +158,6 @@ python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml 更多模型导出教程,请参考:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) - - ### 6.2 layout_parser预测 `model_path`指定训练好的模型路径,使用layout parser进行预测: diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md index 94fa76055b93cefab0ac507a6007ec148aa12945..6137cfaef657d70a2b3a2b7eb9c69e364e421d96 100644 --- a/ppstructure/table/README.md +++ b/ppstructure/table/README.md @@ -1,3 +1,13 @@ +- [Table Recognition](#table-recognition) + - [1. pipeline](#1-pipeline) + - [2. Performance](#2-performance) + - [3. How to use](#3-how-to-use) + - [3.1 quick start](#31-quick-start) + - [3.2 Train](#32-train) + - [3.3 Eval](#33-eval) + - [3.4 Inference](#34-inference) + + # Table Recognition ## 1. 
pipeline @@ -51,10 +61,10 @@ After running, the excel sheet of each picture will be saved in the directory sp In this chapter, we only introduce the training of the table structure model, For model training of [text detection](../../doc/doc_en/detection_en.md) and [text recognition](../../doc/doc_en/recognition_en.md), please refer to the corresponding documents -#### data preparation +* data preparation The training data uses public data set [PubTabNet](https://arxiv.org/abs/1911.10683 ), Can be downloaded from the official [website](https://github.com/ibm-aur-nlp/PubTabNet) 。The PubTabNet data set contains about 500,000 images, as well as annotations in html format。 -#### Start training +* Start training *If you are installing the cpu version of paddle, please modify the `use_gpu` field in the configuration file to false* ```shell # single GPU training @@ -67,7 +77,7 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/ In the above instruction, use `-c` to select the training to use the `configs/table/table_mv3.yml` configuration file. For a detailed explanation of the configuration file, please refer to [config](../../doc/doc_en/config_en.md). -#### load trained model and continue training +* load trained model and continue training If you expect to load trained model and continue the training again, you can specify the parameter `Global.checkpoints` as the model path to be loaded. diff --git a/ppstructure/table/README_ch.md b/ppstructure/table/README_ch.md index ef0f1ae5c4554e69e4cbeb0fcd783e6d98f96a41..39081995e6dd1e0a05fc88d067bab119ca7b6e39 100644 --- a/ppstructure/table/README_ch.md +++ b/ppstructure/table/README_ch.md @@ -1,14 +1,14 @@ -# 表格识别 +- [表格识别](#表格识别) + - [1. 表格识别 pipeline](#1-表格识别-pipeline) + - [2. 性能](#2-性能) + - [3. 使用](#3-使用) + - [3.1 快速开始](#31-快速开始) + - [3.2 训练](#32-训练) + - [3.3 评估](#33-评估) + - [3.4 预测](#34-预测) -* [1. 表格识别 pipeline](#1) -* [2. 性能](#2) -* [3. 使用](#3) - + [3.1 快速开始](#31) - + [3.2 训练](#32) - + [3.3 评估](#33) - + [3.4 预测](#34) +# 表格识别 - ## 1. 表格识别 pipeline 表格识别主要包含三个模型 @@ -28,7 +28,6 @@ 4. 单元格的识别结果和表格结构一起构造表格的html字符串。 - ## 2. 性能 我们在 PubTabNet[1] 评估数据集上对算法进行了评估,性能如下 @@ -38,9 +37,8 @@ | EDD[2] | 88.3 | | Ours | 93.32 | - ## 3. 
使用 - + ### 3.1 快速开始 ```python @@ -61,14 +59,17 @@ python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_ta 运行完成后,每张图片的excel表格会保存到output字段指定的目录下 note: 上述模型是在 PubLayNet 数据集上训练的表格识别模型,仅支持英文扫描场景,如需识别其他场景需要自己训练模型后替换 `det_model_dir`,`rec_model_dir`,`table_model_dir`三个字段即可。 - + ### 3.2 训练 + 在这一章节中,我们仅介绍表格结构模型的训练,[文字检测](../../doc/doc_ch/detection.md)和[文字识别](../../doc/doc_ch/recognition.md)的模型训练请参考对应的文档。 -#### 数据准备 +* 数据准备 + 训练数据使用公开数据集PubTabNet ([论文](https://arxiv.org/abs/1911.10683),[下载地址](https://github.com/ibm-aur-nlp/PubTabNet))。PubTabNet数据集包含约50万张表格数据的图像,以及图像对应的html格式的注释。 -#### 启动训练 +* 启动训练 + *如果您安装的是cpu版本,请将配置文件中的 `use_gpu` 字段修改为false* ```shell # 单机单卡训练 @@ -79,7 +80,7 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/ 上述指令中,通过-c 选择训练使用configs/table/table_mv3.yml配置文件。有关配置文件的详细解释,请参考[链接](../../doc/doc_ch/config.md)。 -#### 断点训练 +* 断点训练 如果训练程序中断,如果希望加载训练中断的模型从而恢复训练,可以通过指定Global.checkpoints指定要加载的模型路径: ```shell @@ -88,7 +89,6 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo **注意**:`Global.checkpoints`的优先级高于`Global.pretrain_weights`的优先级,即同时指定两个参数时,优先加载`Global.checkpoints`指定的模型,如果`Global.checkpoints`指定的模型路径有误,会加载`Global.pretrain_weights`指定的模型。 - ### 3.3 评估 表格使用 [TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下: @@ -113,7 +113,6 @@ python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_di ```bash teds: 93.32 ``` - ### 3.4 预测 ```python diff --git a/ppstructure/vqa/README.md b/ppstructure/vqa/README.md index 7f4ca119f70592e59e4a8ed946bddd589b348b97..b9a82cc5fd971800aaebd9bc4553ba6f0700845e 100644 --- a/ppstructure/vqa/README.md +++ b/ppstructure/vqa/README.md @@ -1,5 +1,23 @@ +- [文档视觉问答(DOC-VQA)](#文档视觉问答doc-vqa) + - [1. 简介](#1-简介) + - [2. 性能](#2-性能) + - [3. 效果演示](#3-效果演示) + - [3.1 SER](#31-ser) + - [3.2 RE](#32-re) + - [4. 安装](#4-安装) + - [4.1 安装依赖](#41-安装依赖) + - [4.2 安装PaddleOCR(包含 PP-OCR 和 VQA)](#42-安装paddleocr包含-pp-ocr-和-vqa) + - [5. 使用](#5-使用) + - [5.1 数据和预训练模型准备](#51-数据和预训练模型准备) + - [5.2 SER](#52-ser) + - [5.3 RE](#53-re) + - [6. 参考链接](#6-参考链接) + + # 文档视觉问答(DOC-VQA) +## 1. 简介 + VQA指视觉问答,主要针对图像内容进行提问和回答,DOC-VQA是VQA任务中的一种,DOC-VQA主要针对文本图像的文字内容提出问题。 PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进行开发。 @@ -16,23 +34,23 @@ PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进 本项目是 [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/pdf/2104.08836.pdf) 在 Paddle 2.2上的开源实现, 包含了在 [XFUND数据集](https://github.com/doc-analysis/XFUND) 上的微调代码。 -## 1 性能 +## 2. 性能 我们在 [XFUN](https://github.com/doc-analysis/XFUND) 的中文数据集上对算法进行了评估,性能如下 | 模型 | 任务 | hmean | 模型下载地址 | |:---:|:---:|:---:| :---:| -| LayoutXLM | RE | 0.7483 | [链接](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar) | | LayoutXLM | SER | 0.9038 | [链接](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutXLM_xfun_zh.tar) | +| LayoutXLM | RE | 0.7483 | [链接](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutXLM_xfun_zh.tar) | +| LayoutLMv2 | SER | 0.8544 | [链接](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutLMv2_xfun_zh.tar) +| LayoutLMv2 | RE | 0.6777 | [链接](https://paddleocr.bj.bcebos.com/pplayout/re_LayoutLMv2_xfun_zh.tar) | | LayoutLM | SER | 0.7731 | [链接](https://paddleocr.bj.bcebos.com/pplayout/ser_LayoutLM_xfun_zh.tar) | - - -## 2. 效果演示 +## 3. 
效果演示 **注意:** 测试图片来源于XFUN数据集。 -### 2.1 SER +### 3.1 SER ![](../../doc/vqa/result_ser/zh_val_0_ser.jpg) | ![](../../doc/vqa/result_ser/zh_val_42_ser.jpg) ---|--- @@ -45,8 +63,7 @@ PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进 在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 - -### 2.2 RE +### 3.2 RE ![](../../doc/vqa/result_re/zh_val_21_re.jpg) | ![](../../doc/vqa/result_re/zh_val_40_re.jpg) ---|--- @@ -54,10 +71,9 @@ PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进 图中红色框表示问题,蓝色框表示答案,问题和答案之间使用绿色线连接。在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 +## 4. 安装 -## 3. 安装 - -### 3.1 安装依赖 +### 4.1 安装依赖 - **(1) 安装PaddlePaddle** @@ -73,8 +89,7 @@ python3 -m pip install "paddlepaddle>=2.2" -i https://mirror.baidu.com/pypi/simp ``` 更多需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 - -### 3.2 安装PaddleOCR(包含 PP-OCR 和 VQA ) +### 4.2 安装PaddleOCR(包含 PP-OCR 和 VQA) - **(1)pip快速安装PaddleOCR whl包(仅预测)** @@ -99,10 +114,9 @@ git clone https://gitee.com/paddlepaddle/PaddleOCR python3 -m pip install -r ppstructure/vqa/requirements.txt ``` -## 4. 使用 +## 5. 使用 - -### 4.1 数据和预训练模型准备 +### 5.1 数据和预训练模型准备 如果希望直接体验预测过程,可以下载我们提供的预训练模型,跳过训练过程,直接预测即可。 @@ -125,7 +139,7 @@ wget https://paddleocr.bj.bcebos.com/dataset/XFUND.tar python3 ppstructure/vqa/helper/trans_xfun_data.py --ori_gt_path=path/to/json_path --output_path=path/to/save_path ``` -### 4.2 SER任务 +### 5.2 SER 启动训练之前,需要修改下面的四个字段 @@ -164,7 +178,7 @@ CUDA_VISIBLE_DEVICES=0 python3 tools/eval.py -c configs/vqa/ser/layoutxlm.yml -o 使用如下命令即可完成`OCR引擎 + SER`的串联预测 ```shell -CUDA_VISIBLE_DEVICES=0 python3 tools/infer_vqa_token_ser.py -c configs/vqa/ser/layoutxlm.yml -o Architecture.Backbone.checkpoints=PP-Layout_v1.0_ser_pretrained/ Global.infer_img=doc/vqa/input/zh_val_42.jpg +CUDA_VISIBLE_DEVICES=0 python3 tools/infer_vqa_token_ser.py -c configs/vqa/ser/layoutxlm.yml -o Architecture.Backbone.checkpoints=ser_LayoutXLM_xfun_zh/ Global.infer_img=doc/vqa/input/zh_val_42.jpg ``` 最终会在`config.Global.save_res_path`字段所配置的目录下保存预测结果可视化图像以及预测结果文本文件,预测结果文本文件名为`infer_results.txt`。 @@ -178,8 +192,7 @@ export CUDA_VISIBLE_DEVICES=0 python3 helper/eval_with_label_end2end.py --gt_json_path XFUND/zh_val/xfun_normalize_val.json --pred_json_path output_res/infer_results.txt ``` - -### 3.3 RE任务 +### 5.3 RE * 启动训练 @@ -219,13 +232,12 @@ CUDA_VISIBLE_DEVICES=0 python3 tools/eval.py -c configs/vqa/re/layoutxlm.yml -o 使用如下命令即可完成`OCR引擎 + SER + RE`的串联预测 ```shell export CUDA_VISIBLE_DEVICES=0 -python3 tools/infer_vqa_token_ser_re.py -c configs/vqa/re/layoutxlm.yml -o Architecture.Backbone.checkpoints=PP-Layout_v1.0_re_pretrained/ Global.infer_img=doc/vqa/input/zh_val_21.jpg -c_ser configs/vqa/ser/layoutxlm.yml -o_ser Architecture.Backbone.checkpoints=PP-Layout_v1.0_ser_pretrained/ +python3 tools/infer_vqa_token_ser_re.py -c configs/vqa/re/layoutxlm.yml -o Architecture.Backbone.checkpoints=re_LayoutXLM_xfun_zh/ Global.infer_img=doc/vqa/input/zh_val_21.jpg -c_ser configs/vqa/ser/layoutxlm.yml -o_ser Architecture.Backbone.checkpoints=ser_LayoutXLM_xfun_zh/ ``` 最终会在`config.Global.save_res_path`字段所配置的目录下保存预测结果可视化图像以及预测结果文本文件,预测结果文本文件名为`infer_results.txt`。 - -## 参考链接 +## 6. 
参考链接 - LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding, https://arxiv.org/pdf/2104.08836.pdf - microsoft/unilm/layoutxlm, https://github.com/microsoft/unilm/tree/master/layoutxlm diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh index fc49cbb3e69771efb49b39c45cd627a314205360..d5b4e2f11a555e4e11aafcc728cdc96ceb5f7fd4 100644 --- a/test_tipc/benchmark_train.sh +++ b/test_tipc/benchmark_train.sh @@ -3,8 +3,6 @@ source test_tipc/common_func.sh # set env python=python -export model_branch=`git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3` -export model_commit=$(git log|head -n1|awk '{print $2}') export str_tmp=$(echo `pip list|grep paddlepaddle-gpu|awk -F ' ' '{print $2}'`) export frame_version=${str_tmp%%.post*} export frame_commit=$(echo `${python} -c "import paddle;print(paddle.version.commit)"`) diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh index 158b8cb8bc25dd1e33e8c7b8d3a8bb76f9ad7624..62451417287228868c33f778f3aae796b53dabcf 100644 --- a/test_tipc/prepare.sh +++ b/test_tipc/prepare.sh @@ -24,7 +24,17 @@ if [ ${MODE} = "benchmark_train" ];then pip install -r requirements.txt if [[ ${model_name} =~ "det_mv3_db_v2_0" || ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" || ${model_name} =~ "det_r18_db_v2_0" ]];then rm -rf ./train_data/icdar2015 - wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate + cd ./train_data/ && tar xf icdar2015.tar && cd ../ + fi + if [[ ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" ]];then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate + cd ./train_data/ && tar xf icdar2015.tar && cd ../ + fi + if [[ ${model_name} =~ "det_r18_db_v2_0" ]];then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet18_vd_pretrained.pdparams --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate cd ./train_data/ && tar xf icdar2015.tar && cd ../ fi diff --git a/test_tipc/readme.md b/test_tipc/readme.md index 7b7548cd7296760d4caec0ed741c47137d86ece1..8110f0073be248259c7cdd002d209c150a52fb71 100644 --- a/test_tipc/readme.md +++ b/test_tipc/readme.md @@ -28,32 +28,32 @@ | DB |ch_ppocr_mobile_v2.0_det_PACT | 检测 | 支持 | 多机多卡
混合精度 | PACT量化 | Paddle Inference: C++
Paddle Serving: Python, C++
Paddle-Lite:
(1) ARM CPU(C++) | | DB |ch_ppocr_mobile_v2.0_det_KL | 检测 | 支持 | 多机多卡
混合精度 | 离线量化| Paddle Inference: C++
Paddle Serving: Python, C++
Paddle-Lite:
(1) ARM CPU(C++) | | DB |ch_ppocr_server_v2.0_det | 检测 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++ | -| DB |ch_PP-OCRv2_det | 检测 | +| DB |ch_PP-OCRv2_det | 检测 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++ | | CRNN |ch_ppocr_mobile_v2.0_rec | 识别 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++
Paddle-Lite:
(1) ARM CPU(C++) | | CRNN |ch_ppocr_server_v2.0_rec | 识别 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++ | -| CRNN |ch_PP-OCRv2_rec | 识别 | +| CRNN |ch_PP-OCRv2_rec | 识别 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++ | | PP-OCR |ch_ppocr_mobile_v2.0 | 检测+识别 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++
Paddle-Lite:
(1) ARM CPU(C++) | | PP-OCR |ch_ppocr_server_v2.0 | 检测+识别 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++ | -|PP-OCRv2|ch_PP-OCRv2 | 检测+识别 | -| DB |det_mv3_db_v2.0 | 检测 | -| DB |det_r50_vd_db_v2.0 | 检测 | -| EAST |det_mv3_east_v2.0 | 检测 | -| EAST |det_r50_vd_east_v2.0 | 检测 | -| PSENet |det_mv3_pse_v2.0 | 检测 | -| PSENet |det_r50_vd_pse_v2.0 | 检测 | -| SAST |det_r50_vd_sast_totaltext_v2.0 | 检测 | -| Rosetta|rec_mv3_none_none_ctc_v2.0 | 识别 | -| Rosetta|rec_r34_vd_none_none_ctc_v2.0 | 识别 | -| CRNN |rec_mv3_none_bilstm_ctc_v2.0 | 识别 | -| CRNN |rec_r34_vd_none_bilstm_ctc_v2.0| 识别 | -| StarNet|rec_mv3_tps_bilstm_ctc_v2.0 | 识别 | -| StarNet|rec_r34_vd_tps_bilstm_ctc_v2.0 | 识别 | -| RARE |rec_mv3_tps_bilstm_att_v2.0 | 识别 | -| RARE |rec_r34_vd_tps_bilstm_att_v2.0 | 识别 | -| SRN |rec_r50fpn_vd_none_srn | 识别 | -| NRTR |rec_mtb_nrtr | 识别 | -| SAR |rec_r31_sar | 识别 | -| PGNet |rec_r34_vd_none_none_ctc_v2.0 | 端到端| +|PP-OCRv2|ch_PP-OCRv2 | 检测+识别 | 支持 | 多机多卡
混合精度 | - | Paddle Inference: C++
Paddle Serving: Python, C++ | +| DB |det_mv3_db_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| DB |det_r50_vd_db_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| EAST |det_mv3_east_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| EAST |det_r50_vd_east_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| PSENet |det_mv3_pse_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| PSENet |det_r50_vd_pse_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| SAST |det_r50_vd_sast_totaltext_v2.0 | 检测 | 支持 | 多机多卡
混合精度 | - | - | +| Rosetta|rec_mv3_none_none_ctc_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| Rosetta|rec_r34_vd_none_none_ctc_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| CRNN |rec_mv3_none_bilstm_ctc_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| CRNN |rec_r34_vd_none_bilstm_ctc_v2.0| 识别 | 支持 | 多机多卡
混合精度 | - | - | +| StarNet|rec_mv3_tps_bilstm_ctc_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| StarNet|rec_r34_vd_tps_bilstm_ctc_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| RARE |rec_mv3_tps_bilstm_att_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| RARE |rec_r34_vd_tps_bilstm_att_v2.0 | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| SRN |rec_r50fpn_vd_none_srn | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| NRTR |rec_mtb_nrtr | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| SAR |rec_r31_sar | 识别 | 支持 | 多机多卡
混合精度 | - | - | +| PGNet |rec_r34_vd_none_none_ctc_v2.0 | 端到端| 支持 | 多机多卡
混合精度 | - | - | diff --git a/tools/program.py b/tools/program.py index c5b0e69b2d7256a1efe6b13efeea265cfcb3f5df..e92bef330056a2fe5ca53ed31f02422f43bbee4c 100755 --- a/tools/program.py +++ b/tools/program.py @@ -130,6 +130,25 @@ def check_gpu(use_gpu): pass +def check_xpu(use_xpu): + """ + Log error and exit when set use_xpu=true in paddlepaddle + cpu/gpu version. + """ + err = "Config use_xpu cannot be set as true while you are " \ + "using paddlepaddle cpu/gpu version ! \nPlease try: \n" \ + "\t1. Install paddlepaddle-xpu to run model on XPU \n" \ + "\t2. Set use_xpu as false in config file to run " \ + "model on CPU/GPU" + + try: + if use_xpu and not paddle.is_compiled_with_xpu(): + print(err) + sys.exit(1) + except Exception as e: + pass + + def train(config, train_dataloader, valid_dataloader, @@ -512,6 +531,12 @@ def preprocess(is_train=False): use_gpu = config['Global']['use_gpu'] check_gpu(use_gpu) + # check if set use_xpu=True in paddlepaddle cpu/gpu version + use_xpu = False + if 'use_xpu' in config['Global']: + use_xpu = config['Global']['use_xpu'] + check_xpu(use_xpu) + alg = config['Architecture']['algorithm'] assert alg in [ 'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN', @@ -519,7 +544,11 @@ def preprocess(is_train=False): 'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM' ] - device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_gpu else 'cpu' + device = 'cpu' + if use_gpu: + device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) + if use_xpu: + device = 'xpu' device = paddle.set_device(device) config['Global']['distributed'] = dist.get_world_size() != 1
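
The `tools/program.py` hunk above adds `check_xpu()` and extends device selection so that `use_xpu` takes precedence over `use_gpu`, with CPU as the fallback. A minimal sketch of that precedence is shown below; the `select_device` helper and the literal `Global` dict are illustrative stand-ins rather than code from this patch, and it assumes a working PaddlePaddle installation.

```python
import paddle
import paddle.distributed as dist


def select_device(global_cfg):
    # Mirror the precedence added in preprocess(): use_xpu > use_gpu > cpu.
    use_gpu = global_cfg.get('use_gpu', False)
    use_xpu = global_cfg.get('use_xpu', False)
    device = 'cpu'
    if use_gpu:
        device = 'gpu:{}'.format(dist.ParallelEnv().dev_id)
    if use_xpu:
        device = 'xpu'
    # paddle.set_device makes the chosen place the default and returns it.
    return paddle.set_device(device)


if __name__ == '__main__':
    # Hypothetical Global section from a config file; real configs carry many more keys.
    place = select_device({'use_gpu': False, 'use_xpu': False})
    print(place)  # e.g. Place(cpu) on a CPU-only build
```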