From 773b687f5a5a6d3115e27ac15070fd13ea6bb5ce Mon Sep 17 00:00:00 2001
From: Topdu <784990967@qq.com>
Date: Fri, 24 Jun 2022 07:25:50 +0000
Subject: [PATCH] add abinet_rec_aug and trained model
---
configs/rec/rec_r45_abinet.yml | 3 +-
doc/doc_ch/algorithm_rec_abinet.md | 4 +-
doc/doc_en/algorithm_rec_abinet_en.md | 4 +-
ppocr/data/imaug/__init__.py | 2 +-
ppocr/data/imaug/abinet_aug.py | 407 ++++++++++++++++++
ppocr/data/imaug/rec_img_aug.py | 27 ++
.../configs/rec_r45_abinet/rec_r45_abinet.yml | 1 +
7 files changed, 442 insertions(+), 6 deletions(-)
create mode 100644 ppocr/data/imaug/abinet_aug.py
diff --git a/configs/rec/rec_r45_abinet.yml b/configs/rec/rec_r45_abinet.yml
index a756fead..3cf4cddb 100644
--- a/configs/rec/rec_r45_abinet.yml
+++ b/configs/rec/rec_r45_abinet.yml
@@ -8,7 +8,7 @@ Global:
# evaluation is run every 2000 iterations
eval_batch_step: [0, 2000]
cal_metric_during_train: True
- pretrained_model:
+ pretrained_model: ./rec_r45_abinet_train/abinet_vl_pretrained
checkpoints:
save_inference_dir:
use_visualdl: False
@@ -67,6 +67,7 @@ Train:
- DecodeImage: # load image
img_mode: RGB
channel_first: False
+ - ABINetRecAug:
- ABINetLabelEncode: # Class handling label
ignore_index: *ignore_index
- ABINetRecResizeImg:
diff --git a/doc/doc_ch/algorithm_rec_abinet.md b/doc/doc_ch/algorithm_rec_abinet.md
index d20c7030..47507c36 100644
--- a/doc/doc_ch/algorithm_rec_abinet.md
+++ b/doc/doc_ch/algorithm_rec_abinet.md
@@ -27,7 +27,7 @@
|Model|Backbone|Config|Acc|Download link|
| --- | --- | --- | --- | --- |
-|ABINet|ResNet45|[rec_r45_abinet.yml](../../configs/rec/rec_r45_abinet.yml)|90.75%|[trained model]()/[pretrained model]|
+|ABINet|ResNet45|[rec_r45_abinet.yml](../../configs/rec/rec_r45_abinet.yml)|90.75%|[pretrained & trained model](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar)|
## 2. Environment Setup
@@ -80,7 +80,7 @@ python3 tools/infer_rec.py -c configs/rec/rec_r45_abinet.yml -o Global.infer_img
### 4.1 Python Inference
-First, convert the best model obtained from training into an inference model. Taking the trained model as an example ([model download link]() ), you can use the following command to convert it:
+First, convert the best model obtained from training into an inference model. Taking the trained model as an example ([model download link](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar) ), you can use the following command to convert it:
```shell
# Note: set the pretrained_model path to a local path.
diff --git a/doc/doc_en/algorithm_rec_abinet_en.md b/doc/doc_en/algorithm_rec_abinet_en.md
index 3b0f6c09..767ca65f 100644
--- a/doc/doc_en/algorithm_rec_abinet_en.md
+++ b/doc/doc_en/algorithm_rec_abinet_en.md
@@ -25,7 +25,7 @@ Using MJSynth and SynthText two text recognition datasets for training, and eval
|Model|Backbone|config|Acc|Download link|
| --- | --- | --- | --- | --- |
-|ABINet|ResNet45|[rec_r45_abinet.yml](../../configs/rec/rec_r45_abinet.yml)|90.75%|[trained model]()/[pretrained model]()|
+|ABINet|ResNet45|[rec_r45_abinet.yml](../../configs/rec/rec_r45_abinet.yml)|90.75%|[pretrained & trained model](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar)|
## 2. Environment
@@ -68,7 +68,7 @@ python3 tools/infer_rec.py -c configs/rec/rec_r45_abinet.yml -o Global.infer_img
### 4.1 Python Inference
-First, the model saved during the ABINet text recognition training process is converted into an inference model. ( [Model download link]()) ), you can use the following command to convert:
+First, convert the model saved during ABINet text recognition training into an inference model. Taking the trained model as an example ([model download link](https://paddleocr.bj.bcebos.com/rec_r45_abinet_train.tar)), you can use the following command to convert it:
```
python3 tools/export_model.py -c configs/rec/rec_r45_abinet.yml -o Global.pretrained_model=./rec_r45_abinet_train/best_accuracy Global.save_inference_dir=./inference/rec_r45_abinet
diff --git a/ppocr/data/imaug/__init__.py b/ppocr/data/imaug/__init__.py
index 437e0152..63dfda91 100644
--- a/ppocr/data/imaug/__init__.py
+++ b/ppocr/data/imaug/__init__.py
@@ -25,7 +25,7 @@ from .make_pse_gt import MakePseGt
from .rec_img_aug import RecAug, RecConAug, RecResizeImg, ClsResizeImg, \
SRNRecResizeImg, GrayRecResizeImg, SARRecResizeImg, PRENResizeImg, \
- ABINetRecResizeImg, SVTRRecResizeImg
+ ABINetRecResizeImg, SVTRRecResizeImg, ABINetRecAug
from .ssl_img_aug import SSLRotateResize
from .randaugment import RandAugment
from .copy_paste import CopyPaste
diff --git a/ppocr/data/imaug/abinet_aug.py b/ppocr/data/imaug/abinet_aug.py
new file mode 100644
index 00000000..eefdc75d
--- /dev/null
+++ b/ppocr/data/imaug/abinet_aug.py
@@ -0,0 +1,407 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is adapted from:
+https://github.com/FangShancheng/ABINet/blob/main/transforms.py
+"""
+import math
+import numbers
+import random
+
+import cv2
+import numpy as np
+from paddle.vision.transforms import Compose, ColorJitter
+
+
+def sample_asym(magnitude, size=None):
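+    # Beta(1, 4) is skewed toward 0, so samples are usually a small fraction of magnitude.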
+ return np.random.beta(1, 4, size) * magnitude
+
+
+def sample_sym(magnitude, size=None):
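+    # Beta(4, 4) is symmetric about 0.5; shifting and scaling yields values in [-magnitude, magnitude].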
+ return (np.random.beta(4, 4, size=size) - 0.5) * 2 * magnitude
+
+
+def sample_uniform(low, high, size=None):
+ return np.random.uniform(low, high, size=size)
+
+
+def get_interpolation(type='random'):
+ if type == 'random':
+ choice = [
+ cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA
+ ]
+ interpolation = choice[random.randint(0, len(choice) - 1)]
+ elif type == 'nearest':
+ interpolation = cv2.INTER_NEAREST
+ elif type == 'linear':
+ interpolation = cv2.INTER_LINEAR
+ elif type == 'cubic':
+ interpolation = cv2.INTER_CUBIC
+ elif type == 'area':
+ interpolation = cv2.INTER_AREA
+ else:
+        raise TypeError(
+            'Only nearest, linear, cubic and area interpolation types are supported!'
+        )
+ return interpolation
+
+
+class CVRandomRotation(object):
+ def __init__(self, degrees=15):
+ assert isinstance(degrees,
+ numbers.Number), "degree should be a single number."
+ assert degrees >= 0, "degree must be positive."
+ self.degrees = degrees
+
+ @staticmethod
+ def get_params(degrees):
+ return sample_sym(degrees)
+
+ def __call__(self, img):
+ angle = self.get_params(self.degrees)
+ src_h, src_w = img.shape[:2]
+ M = cv2.getRotationMatrix2D(
+ center=(src_w / 2, src_h / 2), angle=angle, scale=1.0)
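+        # Expand the output canvas so the rotated image is fully contained.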
+ abs_cos, abs_sin = abs(M[0, 0]), abs(M[0, 1])
+ dst_w = int(src_h * abs_sin + src_w * abs_cos)
+ dst_h = int(src_h * abs_cos + src_w * abs_sin)
+ M[0, 2] += (dst_w - src_w) / 2
+ M[1, 2] += (dst_h - src_h) / 2
+
+ flags = get_interpolation()
+ return cv2.warpAffine(
+ img,
+ M, (dst_w, dst_h),
+ flags=flags,
+ borderMode=cv2.BORDER_REPLICATE)
+
+
+class CVRandomAffine(object):
+ def __init__(self, degrees, translate=None, scale=None, shear=None):
+ assert isinstance(degrees,
+ numbers.Number), "degree should be a single number."
+ assert degrees >= 0, "degree must be positive."
+ self.degrees = degrees
+
+ if translate is not None:
+ assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
+ "translate should be a list or tuple and it must be of length 2."
+ for t in translate:
+ if not (0.0 <= t <= 1.0):
+ raise ValueError(
+ "translation values should be between 0 and 1")
+ self.translate = translate
+
+ if scale is not None:
+ assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
+ "scale should be a list or tuple and it must be of length 2."
+ for s in scale:
+ if s <= 0:
+ raise ValueError("scale values should be positive")
+ self.scale = scale
+
+ if shear is not None:
+ if isinstance(shear, numbers.Number):
+ if shear < 0:
+ raise ValueError(
+ "If shear is a single number, it must be positive.")
+ self.shear = [shear]
+ else:
+ assert isinstance(shear, (tuple, list)) and (len(shear) == 2), \
+ "shear should be a list or tuple and it must be of length 2."
+ self.shear = shear
+ else:
+ self.shear = shear
+
+ def _get_inverse_affine_matrix(self, center, angle, translate, scale,
+ shear):
+ # https://github.com/pytorch/vision/blob/v0.4.0/torchvision/transforms/functional.py#L717
+ from numpy import sin, cos, tan
+
+ if isinstance(shear, numbers.Number):
+ shear = [shear, 0]
+
+        if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
+ raise ValueError(
+ "Shear should be a single value or a tuple/list containing " +
+ "two values. Got {}".format(shear))
+
+ rot = math.radians(angle)
+ sx, sy = [math.radians(s) for s in shear]
+
+ cx, cy = center
+ tx, ty = translate
+
+ # RSS without scaling
+ a = cos(rot - sy) / cos(sy)
+ b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
+ c = sin(rot - sy) / cos(sy)
+ d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)
+
+ # Inverted rotation matrix with scale and shear
+ # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
+ M = [d, -b, 0, -c, a, 0]
+ M = [x / scale for x in M]
+
+ # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
+ M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
+ M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)
+
+ # Apply center translation: C * RSS^-1 * C^-1 * T^-1
+ M[2] += cx
+ M[5] += cy
+ return M
+
+ @staticmethod
+ def get_params(degrees, translate, scale_ranges, shears, height):
+ angle = sample_sym(degrees)
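+        # Note: both x and y translation limits are scaled by the image height.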
+ if translate is not None:
+ max_dx = translate[0] * height
+ max_dy = translate[1] * height
+ translations = (np.round(sample_sym(max_dx)),
+ np.round(sample_sym(max_dy)))
+ else:
+ translations = (0, 0)
+
+ if scale_ranges is not None:
+ scale = sample_uniform(scale_ranges[0], scale_ranges[1])
+ else:
+ scale = 1.0
+
+ if shears is not None:
+ if len(shears) == 1:
+ shear = [sample_sym(shears[0]), 0.]
+ elif len(shears) == 2:
+ shear = [sample_sym(shears[0]), sample_sym(shears[1])]
+ else:
+ shear = 0.0
+
+ return angle, translations, scale, shear
+
+ def __call__(self, img):
+ src_h, src_w = img.shape[:2]
+ angle, translate, scale, shear = self.get_params(
+ self.degrees, self.translate, self.scale, self.shear, src_h)
+
+ M = self._get_inverse_affine_matrix((src_w / 2, src_h / 2), angle,
+ (0, 0), scale, shear)
+ M = np.array(M).reshape(2, 3)
+
+ startpoints = [(0, 0), (src_w - 1, 0), (src_w - 1, src_h - 1),
+ (0, src_h - 1)]
+ project = lambda x, y, a, b, c: int(a * x + b * y + c)
+ endpoints = [(project(x, y, *M[0]), project(x, y, *M[1]))
+ for x, y in startpoints]
+
+ rect = cv2.minAreaRect(np.array(endpoints))
+        bbox = cv2.boxPoints(rect).astype(np.int32)
+ max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
+ min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
+
+ dst_w = int(max_x - min_x)
+ dst_h = int(max_y - min_y)
+ M[0, 2] += (dst_w - src_w) / 2
+ M[1, 2] += (dst_h - src_h) / 2
+
+ # add translate
+ dst_w += int(abs(translate[0]))
+ dst_h += int(abs(translate[1]))
+ if translate[0] < 0: M[0, 2] += abs(translate[0])
+ if translate[1] < 0: M[1, 2] += abs(translate[1])
+
+ flags = get_interpolation()
+ return cv2.warpAffine(
+ img,
+ M, (dst_w, dst_h),
+ flags=flags,
+ borderMode=cv2.BORDER_REPLICATE)
+
+
+class CVRandomPerspective(object):
+ def __init__(self, distortion=0.5):
+ self.distortion = distortion
+
+ def get_params(self, width, height, distortion):
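+        # Pull each corner inward by an asymmetric random offset to form the target quadrilateral.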
+        offset_h = sample_asym(
+            distortion * height / 2, size=4).astype(np.int32)
+        offset_w = sample_asym(
+            distortion * width / 2, size=4).astype(np.int32)
+ topleft = (offset_w[0], offset_h[0])
+ topright = (width - 1 - offset_w[1], offset_h[1])
+ botright = (width - 1 - offset_w[2], height - 1 - offset_h[2])
+ botleft = (offset_w[3], height - 1 - offset_h[3])
+
+ startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1),
+ (0, height - 1)]
+ endpoints = [topleft, topright, botright, botleft]
+ return np.array(
+ startpoints, dtype=np.float32), np.array(
+ endpoints, dtype=np.float32)
+
+ def __call__(self, img):
+ height, width = img.shape[:2]
+ startpoints, endpoints = self.get_params(width, height, self.distortion)
+ M = cv2.getPerspectiveTransform(startpoints, endpoints)
+
+ # TODO: more robust way to crop image
+ rect = cv2.minAreaRect(endpoints)
+        bbox = cv2.boxPoints(rect).astype(np.int32)
+ max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
+ min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
+ min_x, min_y = max(min_x, 0), max(min_y, 0)
+
+ flags = get_interpolation()
+ img = cv2.warpPerspective(
+ img,
+ M, (max_x, max_y),
+ flags=flags,
+ borderMode=cv2.BORDER_REPLICATE)
+ img = img[min_y:, min_x:]
+ return img
+
+
+class CVRescale(object):
+ def __init__(self, factor=4, base_size=(128, 512)):
+ """ Define image scales using gaussian pyramid and rescale image to target scale.
+
+ Args:
+ factor: the decayed factor from base size, factor=4 keeps target scale by default.
+ base_size: base size the build the bottom layer of pyramid
+ """
+ if isinstance(factor, numbers.Number):
+ self.factor = round(sample_uniform(0, factor))
+ elif isinstance(factor, (tuple, list)) and len(factor) == 2:
+ self.factor = round(sample_uniform(factor[0], factor[1]))
+ else:
+            raise Exception('factor must be a number or a list of length 2')
+ # assert factor is valid
+ self.base_h, self.base_w = base_size[:2]
+
+ def __call__(self, img):
+        if self.factor == 0:
+            return img
+ src_h, src_w = img.shape[:2]
+ cur_w, cur_h = self.base_w, self.base_h
+ scale_img = cv2.resize(
+ img, (cur_w, cur_h), interpolation=get_interpolation())
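+        # Each pyrDown halves the resolution; resizing back to the source size keeps the quality loss.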
+ for _ in range(self.factor):
+ scale_img = cv2.pyrDown(scale_img)
+ scale_img = cv2.resize(
+ scale_img, (src_w, src_h), interpolation=get_interpolation())
+ return scale_img
+
+
+class CVGaussianNoise(object):
+ def __init__(self, mean=0, var=20):
+ self.mean = mean
+ if isinstance(var, numbers.Number):
+ self.var = max(int(sample_asym(var)), 1)
+ elif isinstance(var, (tuple, list)) and len(var) == 2:
+ self.var = int(sample_uniform(var[0], var[1]))
+ else:
+            raise Exception('var must be a number or a list of length 2')
+
+ def __call__(self, img):
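+        # Add pixel-wise Gaussian noise, then clip back to the valid uint8 range.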
+ noise = np.random.normal(self.mean, self.var**0.5, img.shape)
+ img = np.clip(img + noise, 0, 255).astype(np.uint8)
+ return img
+
+
+class CVMotionBlur(object):
+ def __init__(self, degrees=12, angle=90):
+ if isinstance(degrees, numbers.Number):
+ self.degree = max(int(sample_asym(degrees)), 1)
+ elif isinstance(degrees, (tuple, list)) and len(degrees) == 2:
+ self.degree = int(sample_uniform(degrees[0], degrees[1]))
+ else:
+            raise Exception('degrees must be a number or a list of length 2')
+ self.angle = sample_uniform(-angle, angle)
+
+ def __call__(self, img):
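+        # Convolve with a line kernel of length `degree` rotated by `angle` to simulate motion blur.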
+ M = cv2.getRotationMatrix2D((self.degree // 2, self.degree // 2),
+ self.angle, 1)
+ motion_blur_kernel = np.zeros((self.degree, self.degree))
+ motion_blur_kernel[self.degree // 2, :] = 1
+ motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M,
+ (self.degree, self.degree))
+ motion_blur_kernel = motion_blur_kernel / self.degree
+ img = cv2.filter2D(img, -1, motion_blur_kernel)
+ img = np.clip(img, 0, 255).astype(np.uint8)
+ return img
+
+
+class CVGeometry(object):
+ def __init__(self,
+ degrees=15,
+ translate=(0.3, 0.3),
+ scale=(0.5, 2.),
+ shear=(45, 15),
+ distortion=0.5,
+ p=0.5):
+ self.p = p
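+        # Note: the transform type is sampled once per augmenter instance, not per image.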
+ type_p = random.random()
+ if type_p < 0.33:
+ self.transforms = CVRandomRotation(degrees=degrees)
+ elif type_p < 0.66:
+ self.transforms = CVRandomAffine(
+ degrees=degrees, translate=translate, scale=scale, shear=shear)
+ else:
+ self.transforms = CVRandomPerspective(distortion=distortion)
+
+ def __call__(self, img):
+ if random.random() < self.p:
+ return self.transforms(img)
+ else:
+ return img
+
+
+class CVDeterioration(object):
+ def __init__(self, var, degrees, factor, p=0.5):
+ self.p = p
+ transforms = []
+ if var is not None:
+ transforms.append(CVGaussianNoise(var=var))
+ if degrees is not None:
+ transforms.append(CVMotionBlur(degrees=degrees))
+ if factor is not None:
+ transforms.append(CVRescale(factor=factor))
+
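+        # Apply the selected degradations in a random, per-instance order.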
+ random.shuffle(transforms)
+ transforms = Compose(transforms)
+ self.transforms = transforms
+
+    def __call__(self, img):
+        if random.random() < self.p:
+            return self.transforms(img)
+        else:
+            return img
+
+
+class CVColorJitter(object):
+ def __init__(self,
+ brightness=0.5,
+ contrast=0.5,
+ saturation=0.5,
+ hue=0.1,
+ p=0.5):
+ self.p = p
+ self.transforms = ColorJitter(
+ brightness=brightness,
+ contrast=contrast,
+ saturation=saturation,
+ hue=hue)
+
+    def __call__(self, img):
+        if random.random() < self.p:
+            return self.transforms(img)
+        else:
+            return img
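+
+
+if __name__ == '__main__':
+    # Minimal smoke test (a sketch, not part of the training pipeline): apply
+    # the same geometry / deterioration / color-jitter chain that ABINetRecAug
+    # composes in rec_img_aug.py to a random uint8 image.
+    img = (np.random.rand(32, 128, 3) * 255).astype(np.uint8)
+    aug = Compose([
+        CVGeometry(
+            degrees=45,
+            translate=(0.0, 0.0),
+            scale=(0.5, 2.),
+            shear=(45, 15),
+            distortion=0.5,
+            p=0.5),
+        CVDeterioration(
+            var=20, degrees=6, factor=4, p=0.25),
+        CVColorJitter(
+            brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1, p=0.25),
+    ])
+    out = aug(img)
+    print('in:', img.shape, '-> out:', out.shape, out.dtype)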
diff --git a/ppocr/data/imaug/rec_img_aug.py b/ppocr/data/imaug/rec_img_aug.py
index 874d9aa0..92f73827 100644
--- a/ppocr/data/imaug/rec_img_aug.py
+++ b/ppocr/data/imaug/rec_img_aug.py
@@ -19,6 +19,8 @@ import random
import copy
from PIL import Image
from .text_image_aug import tia_perspective, tia_stretch, tia_distort
+from .abinet_aug import CVGeometry, CVDeterioration, CVColorJitter
+from paddle.vision.transforms import Compose
class RecAug(object):
@@ -94,6 +96,31 @@ class BaseDataAugmentation(object):
return data
+class ABINetRecAug(object):
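+    """ABINet training-time augmentation: a Compose of CVGeometry (rotation /
+    affine / perspective), CVDeterioration (Gaussian noise, motion blur,
+    rescale) and CVColorJitter, applied to the decoded image before label
+    encoding and resizing."""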
+    def __init__(self, **kwargs):
+        self.transforms = Compose([
+            CVGeometry(
+                degrees=45,
+                translate=(0.0, 0.0),
+                scale=(0.5, 2.),
+                shear=(45, 15),
+                distortion=0.5,
+                p=0.5),
+            CVDeterioration(
+                var=20, degrees=6, factor=4, p=0.25),
+            CVColorJitter(
+                brightness=0.5,
+                contrast=0.5,
+                saturation=0.5,
+                hue=0.1,
+                p=0.25)
+        ])
+
+ def __call__(self, data):
+ img = data['image']
+ img = self.transforms(img)
+ data['image'] = img
+ return data
+
+
class RecConAug(object):
def __init__(self,
prob=0.5,
diff --git a/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml b/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml
index 306c49d6..5b5890e7 100644
--- a/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml
+++ b/test_tipc/configs/rec_r45_abinet/rec_r45_abinet.yml
@@ -68,6 +68,7 @@ Train:
- DecodeImage: # load image
img_mode: RGB
channel_first: False
+ - ABINetRecAug:
- ABINetLabelEncode: # Class handling label
ignore_index: *ignore_index
- ABINetRecResizeImg:
--
GitLab