Commit e7ad27c3 authored by LDOUBLEV

fix conflicts

@@ -45,9 +45,7 @@ Optimizer:
   beta1: 0.9
   beta2: 0.999
   lr:
-    # name: Cosine
     learning_rate: 0.001
-    # warmup_epoch: 0
   regularizer:
     name: 'L2'
     factor: 0
......
Global:
  use_gpu: true
  epoch_num: 10000
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/east_mv3/
  save_epoch_step: 1000
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  load_static_weights: True
  cal_metric_during_train: False
  pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  save_res_path: ./output/det_east/predicts_east.txt

Architecture:
  model_type: det
  algorithm: EAST
  Transform:
  Backbone:
    name: MobileNetV3
    scale: 0.5
    model_name: large
  Neck:
    name: EASTFPN
    model_name: small
  Head:
    name: EASTHead
    model_name: small

Loss:
  name: EASTLoss

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    # name: Cosine
    learning_rate: 0.001
    # warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: EASTPostProcess
  score_thresh: 0.8
  cover_thresh: 0.1
  nms_thresh: 0.2

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
    ratio_list: [1.0]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - EASTProcessTrain:
          image_shape: [512, 512]
          background_ratio: 0.125
          min_crop_side_ratio: 0.1
          min_text_size: 10
      - KeepKeys:
          keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 16
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          limit_side_len: 2400
          limit_type: max
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 2
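A config file like the one above is consumed by the PaddleOCR training entry point. As a sketch (assuming the file is saved as `configs/det/det_mv3_east.yml` and the repo root is the working directory); the same pattern applies to the other config files below:

```shell
# train the MobileNetV3 EAST detector; -o overrides individual config options
python3 tools/train.py -c configs/det/det_mv3_east.yml \
    -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
```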
Global:
  use_gpu: true
  epoch_num: 1200
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/det_rc/det_r50_vd/
  save_epoch_step: 1200
  # evaluation is run every 4000 iterations after the 5000th iteration
  eval_batch_step: [5000, 4000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  load_static_weights: True
  cal_metric_during_train: False
  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_en/img_10.jpg
  save_res_path: ./output/det_db/predicts_db.txt

Architecture:
  model_type: det
  algorithm: DB
  Transform:
  Backbone:
    name: ResNet
    layers: 50
  Neck:
    name: DBFPN
    out_channels: 256
  Head:
    name: DBHead
    k: 50

Loss:
  name: DBLoss
  balance_loss: true
  main_loss_type: DiceLoss
  alpha: 5
  beta: 10
  ohem_ratio: 3

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.001
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: DBPostProcess
  thresh: 0.3
  box_thresh: 0.7
  max_candidates: 1000
  unclip_ratio: 1.5

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
    ratio_list: [0.5]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - IaaAugment:
          augmenter_args:
            - { 'type': Fliplr, 'args': { 'p': 0.5 } }
            - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
            - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
      - EastRandomCropData:
          size: [640, 640]
          max_tries: 50
          keep_ratio: true
      - MakeBorderMap:
          shrink_ratio: 0.4
          thresh_min: 0.3
          thresh_max: 0.7
      - MakeShrinkMap:
          shrink_ratio: 0.4
          min_text_size: 8
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 16
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          image_shape: [736, 1280]
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 8
Global:
  use_gpu: true
  epoch_num: 10000
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/east_r50_vd/
  save_epoch_step: 1000
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  load_static_weights: True
  cal_metric_during_train: False
  pretrained_model: ./pretrain_models/ResNet50_vd_pretrained/
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  save_res_path: ./output/det_east/predicts_east.txt

Architecture:
  model_type: det
  algorithm: EAST
  Transform:
  Backbone:
    name: ResNet
    layers: 50
  Neck:
    name: EASTFPN
    model_name: large
  Head:
    name: EASTHead
    model_name: large

Loss:
  name: EASTLoss

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    # name: Cosine
    learning_rate: 0.001
    # warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: EASTPostProcess
  score_thresh: 0.8
  cover_thresh: 0.1
  nms_thresh: 0.2

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
    ratio_list: [1.0]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - EASTProcessTrain:
          image_shape: [512, 512]
          background_ratio: 0.125
          min_crop_side_ratio: 0.1
          min_text_size: 10
      - KeepKeys:
          keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 8
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          limit_side_len: 2400
          limit_type: max
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 2
Global:
  use_gpu: true
  epoch_num: 5000
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/sast_r50_vd_ic15/
  save_epoch_step: 1000
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  load_static_weights: True
  cal_metric_during_train: False
  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  save_res_path: ./output/sast_r50_vd_ic15/predicts_sast.txt

Architecture:
  model_type: det
  algorithm: SAST
  Transform:
  Backbone:
    name: ResNet_SAST
    layers: 50
  Neck:
    name: SASTFPN
    with_cab: True
  Head:
    name: SASTHead

Loss:
  name: SASTLoss

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    # name: Cosine
    learning_rate: 0.001
    # warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: SASTPostProcess
  score_thresh: 0.5
  sample_pts_num: 2
  nms_thresh: 0.2
  expand_scale: 1.0
  shrink_ratio_of_width: 0.3

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/
    label_file_list: [./train_data/art_latin_icdar_14pt/train_no_tt_test/train_label_json.txt, ./train_data/total_text_icdar_14pt/train_label_json.txt]
    ratio_list: [0.5, 0.5]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - SASTProcessTrain:
          image_shape: [512, 512]
          min_crop_side_ratio: 0.3
          min_crop_size: 24
          min_text_size: 4
          max_text_size: 512
      - KeepKeys:
          keep_keys: ['image', 'score_map', 'border_map', 'training_mask', 'tvo_map', 'tco_map'] # dataloader will return list in this order
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 4
    num_workers: 4

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/icdar2015/text_localization/
    label_file_list:
      - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          resize_long: 1536
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 2
Global:
  use_gpu: true
  epoch_num: 5000
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/sast_r50_vd_tt/
  save_epoch_step: 1000
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  load_static_weights: True
  cal_metric_during_train: False
  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  save_res_path: ./output/sast_r50_vd_tt/predicts_sast.txt

Architecture:
  model_type: det
  algorithm: SAST
  Transform:
  Backbone:
    name: ResNet_SAST
    layers: 50
  Neck:
    name: SASTFPN
    with_cab: True
  Head:
    name: SASTHead

Loss:
  name: SASTLoss

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    # name: Cosine
    learning_rate: 0.001
    # warmup_epoch: 0
  regularizer:
    name: 'L2'
    factor: 0

PostProcess:
  name: SASTPostProcess
  score_thresh: 0.5
  sample_pts_num: 6
  nms_thresh: 0.2
  expand_scale: 1.2
  shrink_ratio_of_width: 0.2

Metric:
  name: DetMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/
    label_file_list: [./train_data/icdar2013/train_label_json.txt, ./train_data/icdar2015/train_label_json.txt, ./train_data/icdar17_mlt_latin/train_label_json.txt, ./train_data/coco_text_icdar_4pts/train_label_json.txt]
    ratio_list: [0.1, 0.45, 0.3, 0.15]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - SASTProcessTrain:
          image_shape: [512, 512]
          min_crop_side_ratio: 0.3
          min_crop_size: 24
          min_text_size: 4
          max_text_size: 512
      - KeepKeys:
          keep_keys: ['image', 'score_map', 'border_map', 'training_mask', 'tvo_map', 'tco_map'] # dataloader will return list in this order
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 4
    num_workers: 4

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/
    label_file_list:
      - ./train_data/total_text_icdar_14pt/test_label_json.txt
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - DetLabelEncode: # Class handling label
      - DetResizeForTest:
          resize_long: 768
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1 # must be 1
    num_workers: 2
@@ -5,7 +5,7 @@ Global:
   print_batch_step: 10
   save_model_dir: ./output/rec/mv3_none_bilstm_ctc/
   save_epoch_step: 3
-  # evaluation is run every 5000 iterations after the 4000th iteration
+  # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
   # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
@@ -13,7 +13,7 @@ Global:
   checkpoints:
   save_inference_dir:
   use_visualdl: False
-  infer_img: doc/imgs_words/ch/word_1.jpg
+  infer_img: doc/imgs_words_en/word_10.png
   # for data or label process
   character_dict_path:
   character_type: en
@@ -21,7 +21,6 @@ Global:
   infer_mode: False
   use_space_char: False

-
 Optimizer:
   name: Adam
   beta1: 0.9
......
Global:
  use_gpu: True
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/mv3_none_none_ctc/
  save_epoch_step: 3
  # evaluation is run every 2000 iterations
  eval_batch_step: [0, 2000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words_en/word_10.png
  # for data or label process
  character_dict_path:
  character_type: en
  max_text_length: 25
  infer_mode: False
  use_space_char: False

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.0005
  regularizer:
    name: 'L2'
    factor: 0

Architecture:
  model_type: rec
  algorithm: Rosetta
  Transform:
  Backbone:
    name: MobileNetV3
    scale: 0.5
    model_name: large
  Neck:
    name: SequenceEncoder
    encoder_type: reshape
  Head:
    name: CTCHead
    fc_decay: 0.0004

Loss:
  name: CTCLoss

PostProcess:
  name: CTCLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: LMDBDateSet
    data_dir: ./train_data/data_lmdb_release/training/
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    batch_size_per_card: 256
    drop_last: True
    num_workers: 8

Eval:
  dataset:
    name: LMDBDateSet
    data_dir: ./train_data/data_lmdb_release/validation/
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 8
Global:
  use_gpu: true
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/mv3_tps_bilstm_ctc/
  save_epoch_step: 3
  # evaluation is run every 2000 iterations
  eval_batch_step: [0, 2000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words_en/word_10.png
  # for data or label process
  character_dict_path:
  character_type: en
  max_text_length: 25
  infer_mode: False
  use_space_char: False

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.0005
  regularizer:
    name: 'L2'
    factor: 0

Architecture:
  model_type: rec
  algorithm: STARNet
  Transform:
    name: TPS
    num_fiducial: 20
    loc_lr: 0.1
    model_name: small
  Backbone:
    name: MobileNetV3
    scale: 0.5
    model_name: large
  Neck:
    name: SequenceEncoder
    encoder_type: rnn
    hidden_size: 96
  Head:
    name: CTCHead
    fc_decay: 0.0004

Loss:
  name: CTCLoss

PostProcess:
  name: CTCLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: LMDBDateSet
    data_dir: ./train_data/data_lmdb_release/training/
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    batch_size_per_card: 256
    drop_last: True
    num_workers: 8

Eval:
  dataset:
    name: LMDBDateSet
    data_dir: ./train_data/data_lmdb_release/validation/
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 4
@@ -5,7 +5,7 @@ Global:
   print_batch_step: 10
   save_model_dir: ./output/rec/r34_vd_none_bilstm_ctc/
   save_epoch_step: 3
-  # evaluation is run every 5000 iterations after the 4000th iteration
+  # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
   # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
@@ -13,7 +13,7 @@ Global:
   checkpoints:
   save_inference_dir:
   use_visualdl: False
-  infer_img: doc/imgs_words/ch/word_1.jpg
+  infer_img: doc/imgs_words_en/word_10.png
   # for data or label process
   character_dict_path:
   character_type: en
@@ -21,7 +21,6 @@ Global:
   infer_mode: False
   use_space_char: False

-
 Optimizer:
   name: Adam
   beta1: 0.9
@@ -71,7 +70,7 @@ Train:
       - KeepKeys:
           keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
   loader:
-    shuffle: False
+    shuffle: True
     batch_size_per_card: 256
     drop_last: True
     num_workers: 8
......
Global:
  use_gpu: true
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/r34_vd_none_none_ctc/
  save_epoch_step: 3
  # evaluation is run every 2000 iterations
  eval_batch_step: [0, 2000]
  # if pretrained_model is saved in static mode, load_static_weights must set to True
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words_en/word_10.png
  # for data or label process
  character_dict_path:
  character_type: en
  max_text_length: 25
  infer_mode: False
  use_space_char: False

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.0005
  regularizer:
    name: 'L2'
    factor: 0

Architecture:
  model_type: rec
  algorithm: Rosetta
  Backbone:
    name: ResNet
    layers: 34
  Neck:
    name: SequenceEncoder
    encoder_type: reshape
  Head:
    name: CTCHead
    fc_decay: 0.0004

Loss:
  name: CTCLoss

PostProcess:
  name: CTCLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: LMDBDateSet
    data_dir: ./train_data/data_lmdb_release/training/
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 256
    drop_last: True
    num_workers: 8

Eval:
  dataset:
    name: LMDBDateSet
    data_dir: ./train_data/data_lmdb_release/validation/
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 4
@@ -5,7 +5,7 @@ Global:
   print_batch_step: 10
   save_model_dir: ./output/rec/r34_vd_tps_bilstm_ctc/
   save_epoch_step: 3
-  # evaluation is run every 5000 iterations after the 4000th iteration
+  # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
   # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
@@ -13,7 +13,7 @@ Global:
   checkpoints:
   save_inference_dir:
   use_visualdl: False
-  infer_img: doc/imgs_words/ch/word_1.jpg
+  infer_img: doc/imgs_words_en/word_10.png
   # for data or label process
   character_dict_path:
   character_type: en
@@ -21,7 +21,6 @@ Global:
   infer_mode: False
   use_space_char: False

-
 Optimizer:
   name: Adam
   beta1: 0.9
@@ -34,7 +33,7 @@ Optimizer:
 Architecture:
   model_type: rec
-  algorithm: CRNN
+  algorithm: STARNet
 Transform:
   name: TPS
   num_fiducial: 20
......
{
    "modules_info": {
        "ocr_cls": {
            "init_args": {
                "version": "1.0.0",
                "use_gpu": true
            },
            "predict_args": {
            }
        }
    },
    "port": 8866,
    "use_multiprocess": false,
    "workers": 2
}
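This configuration can be passed to PaddleHub when starting the classification service; since `use_gpu` is `true`, CUDA_VISIBLE_DEVICES must be set first (see the hubserving readme further below):

```shell
export CUDA_VISIBLE_DEVICES=0
hub serving start -c deploy/hubserving/ocr_cls/config.json
```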
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
sys.path.insert(0, ".")

from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, runnable, serving
import cv2
import paddlehub as hub

from tools.infer.utility import base64_to_cv2
from tools.infer.predict_cls import TextClassifier


@moduleinfo(
    name="ocr_cls",
    version="1.0.0",
    summary="ocr recognition service",
    author="paddle-dev",
    author_email="paddle-dev@baidu.com",
    type="cv/text_recognition")
class OCRCls(hub.Module):
    def _initialize(self, use_gpu=False, enable_mkldnn=False):
        """
        initialize with the necessary elements
        """
        from ocr_cls.params import read_params
        cfg = read_params()

        cfg.use_gpu = use_gpu
        if use_gpu:
            try:
                _places = os.environ["CUDA_VISIBLE_DEVICES"]
                int(_places[0])
                print("use gpu: ", use_gpu)
                print("CUDA_VISIBLE_DEVICES: ", _places)
                cfg.gpu_mem = 8000
            except Exception:
                raise RuntimeError(
                    "Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. "
                    "If you want to use GPU, please set CUDA_VISIBLE_DEVICES via "
                    "export CUDA_VISIBLE_DEVICES=cuda_device_id.")
        cfg.ir_optim = True
        cfg.enable_mkldnn = enable_mkldnn

        self.text_classifier = TextClassifier(cfg)

    def read_images(self, paths=[]):
        images = []
        for img_path in paths:
            assert os.path.isfile(
                img_path), "The {} isn't a valid file.".format(img_path)
            img = cv2.imread(img_path)
            if img is None:
                logger.info("error in loading image:{}".format(img_path))
                continue
            images.append(img)
        return images

    def predict(self, images=[], paths=[]):
        """
        Get the text angle in the predicted images.
        Args:
            images (list(numpy.ndarray)): images data, shape of each is [H, W, C]. If set, `paths` is ignored.
            paths (list[str]): paths of the images to predict. Used only when `images` is empty.
        Returns:
            res (list): the text angle and classification confidence for each input image.
        """

        if images != [] and isinstance(images, list) and paths == []:
            predicted_data = images
        elif images == [] and isinstance(paths, list) and paths != []:
            predicted_data = self.read_images(paths)
        else:
            raise TypeError("The input data is inconsistent with expectations.")

        assert predicted_data != [], "There is not any image to be predicted. Please check the input data."

        img_list = []
        for img in predicted_data:
            if img is None:
                continue
            img_list.append(img)

        rec_res_final = []
        try:
            img_list, cls_res, predict_time = self.text_classifier(img_list)
            for dno in range(len(cls_res)):
                angle, score = cls_res[dno]
                rec_res_final.append({
                    'angle': angle,
                    'confidence': float(score),
                })
        except Exception as e:
            print(e)
            return [[]]
        return [rec_res_final]

    @serving
    def serving_method(self, images, **kwargs):
        """
        Run as a service.
        """
        images_decode = [base64_to_cv2(image) for image in images]
        results = self.predict(images_decode, **kwargs)
        return results


if __name__ == '__main__':
    ocr = OCRCls()
    image_path = [
        './doc/imgs_words/ch/word_1.jpg',
        './doc/imgs_words/ch/word_2.jpg',
        './doc/imgs_words/ch/word_3.jpg',
    ]
    res = ocr.predict(paths=image_path)
    print(res)
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class Config(object):
    pass


def read_params():
    cfg = Config()

    #params for text classifier
    cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v1.1_cls_infer/"
    cfg.cls_image_shape = "3, 48, 192"
    cfg.label_list = ['0', '180']
    cfg.cls_batch_num = 30
    cfg.cls_thresh = 0.9

    cfg.use_zero_copy_run = False
    cfg.use_pdserving = False

    return cfg
@@ -9,7 +9,7 @@
             }
         }
     },
-    "port": 8866,
+    "port": 8865,
     "use_multiprocess": false,
     "workers": 2
 }
@@ -3,20 +3,14 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import argparse
-import ast
-import copy
-import math
 import os
-import time
+import sys
+sys.path.insert(0, ".")

-from paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor
 from paddlehub.common.logger import logger
 from paddlehub.module.module import moduleinfo, runnable, serving
-from PIL import Image
 import cv2
 import numpy as np
-import paddle.fluid as fluid
 import paddlehub as hub

 from tools.infer.utility import base64_to_cv2
@@ -67,9 +61,7 @@ class OCRDet(hub.Module):
             images.append(img)
         return images

-    def predict(self,
-                images=[],
-                paths=[]):
+    def predict(self, images=[], paths=[]):
         """
         Get the text box in the predicted images.
         Args:
@@ -87,7 +79,7 @@ class OCRDet(hub.Module):
             raise TypeError("The input data is inconsistent with expectations.")

         assert predicted_data != [], "There is not any image to be predicted. Please check the input data."
-
+
         all_results = []
         for img in predicted_data:
             if img is None:
@@ -99,11 +91,9 @@ class OCRDet(hub.Module):
             rec_res_final = []

             for dno in range(len(dt_boxes)):
-                rec_res_final.append(
-                    {
-                        'text_region': dt_boxes[dno].astype(np.int).tolist()
-                    }
-                )
+                rec_res_final.append({
+                    'text_region': dt_boxes[dno].astype(np.int).tolist()
+                })
             all_results.append(rec_res_final)
         return all_results

@@ -116,7 +106,7 @@ class OCRDet(hub.Module):
         results = self.predict(images_decode, **kwargs)
         return results

-
+
 if __name__ == '__main__':
     ocr = OCRDet()
     image_path = [
@@ -124,4 +114,4 @@ if __name__ == '__main__':
         './doc/imgs/12.jpg',
     ]
     res = ocr.predict(paths=image_path)
-    print(res)
+    print(res)
\ No newline at end of file
@@ -10,16 +10,17 @@ class Config(object):
 def read_params():
     cfg = Config()

     #params for text detector
     cfg.det_algorithm = "DB"
-    cfg.det_model_dir = "./inference/ch_det_mv3_db/"
-    cfg.det_max_side_len = 960
+    cfg.det_model_dir = "./inference/ch_ppocr_mobile_v1.1_det_infer/"
+    cfg.det_limit_side_len = 960
+    cfg.det_limit_type = 'max'

     #DB parmas
-    cfg.det_db_thresh =0.3
-    cfg.det_db_box_thresh =0.5
-    cfg.det_db_unclip_ratio =2.0
+    cfg.det_db_thresh = 0.3
+    cfg.det_db_box_thresh = 0.5
+    cfg.det_db_unclip_ratio = 2.0

     # #EAST parmas
     # cfg.det_east_score_thresh = 0.8
@@ -37,5 +38,6 @@ def read_params():
     # cfg.use_space_char = True

     cfg.use_zero_copy_run = False
+    cfg.use_pdserving = False

     return cfg
@@ -3,20 +3,13 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import argparse
-import ast
-import copy
-import math
 import os
-import time
+import sys
+sys.path.insert(0, ".")

-from paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor
 from paddlehub.common.logger import logger
 from paddlehub.module.module import moduleinfo, runnable, serving
-from PIL import Image
 import cv2
-import numpy as np
-import paddle.fluid as fluid
 import paddlehub as hub

 from tools.infer.utility import base64_to_cv2
@@ -67,9 +60,7 @@ class OCRRec(hub.Module):
             images.append(img)
         return images

-    def predict(self,
-                images=[],
-                paths=[]):
+    def predict(self, images=[], paths=[]):
         """
         Get the text box in the predicted images.
         Args:
@@ -87,31 +78,28 @@ class OCRRec(hub.Module):
             raise TypeError("The input data is inconsistent with expectations.")

         assert predicted_data != [], "There is not any image to be predicted. Please check the input data."
-
+
         img_list = []
         for img in predicted_data:
             if img is None:
                 continue
             img_list.append(img)
-
+
         rec_res_final = []
         try:
             rec_res, predict_time = self.text_recognizer(img_list)
             for dno in range(len(rec_res)):
                 text, score = rec_res[dno]
-                rec_res_final.append(
-                    {
-                        'text': text,
-                        'confidence': float(score),
-                    }
-                )
+                rec_res_final.append({
+                    'text': text,
+                    'confidence': float(score),
+                })
         except Exception as e:
             print(e)
             return [[]]
         return [rec_res_final]

     @serving
     def serving_method(self, images, **kwargs):
         """
@@ -121,7 +109,7 @@ class OCRRec(hub.Module):
         results = self.predict(images_decode, **kwargs)
         return results

-
+
 if __name__ == '__main__':
     ocr = OCRRec()
     image_path = [
@@ -130,4 +118,4 @@ if __name__ == '__main__':
         './doc/imgs_words/ch/word_3.jpg',
     ]
     res = ocr.predict(paths=image_path)
-    print(res)
+    print(res)
\ No newline at end of file
@@ -10,25 +10,10 @@ class Config(object):
 def read_params():
     cfg = Config()

-    # #params for text detector
-    # cfg.det_algorithm = "DB"
-    # cfg.det_model_dir = "./inference/ch_det_mv3_db/"
-    # cfg.det_max_side_len = 960
-
-    # #DB parmas
-    # cfg.det_db_thresh =0.3
-    # cfg.det_db_box_thresh =0.5
-    # cfg.det_db_unclip_ratio =2.0
-
-    # #EAST parmas
-    # cfg.det_east_score_thresh = 0.8
-    # cfg.det_east_cover_thresh = 0.1
-    # cfg.det_east_nms_thresh = 0.2
-
     #params for text recognizer
     cfg.rec_algorithm = "CRNN"
-    cfg.rec_model_dir = "./inference/ch_rec_mv3_crnn/"
+    cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v1.1_rec_infer/"

     cfg.rec_image_shape = "3, 32, 320"
     cfg.rec_char_type = 'ch'
@@ -39,5 +24,6 @@ def read_params():
     cfg.use_space_char = True

     cfg.use_zero_copy_run = False
+    cfg.use_pdserving = False

     return cfg
@@ -3,20 +3,16 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import argparse
-import ast
-import copy
-import math
 import os
+import sys
+sys.path.insert(0, ".")
 import time

-from paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor
 from paddlehub.common.logger import logger
 from paddlehub.module.module import moduleinfo, runnable, serving
-from PIL import Image
 import cv2
 import numpy as np
-import paddle.fluid as fluid
 import paddlehub as hub

 from tools.infer.utility import base64_to_cv2
@@ -52,7 +48,7 @@ class OCRSystem(hub.Module):
                 )
         cfg.ir_optim = True
         cfg.enable_mkldnn = enable_mkldnn
-
+
         self.text_sys = TextSystem(cfg)

     def read_images(self, paths=[]):
@@ -67,9 +63,7 @@ class OCRSystem(hub.Module):
             images.append(img)
         return images

-    def predict(self,
-                images=[],
-                paths=[]):
+    def predict(self, images=[], paths=[]):
         """
         Get the chinese texts in the predicted images.
         Args:
@@ -104,13 +98,11 @@ class OCRSystem(hub.Module):
             for dno in range(dt_num):
                 text, score = rec_res[dno]
-                rec_res_final.append(
-                    {
-                        'text': text,
-                        'confidence': float(score),
-                        'text_region': dt_boxes[dno].astype(np.int).tolist()
-                    }
-                )
+                rec_res_final.append({
+                    'text': text,
+                    'confidence': float(score),
+                    'text_region': dt_boxes[dno].astype(np.int).tolist()
+                })
             all_results.append(rec_res_final)
         return all_results

@@ -123,7 +115,7 @@ class OCRSystem(hub.Module):
         results = self.predict(images_decode, **kwargs)
         return results

-
+
 if __name__ == '__main__':
     ocr = OCRSystem()
     image_path = [
@@ -131,4 +123,4 @@ if __name__ == '__main__':
         './doc/imgs/12.jpg',
     ]
     res = ocr.predict(paths=image_path)
-    print(res)
+    print(res)
\ No newline at end of file
@@ -10,16 +10,17 @@ class Config(object):
 def read_params():
     cfg = Config()

     #params for text detector
     cfg.det_algorithm = "DB"
-    cfg.det_model_dir = "./inference/ch_det_mv3_db/"
-    cfg.det_max_side_len = 960
+    cfg.det_model_dir = "./inference/ch_ppocr_mobile_v1.1_det_infer/"
+    cfg.det_limit_side_len = 960
+    cfg.det_limit_type = 'max'

     #DB parmas
-    cfg.det_db_thresh =0.3
-    cfg.det_db_box_thresh =0.5
-    cfg.det_db_unclip_ratio =2.0
+    cfg.det_db_thresh = 0.3
+    cfg.det_db_box_thresh = 0.5
+    cfg.det_db_unclip_ratio = 2.0

     #EAST parmas
     cfg.det_east_score_thresh = 0.8
@@ -28,7 +29,7 @@ def read_params():

     #params for text recognizer
     cfg.rec_algorithm = "CRNN"
-    cfg.rec_model_dir = "./inference/ch_rec_mv3_crnn/"
+    cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v1.1_rec_infer/"

     cfg.rec_image_shape = "3, 32, 320"
     cfg.rec_char_type = 'ch'
@@ -38,6 +39,15 @@ def read_params():
     cfg.rec_char_dict_path = "./ppocr/utils/ppocr_keys_v1.txt"
     cfg.use_space_char = True

+    #params for text classifier
+    cfg.use_angle_cls = True
+    cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v1.1_cls_infer/"
+    cfg.cls_image_shape = "3, 48, 192"
+    cfg.label_list = ['0', '180']
+    cfg.cls_batch_num = 30
+    cfg.cls_thresh = 0.9
+
     cfg.use_zero_copy_run = False
+    cfg.use_pdserving = False

     return cfg

[English](readme_en.md) | 简体中文
PaddleOCR provides 2 service deployment methods:
- Deployment based on PaddleHub Serving: the code lives under "`./deploy/hubserving`"; follow this tutorial.
- Deployment based on PaddleServing: the code lives under "`./deploy/pdserving`"; see the [documentation](../../deploy/pdserving/readme.md) for usage.

# Service deployment based on PaddleHub Serving

The hubserving deployment directory contains service packages for detection, classification, recognition, and the two-stage detection+recognition pipeline. Select, install, and start the service packages you need. The directory structure is as follows:
```
deploy/hubserving/
└─ ocr_cls     angle classification module service package
└─ ocr_det     detection module service package
└─ ocr_rec     recognition module service package
└─ ocr_system  detection+recognition pipeline service package
```
Each service package contains three required files and an optional config file. Taking the two-stage pipeline package as an example, the directory is:
```
deploy/hubserving/ocr_system/
└─ __init__.py    empty file, required
└─ config.json    configuration file, optional, passed as an argument when starting the service with a config
└─ module.py      main module, required, contains the complete service logic
└─ params.py      parameter file, required, contains parameters such as model paths and pre-/post-processing settings
```
## Quick start

The following steps take the detection+recognition two-stage service as an example; if you only need the detection or recognition service, substitute the corresponding file paths.

### 1. Prepare the environment
```shell
# Install paddlehub
pip3 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
```
### 2. Download the inference models

Before installing a service module, prepare the inference models and put them in the correct paths. The ultra-lightweight v1.1 models are used by default, at the following default paths:
```
detection model:       ./inference/ch_ppocr_mobile_v1.1_det_infer/
recognition model:     ./inference/ch_ppocr_mobile_v1.1_rec_infer/
direction classifier:  ./inference/ch_ppocr_mobile_v1.1_cls_infer/
```

**The model paths can be viewed and modified in `params.py`.** More models can be downloaded from the PaddleOCR [model list](../../doc/doc_ch/models_list.md); you can also substitute models you trained and converted yourself.
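For example, a model can be fetched and unpacked into the default path roughly as follows (the URL is a placeholder; copy the real link for each model from the model list above):

```shell
mkdir -p inference && cd inference
# placeholder URL - use the actual ch_ppocr_mobile_v1.1_det_infer link from the model list
wget https://example.com/ch_ppocr_mobile_v1.1_det_infer.tar
tar -xf ch_ppocr_mobile_v1.1_det_infer.tar
cd ..
```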
### 3. Install the service modules

PaddleOCR provides four service modules; install the ones you need.

* On Linux, for example:
```shell
# Install the detection service module:
hub install deploy/hubserving/ocr_det/

# Or, install the classification service module:
hub install deploy/hubserving/ocr_cls/

# Or, install the recognition service module:
hub install deploy/hubserving/ocr_rec/

# Or, install the detection+recognition pipeline service module:
hub install deploy/hubserving/ocr_system/
```

* On Windows (the folder separator is `\`), for example:
```shell
# Install the detection service module:
hub install deploy\hubserving\ocr_det\

# Or, install the classification service module:
hub install deploy\hubserving\ocr_cls\

# Or, install the recognition service module:
hub install deploy\hubserving\ocr_rec\

# Or, install the detection+recognition pipeline service module:
hub install deploy\hubserving\ocr_system\
```
### 4. Start the service

#### Method 1. Start from the command line (CPU only)

**Start command:**
```shell
$ hub serving start --modules [Module1==Version1, Module2==Version2, ...] \
                    --port XXXX \
                    --use_multiprocess \
                    --workers \
```

**Parameters:**

|Parameter|Purpose|
|-|-|
|--modules/-m|PaddleHub Serving pre-installed models, listed as multiple Module==Version key-value pairs<br>*`when Version is omitted, the latest version is selected by default`*|
|--port/-p|service port, 8866 by default|
|--use_multiprocess|whether to use concurrent mode; single-process by default. Recommended for multi-core CPU machines<br>*`Windows only supports single-process mode`*|
|--workers|number of concurrent tasks in concurrent mode, `2*cpu_count-1` by default, where `cpu_count` is the number of CPU cores|

For example, to start the pipeline service: ```hub serving start -m ocr_system```

This deploys a service API, listening on the default port 8866.
#### Method 2. Start from a configuration file (supports CPU and GPU)

**Start command:**
```hub serving start -c config.json```

where the format of `config.json` is as follows:
```python
{
    "modules_info": {
        "ocr_system": {
            "init_args": {
                "version": "1.0.0",
                "use_gpu": true
            },
            "predict_args": {
            }
        }
    },
    "port": 8868,
    "use_multiprocess": false,
    "workers": 2
}
```

- The configurable parameters in `init_args` match the `_initialize` interface in `module.py`; in particular, **when `use_gpu` is `true`, the service runs on GPU**.
- The configurable parameters in `predict_args` match the `predict` interface in `module.py`.

**Note:**
- When the service is started from a configuration file, the other command-line parameters are ignored.
- If you predict on GPU (i.e. `use_gpu` is `true`), the CUDA_VISIBLE_DEVICES environment variable must be set before starting the service, e.g. ```export CUDA_VISIBLE_DEVICES=0```; otherwise it is not needed.
- **`use_gpu` and `use_multiprocess` must not both be `true`.**

For example, to start the pipeline service on GPU card 3:
```shell
export CUDA_VISIBLE_DEVICES=3
hub serving start -c deploy/hubserving/ocr_system/config.json
```
## Send prediction requests

With the server up, you can send a prediction request and get the result with:
```python tools/test_hubserving.py server_url image_path```

The script takes 2 parameters:
- **server_url**: the service address, in the form
`http://[ip_address]:[port]/predict/[module_name]`
For example, if the classification, detection, recognition, and detection+classification+recognition 3-stage services are started with the provided configuration files, the request URLs are:
`http://127.0.0.1:8865/predict/ocr_det`
`http://127.0.0.1:8866/predict/ocr_cls`
`http://127.0.0.1:8867/predict/ocr_rec`
`http://127.0.0.1:8868/predict/ocr_system`
- **image_path**: the test image path; either a single image or a directory of images

Example:
```python tools/test_hubserving.py http://127.0.0.1:8868/predict/ocr_system ./doc/imgs/```
## Returned result format

The returned result is a list; each item in the list is a dict, which may contain the following fields:

|Field|Type|Meaning|
|----|----|----|
|angle|str|text angle|
|text|str|text content|
|confidence|float|confidence of text recognition or angle classification|
|text_region|list|text location coordinates|

Different modules return different fields; for example, the results of the text recognition service module do not contain `text_region`. Details:

| field \ module | ocr_det | ocr_cls | ocr_rec | ocr_system |
| ---- | ---- | ---- | ---- | ---- |
|angle| | ✔ | | ✔ |
|text| | |✔|✔|
|confidence| |✔ |✔|✔|
|text_region| ✔| | |✔ |

**Note:** To add, delete or modify the returned fields, edit the corresponding module's `module.py`; for the complete workflow, see the next section on customizing the service module.
## Customize the service module

To change the service logic, the following steps are generally needed (taking `ocr_system` as an example):

- 1. Stop the service
```hub serving stop --port/-p XXXX```
- 2. Modify the code in the corresponding `module.py`, `params.py` and other files as needed.
For example, to replace the models used by the deployed service, change the model path parameters `det_model_dir` and `rec_model_dir` in `params.py`; to disable the text direction classifier, set `use_angle_cls` to `False`. Other related parameters may need to be adjusted as well; modify and debug according to your actual situation. **It is strongly recommended to run `module.py` directly after modification and confirm that prediction works before starting the service test.**
- 3. Uninstall the old service package
```hub uninstall ocr_system```
- 4. Install the new, modified service package
```hub install deploy/hubserving/ocr_system/```
- 5. Restart the service
```hub serving start -m ocr_system```
English | [简体中文](readme.md)
PaddleOCR provides 2 service deployment methods:
- Based on **PaddleHub Serving**: Code path is "`./deploy/hubserving`". Please follow this tutorial.
- Based on **PaddleServing**: Code path is "`./deploy/pdserving`". Please refer to the [tutorial](../../deploy/pdserving/readme.md) for usage.
# Service deployment based on PaddleHub Serving
The hubserving service deployment directory includes service packages for detection, angle classification, recognition, and the two-stage detection+recognition pipeline. Please select and install the service packages you need. The directory is as follows:
```
deploy/hubserving/
└─ ocr_det detection module service package
└─ ocr_cls angle class module service package
└─ ocr_rec recognition module service package
└─ ocr_system two-stage series connection service package
```
Each service package contains three required files and an optional configuration file. Take the 2-stage pipeline service package as an example; the directory is as follows:
```
deploy/hubserving/ocr_system/
└─ __init__.py Empty file, required
└─ config.json Configuration file, optional, passed in as a parameter when using configuration to start the service
└─ module.py Main module file, required, contains the complete logic of the service
└─ params.py Parameter file, required, including parameters such as model path, pre- and post-processing parameters
```
## Quick start service
The following steps take the 2-stage series service as an example. If only the detection service or recognition service is needed, replace the corresponding file path.
### 1. Prepare the environment
```shell
# Install paddlehub
pip3 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
```
### 2. Download inference model
Before installing a service module, you need to prepare the inference models and put them in the correct paths. By default, the ultra-lightweight v1.1 models are used, with the following default paths:
```
detection model: ./inference/ch_ppocr_mobile_v1.1_det_infer/
recognition model: ./inference/ch_ppocr_mobile_v1.1_rec_infer/
text direction classifier: ./inference/ch_ppocr_mobile_v1.1_cls_infer/
```
**The model path can be found and modified in `params.py`.** More models provided by PaddleOCR can be obtained from the [model library](../../doc/doc_en/models_list_en.md). You can also use models trained by yourself.
### 3. Install Service Module
PaddleOCR provides four service modules; install the ones you need.
* On Linux platform, the examples are as follows.
```shell
# Install the detection service module:
hub install deploy/hubserving/ocr_det/
# Or, install the angle class service module:
hub install deploy/hubserving/ocr_cls/
# Or, install the recognition service module:
hub install deploy/hubserving/ocr_rec/
# Or, install the 2-stage series service module:
hub install deploy/hubserving/ocr_system/
```
* On Windows platform, the examples are as follows.
```shell
# Install the detection service module:
hub install deploy\hubserving\ocr_det\
# Or, install the angle class service module:
hub install deploy\hubserving\ocr_cls\
# Or, install the recognition service module:
hub install deploy\hubserving\ocr_rec\
# Or, install the 2-stage series service module:
hub install deploy\hubserving\ocr_system\
```
### 4. Start service
#### Way 1. Start with command line parameters (CPU only)
**start command:**
```shell
$ hub serving start --modules [Module1==Version1, Module2==Version2, ...] \
--port XXXX \
--use_multiprocess \
--workers \
```
**parameters:**
|parameters|usage|
|-|-|
|--modules/-m|PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs<br>*`When Version is not specified, the latest version is selected by default`*|
|--port/-p|Service port, default is 8866|
|--use_multiprocess|Enable concurrent mode, the default is single-process mode, this mode is recommended for multi-core CPU machines<br>*`Windows operating system only supports single-process mode`*|
|--workers|The number of concurrent tasks specified in concurrent mode, the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores|
For example, start the 2-stage series service:
```shell
hub serving start -m ocr_system
```
This completes the deployment of a service API, using the default port number 8866.
#### Way 2. Start with a configuration file (CPU and GPU)
**start command:**
```shell
hub serving start --config/-c config.json
```
where the format of `config.json` is as follows:
```python
{
"modules_info": {
"ocr_system": {
"init_args": {
"version": "1.0.0",
"use_gpu": true
},
"predict_args": {
}
}
},
"port": 8868,
"use_multiprocess": false,
"workers": 2
}
```
- The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`. Among them, **when `use_gpu` is `true`, it means that the GPU is used to start the service**.
- The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`.
**Note:**
- When using the configuration file to start the service, other parameters will be ignored.
- If you use GPU prediction (that is, `use_gpu` is set to `true`), you need to set the environment variable CUDA_VISIBLE_DEVICES before starting the service, such as: ```export CUDA_VISIBLE_DEVICES=0```, otherwise you do not need to set it.
- **`use_gpu` and `use_multiprocess` cannot be `true` at the same time.**
For example, use GPU card No. 3 to start the 2-stage series service:
```shell
export CUDA_VISIBLE_DEVICES=3
hub serving start -c deploy/hubserving/ocr_system/config.json
```
## Send prediction requests
After the service starts, you can use the following command to send a prediction request to obtain the prediction result:
```shell
python tools/test_hubserving.py server_url image_path
```
Two parameters need to be passed to the script:
- **server_url**: the service address, in the form
`http://[ip_address]:[port]/predict/[module_name]`
For example, if the detection, classification, recognition, and 2-stage pipeline services are started with the provided configuration files, the respective `server_url` would be:
`http://127.0.0.1:8865/predict/ocr_det`
`http://127.0.0.1:8866/predict/ocr_cls`
`http://127.0.0.1:8867/predict/ocr_rec`
`http://127.0.0.1:8868/predict/ocr_system`
- **image_path**: the test image path; either a single image path or an image directory path
**Eg.**
```shell
python tools/test_hubserving.py http://127.0.0.1:8868/predict/ocr_system ./doc/imgs/
```
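The helper script wraps a plain HTTP call: it base64-encodes the image files and POSTs them as JSON. A minimal client sketch (assuming the `requests` package is installed; the exact response envelope may vary with the PaddleHub version):

```python
import base64
import json

import requests  # assumed third-party dependency


def file_to_base64(image_path):
    # The service expects each image as a base64 string of the raw file bytes.
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf8")


if __name__ == "__main__":
    server_url = "http://127.0.0.1:8868/predict/ocr_system"
    payload = {"images": [file_to_base64("./doc/imgs/11.jpg")]}
    headers = {"Content-type": "application/json"}
    response = requests.post(server_url, headers=headers, data=json.dumps(payload))
    # Prediction output is carried in the "results" field of the JSON response.
    print(response.json()["results"])
```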
## Returned result format
The returned result is a list. Each item in the list is a dict, which may contain the following fields:

|field name|data type|description|
|----|----|----|
|angle|str|text angle|
|text|str|text content|
|confidence|float|confidence of text recognition or angle classification|
|text_region|list|text location coordinates|
The fields returned by different modules are different. For example, the results returned by the text recognition service module do not contain `text_region`. The details are as follows:
| field name/module name | ocr_det | ocr_cls | ocr_rec | ocr_system |
| ---- | ---- | ---- | ---- | ---- |
|angle| | ✔ | | ✔ |
|text| | |✔|✔|
|confidence| |✔ |✔|✔|
|text_region| ✔| | |✔ |
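For reference, a successful single-image request to `ocr_system` yields a `results` payload shaped like the dicts built in its `module.py` (the values here are illustrative):

```
[[{"confidence": 0.84, "text": "recognized text", "text_region": [[9, 7], [99, 7], [99, 36], [9, 36]]}]]
```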
**Note:** If you need to add, delete or modify the returned fields, you can modify the file `module.py` of the corresponding module. For the complete process, refer to the user-defined modification service module in the next section.
## User defined service module modification
If you need to modify the service logic, the following steps are generally required (take the modification of `ocr_system` for example):
- 1. Stop service
```shell
hub serving stop --port/-p XXXX
```
- 2. Modify the code in the corresponding files, like `module.py` and `params.py`, according to the actual needs.
For example, if you need to replace the model used by the deployed service, you need to modify model path parameters `det_model_dir` and `rec_model_dir` in `params.py`. If you want to turn off the text direction classifier, set the parameter `use_angle_cls` to `False`. Of course, other related parameters may need to be modified at the same time. Please modify and debug according to the actual situation. It is suggested to run `module.py` directly for debugging after modification before starting the service test.
- 3. Uninstall old service module
```shell
hub uninstall ocr_system
```
- 4. Install modified service module
```shell
hub install deploy/hubserving/ocr_system/
```
- 5. Restart service
```shell
hub serving start -m ocr_system
```
# Adding a new algorithm

PaddleOCR decomposes an algorithm into the following parts and modularizes each of them, so that new algorithms can be assembled quickly:

* data loading and processing
* network
* post-processing
* loss function
* metric evaluation
* optimizer

The sections below introduce each part and explain how to add the modules a new algorithm needs.
## Data loading and processing

Data loading and processing is composed of individual modules that implement image reading, data augmentation, and label generation. This part lives under [ppocr/data](../../ppocr/data). The files and folders are:

```bash
ppocr/data/
├── imaug             # files for image reading, data augmentation and label generation
│   ├── label_ops.py  # modules that transform the label
│   ├── operators.py  # modules that transform the image
│   ├── .....
├── __init__.py
├── lmdb_dataset.py   # dataset that reads lmdb-format datasets
└── simple_dataset.py # dataset that reads datasets stored as `image_path\tgt`
```
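For reference, each line of a label file read by `simple_dataset.py` pairs an image path with its ground truth, separated by a tab. An illustrative recognition-style line (the file name is hypothetical):

```
train_data/train_images/word_001.jpg	hello
```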
PaddleOCR has many built-in image-operation modules. A module that is not built in can be added as follows:

1. Create a new file under the [ppocr/data/imaug](../../ppocr/data/imaug) folder, e.g. my_module.py.
2. Add the relevant code in my_module.py; example code:
```python
class MyModule:
    def __init__(self, *args, **kwargs):
        # your init code
        pass

    def __call__(self, data):
        img = data['image']
        label = data['label']
        # your process code

        data['image'] = img
        data['label'] = label
        return data
```
3. Import the new module in [ppocr/data/imaug/\__init\__.py](../../ppocr/data/imaug/__init__.py).

All data-processing steps are executed in sequence by the individual modules, combined as a list in the config file. For example:

```yaml
# angle class data process
transforms:
  - DecodeImage: # load image
      img_mode: BGR
      channel_first: False
  - MyModule:
      args1: args1
      args2: args2
  - KeepKeys:
      keep_keys: [ 'image', 'label' ] # dataloader will return list in this order
```
## Network

The network part builds the network. PaddleOCR divides a network into four parts, found under [ppocr/modeling](../../ppocr/modeling). Data entering the network passes through these four parts in order (transforms->backbones->necks->heads).

```bash
├── architectures # network assembly code
├── transforms    # image transformation modules
├── backbones     # feature extraction modules
├── necks         # feature enhancement modules
└── heads         # output modules
```

PaddleOCR ships with the common modules of algorithms such as DB, EAST, SAST, CRNN and Attention. A module that is not built in can be added as follows; the steps are the same for all four parts, using backbones as an example:

1. Create a new file under the [ppocr/modeling/backbones](../../ppocr/modeling/backbones) folder, e.g. my_backbone.py.
2. Add the relevant code in my_backbone.py; example code:
```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class MyBackbone(nn.Layer):
    def __init__(self, *args, **kwargs):
        super(MyBackbone, self).__init__()
        # your init code
        self.conv = nn.xxxx

    def forward(self, inputs):
        # your network forward
        y = self.conv(inputs)
        return y
```
3. Import the new module in [ppocr/modeling/backbones/\__init\__.py](../../ppocr/modeling/backbones/__init__.py).

After the modules for all four network parts are added, they only need to be configured in the config file to be used, e.g.:

```yaml
Architecture:
  model_type: rec
  algorithm: CRNN
  Transform:
    name: MyTransform
    args1: args1
    args2: args2
  Backbone:
    name: MyBackbone
    args1: args1
  Neck:
    name: MyNeck
    args1: args1
  Head:
    name: MyHead
    args1: args1
```
## Post-processing

Post-processing decodes the network output into text boxes or recognized text. This part lives under [ppocr/postprocess](../../ppocr/postprocess).
PaddleOCR ships with post-processing modules for algorithms such as DB, EAST, SAST, CRNN and Attention. A component that is not built in can be added as follows:

1. Create a new file under the [ppocr/postprocess](../../ppocr/postprocess) folder, e.g. my_postprocess.py.
2. Add the relevant code in my_postprocess.py; example code:

```python
import paddle


class MyPostProcess:
    def __init__(self, *args, **kwargs):
        # your init code
        pass

    def __call__(self, preds, label=None, *args, **kwargs):
        if isinstance(preds, paddle.Tensor):
            preds = preds.numpy()
        # your prediction decoding code
        preds = self.decode_preds(preds)
        if label is None:
            return preds
        # your label decoding code
        label = self.decode_label(label)
        return preds, label

    def decode_preds(self, preds):
        # your prediction decoding code
        pass

    def decode_label(self, preds):
        # your label decoding code
        pass
```
3. Import the new module in [ppocr/postprocess/\__init\__.py](../../ppocr/postprocess/__init__.py).

After the post-processing module is added, it only needs to be configured in the config file to be used, e.g.:

```yaml
PostProcess:
  name: MyPostProcess
  args1: args1
  args2: args2
```
## Loss function

The loss function computes the distance between the network output and the label. This part lives under [ppocr/losses](../../ppocr/losses).
PaddleOCR ships with loss-function modules for algorithms such as DB, EAST, SAST, CRNN and Attention. A module that is not built in can be added as follows:

1. Create a new file under the [ppocr/losses](../../ppocr/losses) folder, e.g. my_loss.py.
2. Add the relevant code in my_loss.py; example code:

```python
import paddle
from paddle import nn


class MyLoss(nn.Layer):
    def __init__(self, **kwargs):
        super(MyLoss, self).__init__()
        # your init code
        pass

    def __call__(self, predicts, batch):
        label = batch[1]
        # your loss code
        loss = self.loss(input=predicts, label=label)
        return {'loss': loss}
```
3. Import the new module in [ppocr/losses/\__init\__.py](../../ppocr/losses/__init__.py).

After the loss function is added, it only needs to be configured in the config file to be used, e.g.:

```yaml
Loss:
  name: MyLoss
  args1: args1
  args2: args2
```
## Metric evaluation

Metric evaluation measures the network's performance on the current batch. This part lives under [ppocr/metrics](../../ppocr/metrics). PaddleOCR ships with metric-evaluation modules for detection, classification and recognition. A module that is not built in can be added as follows:

1. Create a new file under the [ppocr/metrics](../../ppocr/metrics) folder, e.g. my_metric.py.
2. Add the relevant code in my_metric.py; example code:

```python
class MyMetric(object):
    def __init__(self, main_indicator='acc', **kwargs):
        # main_indicator is used to select the best model
        self.main_indicator = main_indicator
        self.reset()

    def __call__(self, preds, batch, *args, **kwargs):
        # preds is the output of the postprocess
        # batch is the output of the dataloader
        labels = batch[1]
        cur_correct_num = 0
        cur_all_num = 0
        # your metric code
        self.correct_num += cur_correct_num
        self.all_num += cur_all_num
        return {'acc': cur_correct_num / cur_all_num, }

    def get_metric(self):
        """
        return metrics {
            'acc': 0,
            'norm_edit_dis': 0,
        }
        """
        acc = self.correct_num / self.all_num
        self.reset()
        return {'acc': acc}

    def reset(self):
        # reset metric
        self.correct_num = 0
        self.all_num = 0
```
3.[ppocr/metrics/\__init\__.py](../../ppocr/metrics/__init__.py)文件内导入添加的模块。
在指标评估模块添加之后,只需要配置文件中进行配置即可使用,如:
```yaml
Metric:
  name: MyMetric
  main_indicator: acc
```
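指标评估对象按 batch 累积统计量,评估结束时通过 `get_metric` 汇总并重置。下面是使用流程的示意(eval_results 为假设的迭代器,其元素为后处理输出和 dataloader 输出):

```python
# 示意:Metric 的典型使用流程
metric = MyMetric(main_indicator='acc')
for preds, batch in eval_results:
    metric(preds, batch)        # 逐 batch 累积 correct_num / all_num
print(metric.get_metric())      # 汇总得到最终指标并重置,如 {'acc': 0.9}
```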
## 优化器
优化器用于训练网络。优化器内部还包含了网络正则化和学习率衰减模块。这一部分在[ppocr/optimizer](../../ppocr/optimizer)下。PaddleOCR内置了`Momentum`,`Adam`,`RMSProp`等常用的优化器模块,`Linear`,`Cosine`,`Step`,`Piecewise`等常用的学习率衰减模块,以及`L1Decay`,`L2Decay`等常用的网络正则化模块。
对于没有内置的模块可通过如下步骤添加,以`optimizer`为例:
1. 在 [ppocr/optimizer/optimizer.py](../../ppocr/optimizer/optimizer.py) 文件内创建自己的优化器,示例代码如下:
```python
from paddle import optimizer as optim


class MyOptim(object):
    def __init__(self, learning_rate=0.001, *args, **kwargs):
        self.learning_rate = learning_rate

    def __call__(self, parameters):
        # It is recommended to wrap the built-in optimizer of paddle
        opt = optim.XXX(
            learning_rate=self.learning_rate,
            parameters=parameters)
        return opt
```
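优化器类以可调用对象的形式使用:先用配置中的超参数实例化,再传入网络参数得到真正的 paddle 优化器。下面是调用方式的示意(model 为假设的网络对象,实际训练中这一步由构建流程根据配置完成):

```python
# 示意:MyOptim 的调用方式
build_opt = MyOptim(learning_rate=0.001)
optimizer = build_opt(parameters=model.parameters())  # 返回包装好的 paddle 优化器
```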
在优化器模块添加之后,只需要在配置文件中进行配置即可使用,如:
```yaml
Optimizer:
  name: MyOptim
  args1: args1
  args2: args2
  lr:
    name: Cosine
    learning_rate: 0.001
  regularizer:
    name: 'L2'
    factor: 0
```
\ No newline at end of file
## 可选参数列表

以下列表可以通过`--help`查看

...@@ -8,65 +8,115 @@...

| -o | ALL | 设置配置文件里的参数内容 | None | 使用-o配置相较于-c选择的配置文件具有更高的优先级。例如:`-o Global.use_gpu=false` |

## 配置文件参数介绍

以 `rec_chinese_lite_train_v1.1.yml` 为例

### Global

| 字段 | 用途 | 默认值 | 备注 |
| :----------------------: | :---------------------: | :--------------: | :--------------------: |
| use_gpu | 设置代码是否在gpu运行 | true | \ |
| epoch_num | 最大训练epoch数 | 500 | \ |
| log_smooth_window | 滑动窗口大小 | 20 | \ |
| print_batch_step | 设置打印log间隔 | 10 | \ |
| save_model_dir | 设置模型保存路径 | output/{算法名称} | \ |
| save_epoch_step | 设置模型保存间隔 | 3 | \ |
| eval_batch_step | 设置模型评估间隔 | 2000 或 [1000, 2000] | 2000 表示每2000次迭代评估一次,[1000, 2000]表示从1000次迭代开始,每2000次评估一次 |
| cal_metric_during_train | 设置是否在训练过程中评估指标,此时评估的是模型在当前batch下的指标 | true | \ |
| load_static_weights | 设置预训练模型是否是静态图模式保存(目前仅检测算法需要) | true | \ |
| pretrained_model | 设置加载预训练模型路径 | ./pretrain_models/CRNN/best_accuracy | \ |
| checkpoints | 加载模型参数路径 | None | 用于中断后加载参数继续训练 |
| use_visualdl | 设置是否启用visualdl进行可视化log展示 | False | [教程地址](https://www.paddlepaddle.org.cn/paddle/visualdl) |
| infer_img | 设置预测图像路径或文件夹路径 | ./infer_img | \ |
| character_dict_path | 设置字典路径 | ./ppocr/utils/ppocr_keys_v1.txt | \ |
| max_text_length | 设置文本最大长度 | 25 | \ |
| character_type | 设置字符类型 | ch | en/ch,en时将使用默认dict,ch时使用自定义dict |
| use_space_char | 设置是否识别空格 | True | 仅在 character_type=ch 时支持空格 |
| label_list | 设置方向分类器支持的角度 | ['0','180'] | 仅在方向分类器中生效 |
| save_res_path | 设置检测模型的结果保存地址 | ./output/det_db/predicts_db.txt | 仅在检测模型中生效 |

### Optimizer ([ppocr/optimizer](../../ppocr/optimizer))

| 字段 | 用途 | 默认值 | 备注 |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | 优化器类名 | Adam | 目前支持`Momentum`,`Adam`,`RMSProp`,见[ppocr/optimizer/optimizer.py](../../ppocr/optimizer/optimizer.py) |
| beta1 | 设置一阶矩估计的指数衰减率 | 0.9 | \ |
| beta2 | 设置二阶矩估计的指数衰减率 | 0.999 | \ |
| **lr** | 设置学习率decay方式 | - | \ |
| name | 学习率decay类名 | Cosine | 目前支持`Linear`,`Cosine`,`Step`,`Piecewise`,见[ppocr/optimizer/learning_rate.py](../../ppocr/optimizer/learning_rate.py) |
| learning_rate | 基础学习率 | 0.001 | \ |
| **regularizer** | 设置网络正则化方式 | - | \ |
| name | 正则化类名 | L2 | 目前支持`L1`,`L2`,见[ppocr/optimizer/regularizer.py](../../ppocr/optimizer/regularizer.py) |
| factor | 正则化系数 | 0.00004 | \ |

### Architecture ([ppocr/modeling](../../ppocr/modeling))

在 ppocr 中,网络被划分为 Transform,Backbone,Neck 和 Head 四个阶段

| 字段 | 用途 | 默认值 | 备注 |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| model_type | 网络类型 | rec | 目前支持`rec`,`det`,`cls` |
| algorithm | 模型名称 | CRNN | 支持列表见[algorithm_overview](./algorithm_overview.md) |
| **Transform** | 设置变换方式 | - | 目前仅rec类型的算法支持,具体见[ppocr/modeling/transform](../../ppocr/modeling/transform) |
| name | 变换方式类名 | TPS | 目前支持`TPS` |
| num_fiducial | TPS控制点数 | 20 | 上下边各十个 |
| loc_lr | 定位网络学习率 | 0.1 | \ |
| model_name | 定位网络大小 | small | 目前支持`small`,`large` |
| **Backbone** | 设置网络backbone类名 | - | 具体见[ppocr/modeling/backbones](../../ppocr/modeling/backbones) |
| name | backbone类名 | ResNet | 目前支持`MobileNetV3`,`ResNet` |
| layers | resnet层数 | 34 | 支持18,34,50,101,152,200 |
| model_name | MobileNetV3 网络大小 | small | 支持`small`,`large` |
| **Neck** | 设置网络neck | - | 具体见[ppocr/modeling/necks](../../ppocr/modeling/necks) |
| name | neck类名 | SequenceEncoder | 目前支持`SequenceEncoder`,`DBFPN` |
| encoder_type | SequenceEncoder编码器类型 | rnn | 支持`reshape`,`fc`,`rnn` |
| hidden_size | rnn内部单元数 | 48 | \ |
| out_channels | DBFPN输出通道数 | 256 | \ |
| **Head** | 设置网络Head | - | 具体见[ppocr/modeling/heads](../../ppocr/modeling/heads) |
| name | head类名 | CTCHead | 目前支持`CTCHead`,`DBHead`,`ClsHead` |
| fc_decay | CTCHead正则化系数 | 0.0004 | \ |
| k | DBHead二值化系数 | 50 | \ |
| class_dim | ClsHead输出分类数 | 2 | \ |

### Loss ([ppocr/losses](../../ppocr/losses))

| 字段 | 用途 | 默认值 | 备注 |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | 网络loss类名 | CTCLoss | 目前支持`CTCLoss`,`DBLoss`,`ClsLoss` |
| balance_loss | DBLoss中是否对正负样本数量进行均衡(使用OHEM) | True | \ |
| ohem_ratio | DBLoss中OHEM的负正样本比例 | 3 | \ |
| main_loss_type | DBLoss中shrink_map所采用的loss | DiceLoss | 支持`DiceLoss`,`BCELoss` |
| alpha | DBLoss中shrink_map_loss的系数 | 5 | \ |
| beta | DBLoss中threshold_map_loss的系数 | 10 | \ |

### PostProcess ([ppocr/postprocess](../../ppocr/postprocess))

| 字段 | 用途 | 默认值 | 备注 |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | 后处理类名 | CTCLabelDecode | 目前支持`CTCLabelDecode`,`AttnLabelDecode`,`DBPostProcess`,`ClsPostProcess` |
| thresh | DBPostProcess中分割图进行二值化的阈值 | 0.3 | \ |
| box_thresh | DBPostProcess中对输出框进行过滤的阈值,低于此阈值的框不会输出 | 0.7 | \ |
| max_candidates | DBPostProcess中输出的最大文本框数量 | 1000 | \ |
| unclip_ratio | DBPostProcess中对文本框进行放大的比例 | 2.0 | \ |

### Metric ([ppocr/metrics](../../ppocr/metrics))

| 字段 | 用途 | 默认值 | 备注 |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | 指标评估方法名称 | RecMetric | 目前支持`DetMetric`,`RecMetric`,`ClsMetric` |
| main_indicator | 主要指标,用于选取最优模型 | acc | 对于检测方法为hmean,识别和分类方法为acc |

### Dataset ([ppocr/data](../../ppocr/data))

| 字段 | 用途 | 默认值 | 备注 |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| **dataset** | 每次迭代返回一个样本 | - | - |
| name | dataset类名 | SimpleDataSet | 目前支持`SimpleDataSet`与`LMDBDateSet` |
| data_dir | 数据集图片存放路径 | ./train_data | \ |
| label_file_list | 数据标签路径 | ["./train_data/train_list.txt"] | dataset为LMDBDateSet时不需要此参数 |
| ratio_list | 数据集的比例 | [1.0] | 若label_file_list中有两个train_list,且ratio_list为[0.4,0.6],则从train_list1中采样40%,从train_list2中采样60%组合整个dataset |
| transforms | 对图片和标签进行变换的方法列表 | [DecodeImage,CTCLabelEncode,RecResizeImg,KeepKeys] | 见[ppocr/data/imaug](../../ppocr/data/imaug) |
| **loader** | dataloader相关 | - | \ |
| shuffle | 每个epoch是否将数据集顺序打乱 | True | \ |
| batch_size_per_card | 训练时单卡batch size | 256 | \ |
| drop_last | 是否丢弃因数据集样本数不能被batch_size整除而产生的最后一个不完整的mini-batch | True | \ |
| num_workers | 用于加载数据的子进程个数,若为0即为不开启子进程,在主进程中进行数据加载 | 8 | \ |
\ No newline at end of file
...@@ -4,38 +4,34 @@...
PaddleOCR 的整体目录结构介绍如下:

```
PaddleOCR
├── configs // 配置文件,可通过 yml 文件选择模型结构并修改超参
│ ├── cls // 方向分类器相关配置文件
│ │ └── cls_mv3.yml // 训练配置相关,包括骨干网络、head、loss、优化器和数据
│ ├── det // 检测相关配置文件
│ │ ├── det_mv3_db.yml // 训练配置
│ │ ...
│ └── rec // 识别相关配置文件
│ ├── rec_mv3_none_bilstm_ctc.yml // crnn 训练配置
│ ...
├── deploy // 部署相关
│ ├── android_demo // android_demo
│ │ ...
│ ├── cpp_infer // C++ infer
│ │ ├── CMakeLists.txt // Cmake 文件
│ │ ├── docs // 说明文档
│ │ │ └── windows_vs2019_build.md
│ │ ├── include // 头文件
│ │ │ ├── clipper.h // clipper 库
│ │ │ ├── config.h // 预测配置
│ │ │ ├── ocr_cls.h // 方向分类器
│ │ │ ├── ocr_det.h // 文字检测
│ │ │ ├── ocr_rec.h // 文字识别
│ │ │ ├── postprocess_op.h // 检测后处理
│ │ │ ├── preprocess_op.h // 检测预处理
│ │ │ └── utility.h // 工具
│ │ ├── readme.md // 说明文档
│ │ ├── ...
│ │ ├── src // 源文件
│ │ │ ├── clipper.cpp
│ │ │ ├── config.cpp
│ │ │ ├── main.cpp
...@@ -45,10 +41,10 @@ PaddleOCR...
│ │ │ ├── postprocess_op.cpp
│ │ │ ├── preprocess_op.cpp
│ │ │ └── utility.cpp
│ │ └── tools // 编译、执行脚本
│ │ ├── build.sh // 编译脚本
│ │ ├── config.txt // 配置文件
│ │ └── run.sh // 测试启动脚本
│ ├── docker
│ │ └── hubserving
│ │ ├── cpu
...@@ -58,151 +54,163 @@ PaddleOCR...
│ │ ├── README_cn.md
│ │ ├── README.md
│ │ └── sample_request.txt
│ ├── hubserving // hubserving
│ │ ├── ocr_cls // 方向分类器
│ │ │ ├── config.json // serving 配置
│ │ │ ├── __init__.py
│ │ │ ├── module.py // 预测模型
│ │ │ └── params.py // 预测参数
│ │ ├── ocr_det // 文字检测
│ │ │ ├── config.json // serving 配置
│ │ │ ├── __init__.py
│ │ │ ├── module.py // 预测模型
│ │ │ └── params.py // 预测参数
│ │ ├── ocr_rec // 文字识别
│ │ │ ├── config.json
│ │ │ ├── __init__.py
│ │ │ ├── module.py
│ │ │ └── params.py
│ │ └── ocr_system // 系统预测
│ │ ├── config.json
│ │ ├── __init__.py
│ │ ├── module.py
│ │ └── params.py
│ ├── imgs // 预测图片
│ │ ├── cpp_infer_pred_12.png
│ │ └── demo.png
│ ├── ios_demo // ios demo
│ │ ...
│ ├── lite // lite 部署
│ │ ├── cls_process.cc // 方向分类器数据处理
│ │ ├── cls_process.h
│ │ ├── config.txt // 检测配置参数
│ │ ├── crnn_process.cc // crnn 数据处理
│ │ ├── crnn_process.h
│ │ ├── db_post_process.cc // db 数据处理
│ │ ├── db_post_process.h
│ │ ├── Makefile // 编译文件
│ │ ├── ocr_db_crnn.cc // 串联预测
│ │ ├── prepare.sh // 数据准备
│ │ ├── readme.md // 说明文档
│ │ ...
│ ├── pdserving // pdserving 部署
│ │ ├── det_local_server.py // 检测 快速版,部署方便预测速度快
│ │ ├── det_web_server.py // 检测 完整版,稳定性高分布式部署
│ │ ├── ocr_local_server.py // 检测+识别 快速版
│ │ ├── ocr_web_client.py // 客户端
│ │ ├── ocr_web_server.py // 检测+识别 完整版
│ │ ├── readme.md // 说明文档
│ │ ├── rec_local_server.py // 识别 快速版
│ │ └── rec_web_server.py // 识别 完整版
│ └── slim
│ └── quantization // 量化相关
│ ├── export_model.py // 导出模型
│ ├── quant.py // 量化
│ └── README.md // 说明文档
├── doc // 文档教程
│ ...
├── ppocr // 网络核心代码
│ ├── data // 数据处理
│ │ ├── imaug // 图片和 label 处理代码
│ │ │ ├── text_image_aug // 文本识别的 tia 数据扩充
│ │ │ │ ├── __init__.py
│ │ │ │ ├── augment.py // tia_distort,tia_stretch 和 tia_perspective 的代码
│ │ │ │ └── warp_mls.py
│ │ │ ├── __init__.py
│ │ │ ├── east_process.py // EAST 算法的数据处理步骤
│ │ │ ├── make_border_map.py // 生成边界图
│ │ │ ├── make_shrink_map.py // 生成收缩图
│ │ │ ├── operators.py // 图像基本操作,如读取和归一化
│ │ │ ├── randaugment.py // 随机数据增广操作
│ │ │ ├── random_crop_data.py // 随机裁剪
│ │ │ ├── rec_img_aug.py // 文本识别的数据扩充
│ │ │ └── sast_process.py // SAST 算法的数据处理步骤
│ │ ├── __init__.py // 构造 dataloader 相关代码
│ │ ├── lmdb_dataset.py // 读取 lmdb 数据集的 dataset
│ │ └── simple_dataset.py // 读取文本格式存储数据集的 dataset
│ ├── losses // 损失函数
│ │ ├── __init__.py // 构造 loss 相关代码
│ │ ├── cls_loss.py // 方向分类器 loss
│ │ ├── det_basic_loss.py // 检测基础 loss
│ │ ├── det_db_loss.py // DB loss
│ │ ├── det_east_loss.py // EAST loss
│ │ ├── det_sast_loss.py // SAST loss
│ │ ├── rec_ctc_loss.py // CTC loss
│ │ └── rec_att_loss.py // Attention loss
│ ├── metrics // 评估指标
│ │ ├── __init__.py // 构造 metric 相关代码
│ │ ├── cls_metric.py // 方向分类器 metric
│ │ ├── det_metric.py // 检测 metric
│ │ ├── eval_det_iou.py // 检测 iou 相关
│ │ └── rec_metric.py // 识别 metric
│ ├── modeling // 组网相关
│ │ ├── architectures // 网络
│ │ │ ├── __init__.py // 构造 model 相关代码
│ │ │ └── base_model.py // 组网代码
│ │ ├── backbones // 骨干网络
│ │ │ ├── __init__.py // 构造 backbone 相关代码
│ │ │ ├── det_mobilenet_v3.py // 检测 mobilenet_v3
│ │ │ ├── det_resnet_vd.py // 检测 resnet
│ │ │ ├── det_resnet_vd_sast.py // 检测 SAST 算法的 resnet backbone
│ │ │ ├── rec_mobilenet_v3.py // 识别 mobilenet_v3
│ │ │ └── rec_resnet_vd.py // 识别 resnet
│ │ ├── necks // 颈函数
│ │ │ ├── __init__.py // 构造 neck 相关代码
│ │ │ ├── db_fpn.py // 标准 fpn 网络
│ │ │ ├── east_fpn.py // EAST 算法的 fpn 网络
│ │ │ ├── sast_fpn.py // SAST 算法的 fpn 网络
│ │ │ └── rnn.py // 识别 序列编码
│ │ ├── heads // 头函数
│ │ │ ├── __init__.py // 构造 head 相关代码
│ │ │ ├── cls_head.py // 方向分类器 分类头
│ │ │ ├── det_db_head.py // DB 检测头
│ │ │ ├── det_east_head.py // EAST 检测头
│ │ │ ├── det_sast_head.py // SAST 检测头
│ │ │ ├── rec_ctc_head.py // 识别 ctc
│ │ │ └── rec_att_head.py // 识别 attention
│ │ └── transforms // 图像变换
│ │ ├── __init__.py // 构造 transform 相关代码
│ │ └── tps.py // TPS 变换
│ ├── optimizer // 优化器
│ │ ├── __init__.py // 构造 optimizer 相关代码
│ │ ├── learning_rate.py // 学习率衰减
│ │ ├── optimizer.py // 优化器
│ │ └── regularizer.py // 网络正则化
│ ├── postprocess // 后处理
│ │ ├── cls_postprocess.py // 方向分类器 后处理
│ │ ├── db_postprocess.py // DB 后处理
│ │ ├── east_postprocess.py // EAST 后处理
│ │ ├── locality_aware_nms.py // NMS
│ │ ├── rec_postprocess.py // 识别网络 后处理
│ │ └── sast_postprocess.py // SAST 后处理
│ └── utils // 工具
│ ├── dict // 小语种字典
│ ....
│ ├── ic15_dict.txt // 英文数字字典,区分大小写
│ ├── ppocr_keys_v1.txt // 中文字典,用于训练中文模型
│ ├── logging.py // logger
│ ├── save_load.py // 模型保存和加载函数
│ ├── stats.py // 统计
│ └── utility.py // 工具函数
├── tools
│ ├── eval.py // 评估函数
│ ├── export_model.py // 导出 inference 模型
│ ├── infer // 基于预测引擎预测
│ │ ├── predict_cls.py
│ │ ├── predict_det.py
│ │ ├── predict_rec.py
│ │ ├── predict_system.py
│ │ └── utility.py
│ ├── infer_cls.py // 基于训练引擎 预测分类
│ ├── infer_det.py // 基于训练引擎 预测检测
│ ├── infer_rec.py // 基于训练引擎 预测识别
│ ├── program.py // 整体流程
│ ├── test_hubserving.py
│ └── train.py // 启动训练
├── paddleocr.py
├── README_ch.md // 中文说明文档
├── README_en.md // 英文说明文档
├── README.md // 主页说明文档
├── requirments.txt // 安装依赖
├── setup.py // whl包打包脚本
├── train.sh // 启动训练脚本
```
\ No newline at end of file
# Add new algorithm
PaddleOCR decomposes an algorithm into the following parts, and modularizes each part to make it more convenient to develop new algorithms.
* Data loading and processing
* Network
* Post-processing
* Loss
* Metric
* Optimizer
The following introduces each part separately, and explains how to add the modules required by a new algorithm.
## Data loading and processing
Data loading and processing are composed of different modules, which complete image reading, data augmentation and label production. This part is under [ppocr/data](../../ppocr/data). The explanation of each file and folder is as follows:
```bash
ppocr/data/
├── imaug # Scripts for image reading, data augment and label production
│ ├── label_ops.py # Modules that transform the label
│ ├── operators.py # Modules that transform the image
│ ├──.....
├── __init__.py
├── lmdb_dataset.py # The dataset that reads the lmdb
└── simple_dataset.py # Read the dataset saved in the form of `image_path\tgt`
```
PaddleOCR has a large number of built-in image operation related modules. For modules that are not built-in, you can add them through the following steps:
1. Create a new file under the [ppocr/data/imaug](../../ppocr/data/imaug) folder, such as my_module.py.
2. Add code in the my_module.py file, the sample code is as follows:
```python
class MyModule:
    def __init__(self, *args, **kwargs):
        # your init code
        pass

    def __call__(self, data):
        img = data['image']
        label = data['label']
        # your process code
        data['image'] = img
        data['label'] = label
        return data
```
3. Import the added module in the [ppocr/data/imaug/\__init\__.py](../../ppocr/data/imaug/__init__.py) file.
All data processing modules are executed in sequence; they are combined and executed in the form of a list in the config file. For example:
```yaml
# angle class data process
transforms:
  - DecodeImage: # load image
      img_mode: BGR
      channel_first: False
  - MyModule:
      args1: args1
      args2: args2
  - KeepKeys:
      keep_keys: ['image', 'label'] # dataloader will return list in this order
```
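Each entry in `transforms` is instantiated and then applied to the sample dict in list order. Below is a minimal sketch of that execution loop (the function name and driver code are illustrative, not the exact PaddleOCR API):

```python
# Illustrative sketch: run each transform in order; an op returning None drops the sample
def apply_transforms(data, ops):
    for op in ops:
        data = op(data)
        if data is None:
            return None
    return data

# Using MyModule from step 2 above:
sample = {'image': b'raw-image-bytes', 'label': '0'}
sample = apply_transforms(sample, [MyModule()])
```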
## Network
The network part completes the construction of the network. PaddleOCR divides the network into four parts, which are under [ppocr/modeling](../../ppocr/modeling). The data entering the network passes through these four parts in sequence (transforms->backbones->necks->heads).
```bash
├── architectures # Code for building network
├── transforms # Image Transformation Module
├── backbones # Feature extraction module
├── necks # Feature enhancement module
└── heads # Output module
```
PaddleOCR has built-in commonly used modules related to algorithms such as DB, EAST, SAST, CRNN and Attention. For modules that are not built in, you can add them through the following steps; the four parts are added in the same way, taking backbones as an example:
1. Create a new file under the [ppocr/modeling/backbones](../../ppocr/modeling/backbones) folder, such as my_backbone.py.
2. Add code in the my_backbone.py file, the sample code is as follows:
```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class MyBackbone(nn.Layer):
    def __init__(self, *args, **kwargs):
        super(MyBackbone, self).__init__()
        # your init code
        self.conv = nn.xxxx

    def forward(self, inputs):
        # your network forward
        y = self.conv(inputs)
        return y
```
3. Import the added module in the [ppocr/modeling/backbones/\__init\__.py](../../ppocr/modeling/backbones/__init__.py) file, as sketched below.
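The import itself is usually a single line. Below is a sketch (assuming the build function checks class names against a support list; the exact structure may differ in the repository):

```python
# ppocr/modeling/backbones/__init__.py (illustrative)
from .my_backbone import MyBackbone  # import the custom backbone

# If the build function validates names against a support list (e.g. support_dict),
# 'MyBackbone' also needs to be added to that list.
```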
After adding the four-part modules of the network, you only need to configure them in the configuration file to use them, for example:
```yaml
Architecture:
  model_type: rec
  algorithm: CRNN
  Transform:
    name: MyTransform
    args1: args1
    args2: args2
  Backbone:
    name: MyBackbone
    args1: args1
  Neck:
    name: MyNeck
    args1: args1
  Head:
    name: MyHead
    args1: args1
```
## Post-processing
Post-processing decodes the network output to obtain text boxes or recognized text. This part is under [ppocr/postprocess](../../ppocr/postprocess).
PaddleOCR has built-in post-processing modules related to algorithms such as DB, EAST, SAST, CRNN and Attention. For components that are not built-in, they can be added through the following steps:
1. Create a new file under the [ppocr/postprocess](../../ppocr/postprocess) folder, such as my_postprocess.py.
2. Add code in the my_postprocess.py file, the sample code is as follows:
```python
import paddle


class MyPostProcess:
    def __init__(self, *args, **kwargs):
        # your init code
        pass

    def __call__(self, preds, label=None, *args, **kwargs):
        if isinstance(preds, paddle.Tensor):
            preds = preds.numpy()
        # your preds decode code
        preds = self.decode_preds(preds)
        if label is None:
            return preds
        # your label decode code
        label = self.decode_label(label)
        return preds, label

    def decode_preds(self, preds):
        # your preds decode code
        pass

    def decode_label(self, label):
        # your label decode code
        pass
```
3. Import the added module in the [ppocr/postprocess/\__init\__.py](../../ppocr/postprocess/__init__.py) file.
After the post-processing module is added, you only need to configure it in the configuration file to use it, for example:
```yaml
PostProcess:
  name: MyPostProcess
  args1: args1
  args2: args2
```
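The `__call__` convention above means the same post-processing object serves both inference and evaluation. Below is an illustrative sketch (`preds` and `label` are assumed variables holding the network output and labels):

```python
# Illustrative: decode predictions only at inference time,
# decode predictions and labels together during training/evaluation
post_process = MyPostProcess()
result = post_process(preds)                       # decode predictions only
preds_dec, label_dec = post_process(preds, label)  # decode predictions and labels
```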
## Loss
The loss function is used to calculate the distance between the network output and the label. This part is under [ppocr/losses](../../ppocr/losses).
PaddleOCR has built-in loss function modules related to algorithms such as DB, EAST, SAST, CRNN and Attention. For modules that are not built in, you can add them through the following steps:
1. Create a new file in the [ppocr/losses](../../ppocr/losses) folder, such as my_loss.py.
2. Add code in the my_loss.py file, the sample code is as follows:
```python
import paddle
from paddle import nn


class MyLoss(nn.Layer):
    def __init__(self, **kwargs):
        super(MyLoss, self).__init__()
        # your init code, e.g. self.loss = nn.L1Loss()
        pass

    def __call__(self, predicts, batch):
        label = batch[1]
        # your loss code
        loss = self.loss(input=predicts, label=label)
        return {'loss': loss}
```
3. Import the added module in the [ppocr/losses/\__init\__.py](../../ppocr/losses/__init__.py) file.
After the loss function module is added, you only need to configure it in the configuration file to use it, such as:
```yaml
Loss:
  name: MyLoss
  args1: args1
  args2: args2
```
## Metric
Metric is used to calculate the performance of the network on the current batch. This part is under [ppocr/metrics](../../ppocr/metrics). PaddleOCR has built-in evaluation modules related to algorithms such as detection, classification and recognition. For modules that are not built in, you can add them through the following steps:
1. Create a new file under the [ppocr/metrics](../../ppocr/metrics) folder, such as my_metric.py.
2. Add code in the my_metric.py file, the sample code is as follows:
```python
class MyMetric(object):
    def __init__(self, main_indicator='acc', **kwargs):
        # main_indicator is used to select the best model
        self.main_indicator = main_indicator
        self.reset()

    def __call__(self, preds, batch, *args, **kwargs):
        # preds is the output of postprocess
        # batch is the output of dataloader
        labels = batch[1]
        cur_correct_num = 0
        cur_all_num = 0
        # your metric code
        self.correct_num += cur_correct_num
        self.all_num += cur_all_num
        return {'acc': cur_correct_num / cur_all_num, }

    def get_metric(self):
        """
        return metrics {
            'acc': 0,
            'norm_edit_dis': 0,
        }
        """
        acc = self.correct_num / self.all_num
        self.reset()
        return {'acc': acc}

    def reset(self):
        # reset metric
        self.correct_num = 0
        self.all_num = 0
```
3. Import the added module in the [ppocr/metrics/\__init\__.py](../../ppocr/metrics/__init__.py) file.
After the metric module is added, you only need to configure it in the configuration file to use it, such as:
```yaml
Metric:
  name: MyMetric
  main_indicator: acc
```
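The metric object accumulates statistics batch by batch and summarizes them with `get_metric`, which also resets the counters. Below is an illustrative sketch (`eval_results` is an assumed iterator yielding post-process outputs and dataloader outputs):

```python
# Illustrative sketch: typical metric usage cycle
metric = MyMetric(main_indicator='acc')
for preds, batch in eval_results:
    metric(preds, batch)        # accumulate correct_num / all_num per batch
print(metric.get_metric())      # summarize the final metric and reset, e.g. {'acc': 0.9}
```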
## Optimizer
The optimizer is used to train the network. The optimizer also contains network regularization and learning rate decay modules. This part is under [ppocr/optimizer](../../ppocr/optimizer). PaddleOCR has built-in commonly used optimizer modules such as `Momentum`, `Adam` and `RMSProp`, commonly used learning rate decay modules such as `Linear`, `Cosine`, `Step` and `Piecewise`, and commonly used regularization modules such as `L1Decay` and `L2Decay`.
Modules that are not built in can be added through the following steps; take `optimizer` as an example:
1. Create your own optimizer in the [ppocr/optimizer/optimizer.py](../../ppocr/optimizer/optimizer.py) file, the sample code is as follows:
```python
from paddle import optimizer as optim


class MyOptim(object):
    def __init__(self, learning_rate=0.001, *args, **kwargs):
        self.learning_rate = learning_rate

    def __call__(self, parameters):
        # It is recommended to wrap the built-in optimizer of paddle
        opt = optim.XXX(
            learning_rate=self.learning_rate,
            parameters=parameters)
        return opt
```
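The optimizer class is used as a callable: it is first instantiated with the hyperparameters from the config, then called with the network parameters to obtain the actual paddle optimizer. Below is an illustrative sketch (`model` is an assumed network object; in real training this step is performed by the build process from the config):

```python
# Illustrative sketch: how MyOptim is invoked
build_opt = MyOptim(learning_rate=0.001)
optimizer = build_opt(parameters=model.parameters())  # returns the wrapped paddle optimizer
```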
After the optimizer module is added, you only need to configure it in the configuration file to use it, for example:
```yaml
Optimizer:
  name: MyOptim
  args1: args1
  args2: args2
  lr:
    name: Cosine
    learning_rate: 0.001
  regularizer:
    name: 'L2'
    factor: 0
```
\ No newline at end of file
## Optional parameter list

The following list can be viewed through `--help`

| FLAG | Supported script | Use | Default | Note |
| :----------------------: | :------------: | :---------------: | :--------------: | :-----------------: |
| -c | ALL | Specify configuration file to use | None | **Please refer to the parameter introduction for configuration file usage** |
| -o | ALL | Set configuration options | None | Configuration using -o has higher priority than the configuration file selected with -c. E.g: `-o Global.use_gpu=false` |

## INTRODUCTION TO GLOBAL PARAMETERS OF CONFIGURATION FILE

Take `rec_chinese_lite_train_v1.1.yml` as an example

### Global

| Parameter | Use | Default | Note |
| :----------------------: | :---------------------: | :--------------: | :--------------------: |
| use_gpu | Set using GPU or not | true | \ |
| epoch_num | Maximum training epoch number | 500 | \ |
| log_smooth_window | Sliding window size | 20 | \ |
| print_batch_step | Set print log interval | 10 | \ |
| save_model_dir | Set model save path | output/{model_name} | \ |
| save_epoch_step | Set model save interval | 3 | \ |
| eval_batch_step | Set the model evaluation interval | 2000 or [1000, 2000] | running evaluation every 2000 iters, or running evaluation every 2000 iterations after the 1000th iteration |
| cal_metric_during_train | Set whether to evaluate the metric during the training process; the metric is evaluated on the current batch | true | \ |
| load_static_weights | Set whether the pre-trained model is saved in static graph mode (currently only required by the detection algorithm) | true | \ |
| pretrained_model | Set the path of the pre-trained model | ./pretrain_models/CRNN/best_accuracy | \ |
| checkpoints | Set model parameter path | None | Used to load parameters after interruption to continue training |
| use_visualdl | Set whether to enable visualdl for visual log display | False | [Tutorial](https://www.paddlepaddle.org.cn/paddle/visualdl) |
| infer_img | Set inference image path or folder path | ./infer_img | \ |
| character_dict_path | Set dictionary path | ./ppocr/utils/ppocr_keys_v1.txt | \ |
| max_text_length | Set the maximum length of text | 25 | \ |
| character_type | Set character type | ch | en/ch, the default dict will be used for en, and the custom dict will be used for ch |
| use_space_char | Set whether to recognize spaces | True | Only supported when character_type=ch |
| label_list | Set the angles supported by the direction classifier | ['0','180'] | Only valid in the angle classifier model |
| save_res_path | Set the save path of the detection model results | ./output/det_db/predicts_db.txt | Only valid in the text detection model |

### Optimizer ([ppocr/optimizer](../../ppocr/optimizer))

| Parameter | Use | Default | Note |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | Optimizer class name | Adam | Currently supports `Momentum`, `Adam` and `RMSProp`, see [ppocr/optimizer/optimizer.py](../../ppocr/optimizer/optimizer.py) |
| beta1 | Set the exponential decay rate for the 1st moment estimates | 0.9 | \ |
| beta2 | Set the exponential decay rate for the 2nd moment estimates | 0.999 | \ |
| **lr** | Set the learning rate decay method | - | \ |
| name | Learning rate decay class name | Cosine | Currently supports `Linear`, `Cosine`, `Step` and `Piecewise`, see [ppocr/optimizer/learning_rate.py](../../ppocr/optimizer/learning_rate.py) |
| learning_rate | Set the base learning rate | 0.001 | \ |
| **regularizer** | Set the network regularization method | - | \ |
| name | Regularizer class name | L2 | Currently supports `L1` and `L2`, see [ppocr/optimizer/regularizer.py](../../ppocr/optimizer/regularizer.py) |
| factor | Regularization coefficient | 0.00004 | \ |

### Architecture ([ppocr/modeling](../../ppocr/modeling))

In ppocr, the network is divided into four stages: Transform, Backbone, Neck and Head

| Parameter | Use | Default | Note |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| model_type | Network type | rec | Currently supports `rec`, `det` and `cls` |
| algorithm | Model name | CRNN | See [algorithm_overview](./algorithm_overview.md) for the support list |
| **Transform** | Set the transformation method | - | Currently only supported by recognition algorithms, see [ppocr/modeling/transform](../../ppocr/modeling/transform) for details |
| name | Transformation class name | TPS | Currently supports `TPS` |
| num_fiducial | Number of TPS control points | 20 | Ten on the top and bottom |
| loc_lr | Localization network learning rate | 0.1 | \ |
| model_name | Localization network size | small | Currently supports `small` and `large` |
| **Backbone** | Set the network backbone class name | - | See [ppocr/modeling/backbones](../../ppocr/modeling/backbones) |
| name | Backbone class name | ResNet | Currently supports `MobileNetV3` and `ResNet` |
| layers | Number of resnet layers | 34 | Currently supports 18, 34, 50, 101, 152 and 200 |
| model_name | MobileNetV3 network size | small | Currently supports `small` and `large` |
| **Neck** | Set the network neck | - | See [ppocr/modeling/necks](../../ppocr/modeling/necks) |
| name | Neck class name | SequenceEncoder | Currently supports `SequenceEncoder` and `DBFPN` |
| encoder_type | SequenceEncoder encoder type | rnn | Currently supports `reshape`, `fc` and `rnn` |
| hidden_size | Number of rnn internal units | 48 | \ |
| out_channels | Number of DBFPN output channels | 256 | \ |
| **Head** | Set the network head | - | See [ppocr/modeling/heads](../../ppocr/modeling/heads) |
| name | Head class name | CTCHead | Currently supports `CTCHead`, `DBHead` and `ClsHead` |
| fc_decay | CTCHead regularization coefficient | 0.0004 | \ |
| k | DBHead binarization coefficient | 50 | \ |
| class_dim | Number of ClsHead output categories | 2 | \ |

### Loss ([ppocr/losses](../../ppocr/losses))

| Parameter | Use | Default | Note |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | Loss class name | CTCLoss | Currently supports `CTCLoss`, `DBLoss` and `ClsLoss` |
| balance_loss | Whether to balance the number of positive and negative samples in DBLoss (using OHEM) | True | \ |
| ohem_ratio | The negative-to-positive sample ratio of OHEM in DBLoss | 3 | \ |
| main_loss_type | The loss used by shrink_map in DBLoss | DiceLoss | Currently supports `DiceLoss` and `BCELoss` |
| alpha | The coefficient of shrink_map_loss in DBLoss | 5 | \ |
| beta | The coefficient of threshold_map_loss in DBLoss | 10 | \ |

### PostProcess ([ppocr/postprocess](../../ppocr/postprocess))

| Parameter | Use | Default | Note |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | Post-processing class name | CTCLabelDecode | Currently supports `CTCLabelDecode`, `AttnLabelDecode`, `DBPostProcess` and `ClsPostProcess` |
| thresh | Threshold for binarizing the segmentation map in DBPostProcess | 0.3 | \ |
| box_thresh | Threshold for filtering output boxes in DBPostProcess; boxes below this threshold will not be output | 0.7 | \ |
| max_candidates | Maximum number of text boxes output by DBPostProcess | 1000 | \ |
| unclip_ratio | Unclip ratio of the text box in DBPostProcess | 2.0 | \ |

### Metric ([ppocr/metrics](../../ppocr/metrics))

| Parameter | Use | Default | Note |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| name | Metric method name | RecMetric | Currently supports `DetMetric`, `RecMetric` and `ClsMetric` |
| main_indicator | Main indicator, used to select the best model | acc | hmean for the detection method, acc for the recognition and classification methods |

### Dataset ([ppocr/data](../../ppocr/data))

| Parameter | Use | Default | Note |
| :---------------------: | :---------------------: | :--------------: | :--------------------: |
| **dataset** | Returns one sample per iteration | - | - |
| name | Dataset class name | SimpleDataSet | Currently supports `SimpleDataSet` and `LMDBDateSet` |
| data_dir | Image folder path | ./train_data | \ |
| label_file_list | Groundtruth file path | ["./train_data/train_list.txt"] | This parameter is not required when dataset is LMDBDateSet |
| ratio_list | Ratio of the dataset | [1.0] | If there are two train_lists in label_file_list and ratio_list is [0.4,0.6], 40% will be sampled from train_list1 and 60% from train_list2 to combine the entire dataset |
| transforms | List of methods to transform images and labels | [DecodeImage,CTCLabelEncode,RecResizeImg,KeepKeys] | See [ppocr/data/imaug](../../ppocr/data/imaug) |
| **loader** | dataloader related | - | \ |
| shuffle | Whether to shuffle the dataset order every epoch | True | \ |
| batch_size_per_card | Single card batch size during training | 256 | \ |
| drop_last | Whether to discard the last incomplete mini-batch when the number of samples in the dataset cannot be divided by batch_size | True | \ |
| num_workers | Number of sub-processes used to load data; if 0, data is loaded in the main process | 8 | \ |
\ No newline at end of file
...@@ -2,40 +2,37 @@...
The overall directory structure of PaddleOCR is introduced as follows:

```
PaddleOCR
├── configs // Configuration file, you can config the model structure and modify the hyperparameters through the yml file
│ ├── cls // Angle classifier config files
│ │ └── cls_mv3.yml // Training config, including backbone network, head, loss, optimizer and data
│ ├── det // Text detection config files
│ │ ├── det_mv3_db.yml // Training config
│ │ ...
│ └── rec // Text recognition config files
│ ├── rec_mv3_none_bilstm_ctc.yml // CRNN config
│ ...
├── deploy // Deploy
│ ├── android_demo // Android demo
│ │ ...
│ ├── cpp_infer // C++ infer
│ │ ├── CMakeLists.txt // Cmake file
│ │ ├── docs // Docs
│ │ │ └── windows_vs2019_build.md
│ │ ├── include // Header files
│ │ │ ├── clipper.h // clipper
│ │ │ ├── config.h // Inference config
│ │ │ ├── ocr_cls.h // Angle class
│ │ │ ├── ocr_det.h // Text detection
│ │ │ ├── ocr_rec.h // Text recognition
│ │ │ ├── postprocess_op.h // Post-processing
│ │ │ ├── preprocess_op.h // Pre-processing
│ │ │ └── utility.h // tools
│ │ ├── readme.md // Documentation
│ │ ├── ...
│ │ ├── src // Source code files
│ │ │ ├── clipper.cpp
│ │ │ ├── config.cpp
│ │ │ ├── main.cpp
...@@ -45,10 +42,10 @@ PaddleOCR...
│ │ │ ├── postprocess_op.cpp
│ │ │ ├── preprocess_op.cpp
│ │ │ └── utility.cpp
│ │ └── tools // Compile and execute script
│ │ ├── build.sh // Compile script
│ │ ├── config.txt // Config file
│ │ └── run.sh // Execute script
│ ├── docker
│ │ └── hubserving
│ │ ├── cpu
...@@ -58,151 +55,165 @@ PaddleOCR...
│ │ ├── README_cn.md
│ │ ├── README.md
│ │ └── sample_request.txt
│ ├── hubserving // hubserving
│ │ ├── ocr_cls // Angle class
│ │ │ ├── config.json // Serving config
│ │ │ ├── __init__.py
│ │ │ ├── module.py // Model
│ │ │ └── params.py // Parameters
│ │ ├── ocr_det // Text detection
│ │ │ ├── config.json // Serving config
│ │ │ ├── __init__.py
│ │ │ ├── module.py // Model
│ │ │ └── params.py // Parameters
│ │ ├── ocr_rec // Text recognition
│ │ │ ├── config.json
│ │ │ ├── __init__.py
│ │ │ ├── module.py
│ │ │ └── params.py
│ │ └── ocr_system // Inference system
│ │ ├── config.json
│ │ ├── __init__.py
│ │ ├── module.py
│ │ └── params.py
│ ├── imgs // Inference images
│ │ ├── cpp_infer_pred_12.png
│ │ └── demo.png
│ ├── ios_demo // iOS demo
│ │ ...
│ ├── lite // Lite deploy
│ │ ├── cls_process.cc // Pre-process for angle class
│ │ ├── cls_process.h
│ │ ├── config.txt // Config file
│ │ ├── crnn_process.cc // Pre-process for CRNN
│ │ ├── crnn_process.h
│ │ ├── db_post_process.cc // Post-process for DB
│ │ ├── db_post_process.h
│ │ ├── Makefile // Compile file
│ │ ├── ocr_db_crnn.cc // Inference system
│ │ ├── prepare.sh // Prepare bash script
│ │ ├── readme.md // Documentation
│ │ ...
│ ├── pdserving // Pdserving deploy
│ │ ├── det_local_server.py // Text detection fast version, easy to deploy and fast to predict
│ │ ├── det_web_server.py // Text detection full version, high stability distributed deployment
│ │ ├── ocr_local_server.py // Text detection + recognition fast version
│ │ ├── ocr_web_client.py // Client
│ │ ├── ocr_web_server.py // Text detection + recognition full version
│ │ ├── readme.md // Documentation
│ │ ├── rec_local_server.py // Text recognition fast version
│ │ └── rec_web_server.py // Text recognition full version
│ └── slim
│ └── quantization // Quantization
│ ├── export_model.py // Export model
│ ├── quant.py // Quantization script
│ └── README.md // Documentation
├── doc // Documentation and tutorials
│ ...
├── ppocr // Core code
│ ├── data // Data processing
│ │ ├── imaug // Image and label processing code
│ │ │ ├── text_image_aug // Tia data augment for text recognition
│ │ │ │ ├── __init__.py
│ │ │ │ ├── augment.py // tia_distort, tia_stretch and tia_perspective
│ │ │ │ └── warp_mls.py
│ │ │ ├── __init__.py
│ │ │ ├── east_process.py // Data processing steps of EAST algorithm
│ │ │ ├── iaa_augment.py // Data augmentation operations
│ │ │ ├── label_ops.py // Label encode operations
│ │ │ ├── make_border_map.py // Generate boundary map
│ │ │ ├── make_shrink_map.py // Generate shrink map
│ │ │ ├── operators.py // Basic image operations, such as reading and normalization
│ │ │ ├── randaugment.py // Random data augmentation operation
│ │ │ ├── random_crop_data.py // Random crop
│ │ │ ├── rec_img_aug.py // Data augmentation for text recognition
│ │ │ └── sast_process.py // Data processing steps of SAST algorithm
│ │ ├── __init__.py // Construct dataloader code
│ │ ├── lmdb_dataset.py // Read lmdb dataset
│ │ └── simple_dataset.py // Read the dataset stored in text format
│ ├── losses // Loss function
│ │ ├── __init__.py // Construct loss code
│ │ ├── cls_loss.py // Angle class loss
│ │ ├── det_basic_loss.py // Text detection basic loss
│ │ ├── det_db_loss.py // DB loss
│ │ ├── det_east_loss.py // EAST loss
│ │ ├── det_sast_loss.py // SAST loss
│ │ ├── rec_ctc_loss.py // CTC loss
│ │ └── rec_att_loss.py // Attention loss
│ ├── metrics // Metrics
│ │ ├── __init__.py // Construct metric code
│ │ ├── cls_metric.py // Angle class metric
│ │ ├── det_metric.py // Text detection metric
│ │ ├── eval_det_iou.py // Text detection iou code
│ │ └── rec_metric.py // Text recognition metric
│ ├── modeling // Network
│ │ ├── architectures // Architecture
│ │ │ ├── __init__.py // Construct model code
│ │ │ └── base_model.py // Base model
│ │ ├── backbones // Backbones
│ │ │ ├── __init__.py // Construct backbone code
│ │ │ ├── det_mobilenet_v3.py // Text detection mobilenet_v3
│ │ │ ├── det_resnet_vd.py // Text detection resnet
│ │ │ ├── det_resnet_vd_sast.py // Text detection resnet backbone of the SAST algorithm
│ │ │ ├── rec_mobilenet_v3.py // Text recognition mobilenet_v3
│ │ │ └── rec_resnet_vd.py // Text recognition resnet
│ │ ├── necks // Necks
│ │ │ ├── __init__.py // Construct neck code
│ │ │ ├── db_fpn.py // Standard fpn
│ │ │ ├── east_fpn.py // EAST algorithm fpn network
│ │ │ ├── sast_fpn.py // SAST algorithm fpn network
│ │ │ └── rnn.py // Character recognition sequence encoding
│ │ ├── heads // Heads
│ │ │ ├── __init__.py // Construct head code
│ │ │ ├── cls_head.py // Angle class head
│ │ │ ├── det_db_head.py // DB head
│ │ │ ├── det_east_head.py // EAST head
│ │ │ ├── det_sast_head.py // SAST head
│ │ │ ├── rec_ctc_head.py // CTC head
│ │ │ └── rec_att_head.py // Attention head
│ │ └── transforms // Transforms
│ │ ├── __init__.py // Construct transform code
│ │ └── tps.py // TPS transform
│ ├── optimizer // Optimizer
│ │ ├── __init__.py // Construct optimizer code
│ │ ├── learning_rate.py // Learning rate decay
│ │ ├── optimizer.py // Optimizer
│ │ └── regularizer.py // Network regularization
│ ├── postprocess // Post-processing
│ │ ├── cls_postprocess.py // Angle class post-processing
│ │ ├── db_postprocess.py // DB post-processing
│ │ ├── east_postprocess.py // EAST post-processing
│ │ ├── locality_aware_nms.py // NMS
│ │ ├── rec_postprocess.py // Text recognition post-processing
│ │ └── sast_postprocess.py // SAST post-processing
│ └── utils // utils
│ ├── dict // Minor language dictionary
│ ....
│ ├── ic15_dict.txt // English number dictionary, case sensitive
│ ├── ppocr_keys_v1.txt // Chinese dictionary for training Chinese models
│ ├── logging.py // logger
│ ├── save_load.py // Model saving and loading functions
│ ├── stats.py // Training status statistics
│ └── utility.py // Utility function
├── tools
│ ├── eval.py // Evaluation function
│ ├── predict_rec.py │ ├── export_model.py // Export inference model
│ ├── predict_system.py │ ├── infer // Inference based on Inference engine
│ └── utility.py │ │ ├── predict_cls.py
├── infer_cls.py // Predict classification based on training engine │ │ ├── predict_det.py
├── infer_det.py // Predictive detection based on training engine │ │ ├── predict_rec.py
├── infer_rec.py // Predictive recognition based on training engine │ │ ├── predict_system.py
├── program.py // overall process │ │ └── utility.py
├── test_hubserving.py │ ├── infer_cls.py // Angle classification inference based on training engine
└── train.py // start training │ ├── infer_det.py // Text detection inference based on training engine
│ ├── infer_rec.py // Text recognition inference based on training engine
``` │ ├── program.py // Inference system
│ ├── test_hubserving.py
│ └── train.py // Start training script
├── paddleocr.py
├── README_ch.md // Chinese documentation
├── README_en.md // English documentation
├── README.md // Home page documentation
├── requirments.txt // Requirments
├── setup.py // Whl package packaging script
├── train.sh // Start training bash script
\ No newline at end of file
...@@ -26,6 +26,9 @@ from .randaugment import RandAugment
from .operators import *
from .label_ops import *
from .east_process import *
from .sast_process import *
def transform(data, ops=None):
""" transform """
......
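For reference, the `transform` helper whose body is elided above simply threads the data dict through each operator in order and drops the sample as soon as an operator returns `None`. A minimal sketch, assuming it matches the elided implementation:

```python
def transform(data, ops=None):
    """ transform """
    if ops is None:
        ops = []
    for op in ops:
        data = op(data)       # each op mutates/extends the data dict
        if data is None:      # an op rejecting the sample aborts the chain
            return None
    return data
```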
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import math
import cv2
import numpy as np
import json
import sys
import os
__all__ = ['EASTProcessTrain']
class EASTProcessTrain(object):
def __init__(self,
image_shape = [512, 512],
background_ratio = 0.125,
min_crop_side_ratio = 0.1,
min_text_size = 10,
**kwargs):
self.input_size = image_shape[1]
self.random_scale = np.array([0.5, 1, 2.0, 3.0])
self.background_ratio = background_ratio
self.min_crop_side_ratio = min_crop_side_ratio
self.min_text_size = min_text_size
def preprocess(self, im):
input_size = self.input_size
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale)
img_mean = [0.485, 0.456, 0.406]
img_std = [0.229, 0.224, 0.225]
# im = im[:, :, ::-1].astype(np.float32)
im = im / 255
im -= img_mean
im /= img_std
new_h, new_w, _ = im.shape
im_padded = np.zeros((input_size, input_size, 3), dtype=np.float32)
im_padded[:new_h, :new_w, :] = im
im_padded = im_padded.transpose((2, 0, 1))
im_padded = im_padded[np.newaxis, :]
return im_padded, im_scale
def rotate_im_poly(self, im, text_polys):
"""
rotate image by 90 / 180 / 270 degrees
"""
im_w, im_h = im.shape[1], im.shape[0]
dst_im = im.copy()
dst_polys = []
rand_degree_ratio = np.random.rand()
rand_degree_cnt = 1
if 0.333 < rand_degree_ratio < 0.666:
rand_degree_cnt = 2
elif rand_degree_ratio > 0.666:
rand_degree_cnt = 3
for i in range(rand_degree_cnt):
dst_im = np.rot90(dst_im)
rot_degree = -90 * rand_degree_cnt
rot_angle = rot_degree * math.pi / 180.0
n_poly = text_polys.shape[0]
cx, cy = 0.5 * im_w, 0.5 * im_h
ncx, ncy = 0.5 * dst_im.shape[1], 0.5 * dst_im.shape[0]
for i in range(n_poly):
wordBB = text_polys[i]
poly = []
for j in range(4):
sx, sy = wordBB[j][0], wordBB[j][1]
dx = math.cos(rot_angle) * (sx - cx)\
- math.sin(rot_angle) * (sy - cy) + ncx
dy = math.sin(rot_angle) * (sx - cx)\
+ math.cos(rot_angle) * (sy - cy) + ncy
poly.append([dx, dy])
dst_polys.append(poly)
dst_polys = np.array(dst_polys, dtype=np.float32)
return dst_im, dst_polys
def polygon_area(self, poly):
"""
compute the signed area of a quad via the shoelace formula;
the sign encodes vertex orientation (negative for the expected
clockwise order in image coordinates)
:param poly: (4, 2) array of vertices
:return: signed area
"""
edge = [(poly[1][0] - poly[0][0]) * (poly[1][1] + poly[0][1]),
(poly[2][0] - poly[1][0]) * (poly[2][1] + poly[1][1]),
(poly[3][0] - poly[2][0]) * (poly[3][1] + poly[2][1]),
(poly[0][0] - poly[3][0]) * (poly[0][1] + poly[3][1])]
return np.sum(edge) / 2.
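As a quick sanity check on the sign convention (a hypothetical standalone call): in image coordinates, where y grows downward, a box listed top-left, top-right, bottom-right, bottom-left is clockwise on screen and yields a negative area, which is the direction the validator below accepts.

```python
import numpy as np

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=np.float32)
print(EASTProcessTrain().polygon_area(square))  # -1.0 (negative: expected order)
```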
def check_and_validate_polys(self, polys, tags, img_height, img_width):
"""
check so that the text poly is in the same direction,
and also filter some invalid polygons
:param polys:
:param tags:
:return:
"""
h, w = img_height, img_width
if polys.shape[0] == 0:
return polys
polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1)
polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1)
validated_polys = []
validated_tags = []
for poly, tag in zip(polys, tags):
p_area = self.polygon_area(poly)
# invalid poly
if abs(p_area) < 1:
continue
if p_area > 0:
# positive area means the poly is in the wrong direction
if not tag:
tag = True  # reversed cases should be ignored
poly = poly[(0, 3, 2, 1), :]
validated_polys.append(poly)
validated_tags.append(tag)
return np.array(validated_polys), np.array(validated_tags)
def draw_img_polys(self, img, polys):
# debug-only helper: restore a displayable image and dump it with the polys drawn
if len(img.shape) == 4:
img = np.squeeze(img, axis=0)
if img.shape[0] == 3:
img = img.transpose((1, 2, 0))
img[:, :, 2] += 123.68
img[:, :, 1] += 116.78
img[:, :, 0] += 103.94
cv2.imwrite("tmp.jpg", img)
img = cv2.imread("tmp.jpg")
for box in polys:
box = box.astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(img, [box], True, color=(255, 255, 0), thickness=2)
import random
ino = random.randint(0, 100)
cv2.imwrite("tmp_%d.jpg" % ino, img)
return
def shrink_poly(self, poly, r):
"""
fit a poly inside the original poly; maybe bugs here...
used to generate the score map
:param poly: the text poly
:param r: r in the paper (per-vertex reference length)
:return: the shrunk poly
"""
# shrink ratio
R = 0.3
# find the longer pair
dist0 = np.linalg.norm(poly[0] - poly[1])
dist1 = np.linalg.norm(poly[2] - poly[3])
dist2 = np.linalg.norm(poly[0] - poly[3])
dist3 = np.linalg.norm(poly[1] - poly[2])
if dist0 + dist1 > dist2 + dist3:
# first move (p0, p1), (p2, p3), then (p0, p3), (p1, p2)
## p0, p1
theta = np.arctan2((poly[1][1] - poly[0][1]),
(poly[1][0] - poly[0][0]))
poly[0][0] += R * r[0] * np.cos(theta)
poly[0][1] += R * r[0] * np.sin(theta)
poly[1][0] -= R * r[1] * np.cos(theta)
poly[1][1] -= R * r[1] * np.sin(theta)
## p2, p3
theta = np.arctan2((poly[2][1] - poly[3][1]),
(poly[2][0] - poly[3][0]))
poly[3][0] += R * r[3] * np.cos(theta)
poly[3][1] += R * r[3] * np.sin(theta)
poly[2][0] -= R * r[2] * np.cos(theta)
poly[2][1] -= R * r[2] * np.sin(theta)
## p0, p3
theta = np.arctan2((poly[3][0] - poly[0][0]),
(poly[3][1] - poly[0][1]))
poly[0][0] += R * r[0] * np.sin(theta)
poly[0][1] += R * r[0] * np.cos(theta)
poly[3][0] -= R * r[3] * np.sin(theta)
poly[3][1] -= R * r[3] * np.cos(theta)
## p1, p2
theta = np.arctan2((poly[2][0] - poly[1][0]),
(poly[2][1] - poly[1][1]))
poly[1][0] += R * r[1] * np.sin(theta)
poly[1][1] += R * r[1] * np.cos(theta)
poly[2][0] -= R * r[2] * np.sin(theta)
poly[2][1] -= R * r[2] * np.cos(theta)
else:
## p0, p3
# print poly
theta = np.arctan2((poly[3][0] - poly[0][0]),
(poly[3][1] - poly[0][1]))
poly[0][0] += R * r[0] * np.sin(theta)
poly[0][1] += R * r[0] * np.cos(theta)
poly[3][0] -= R * r[3] * np.sin(theta)
poly[3][1] -= R * r[3] * np.cos(theta)
## p1, p2
theta = np.arctan2((poly[2][0] - poly[1][0]),
(poly[2][1] - poly[1][1]))
poly[1][0] += R * r[1] * np.sin(theta)
poly[1][1] += R * r[1] * np.cos(theta)
poly[2][0] -= R * r[2] * np.sin(theta)
poly[2][1] -= R * r[2] * np.cos(theta)
## p0, p1
theta = np.arctan2((poly[1][1] - poly[0][1]),
(poly[1][0] - poly[0][0]))
poly[0][0] += R * r[0] * np.cos(theta)
poly[0][1] += R * r[0] * np.sin(theta)
poly[1][0] -= R * r[1] * np.cos(theta)
poly[1][1] -= R * r[1] * np.sin(theta)
## p2, p3
theta = np.arctan2((poly[2][1] - poly[3][1]),
(poly[2][0] - poly[3][0]))
poly[3][0] += R * r[3] * np.cos(theta)
poly[3][1] += R * r[3] * np.sin(theta)
poly[2][0] -= R * r[2] * np.cos(theta)
poly[2][1] -= R * r[2] * np.sin(theta)
return poly
def generate_quad(self, im_size, polys, tags):
"""
Generate quadrangle.
"""
h, w = im_size
poly_mask = np.zeros((h, w), dtype=np.uint8)
score_map = np.zeros((h, w), dtype=np.uint8)
# (x1, y1, ..., x4, y4, short_edge_norm)
geo_map = np.zeros((h, w, 9), dtype=np.float32)
# mask used during training, to ignore some hard areas
training_mask = np.ones((h, w), dtype=np.uint8)
for poly_idx, poly_tag in enumerate(zip(polys, tags)):
poly = poly_tag[0]
tag = poly_tag[1]
r = [None, None, None, None]
for i in range(4):
dist1 = np.linalg.norm(poly[i] - poly[(i + 1) % 4])
dist2 = np.linalg.norm(poly[i] - poly[(i - 1) % 4])
r[i] = min(dist1, dist2)
# score map
shrinked_poly = self.shrink_poly(
poly.copy(), r).astype(np.int32)[np.newaxis, :, :]
cv2.fillPoly(score_map, shrinked_poly, 1)
cv2.fillPoly(poly_mask, shrinked_poly, poly_idx + 1)
# if the poly is too small, then ignore it during training
poly_h = min(
np.linalg.norm(poly[0] - poly[3]),
np.linalg.norm(poly[1] - poly[2]))
poly_w = min(
np.linalg.norm(poly[0] - poly[1]),
np.linalg.norm(poly[2] - poly[3]))
if min(poly_h, poly_w) < self.min_text_size:
cv2.fillPoly(training_mask,
poly.astype(np.int32)[np.newaxis, :, :], 0)
if tag:
cv2.fillPoly(training_mask,
poly.astype(np.int32)[np.newaxis, :, :], 0)
xy_in_poly = np.argwhere(poly_mask == (poly_idx + 1))
# geo map.
y_in_poly = xy_in_poly[:, 0]
x_in_poly = xy_in_poly[:, 1]
poly[:, 0] = np.minimum(np.maximum(poly[:, 0], 0), w)
poly[:, 1] = np.minimum(np.maximum(poly[:, 1], 0), h)
for pno in range(4):
geo_channel_beg = pno * 2
geo_map[y_in_poly, x_in_poly, geo_channel_beg] =\
x_in_poly - poly[pno, 0]
geo_map[y_in_poly, x_in_poly, geo_channel_beg+1] =\
y_in_poly - poly[pno, 1]
geo_map[y_in_poly, x_in_poly, 8] = \
1.0 / max(min(poly_h, poly_w), 1.0)
return score_map, geo_map, training_mask
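The nine geo channels at any pixel inside a shrunk poly are the pixel-to-vertex offsets followed by the inverse short-edge length, so a single positive pixel is enough to reconstruct its quad. A hypothetical decode at one pixel `(y, x)` with `score_map[y, x] == 1`:

```python
offsets = geo_map[y, x, 0:8].reshape(4, 2)           # (x - x_i, y - y_i) for vertex i
quad = np.array([x, y], dtype=np.float32) - offsets  # recover the four vertices
short_edge_norm = geo_map[y, x, 8]                   # 1 / max(min(poly_h, poly_w), 1)
```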
def crop_area(self,
im,
polys,
tags,
crop_background=False,
max_tries=50):
"""
make random crop from the input image
:param im:
:param polys:
:param tags:
:param crop_background:
:param max_tries:
:return:
"""
h, w, _ = im.shape
pad_h = h // 10
pad_w = w // 10
h_array = np.zeros((h + pad_h * 2), dtype=np.int32)
w_array = np.zeros((w + pad_w * 2), dtype=np.int32)
for poly in polys:
poly = np.round(poly, decimals=0).astype(np.int32)
minx = np.min(poly[:, 0])
maxx = np.max(poly[:, 0])
w_array[minx + pad_w:maxx + pad_w] = 1
miny = np.min(poly[:, 1])
maxy = np.max(poly[:, 1])
h_array[miny + pad_h:maxy + pad_h] = 1
# ensure the cropped area does not cross a text region
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return im, polys, tags
for i in range(max_tries):
xx = np.random.choice(w_axis, size=2)
xmin = np.min(xx) - pad_w
xmax = np.max(xx) - pad_w
xmin = np.clip(xmin, 0, w - 1)
xmax = np.clip(xmax, 0, w - 1)
yy = np.random.choice(h_axis, size=2)
ymin = np.min(yy) - pad_h
ymax = np.max(yy) - pad_h
ymin = np.clip(ymin, 0, h - 1)
ymax = np.clip(ymax, 0, h - 1)
if xmax - xmin < self.min_crop_side_ratio * w or \
ymax - ymin < self.min_crop_side_ratio * h:
# area too small
continue
if polys.shape[0] != 0:
poly_axis_in_area = (polys[:, :, 0] >= xmin)\
& (polys[:, :, 0] <= xmax)\
& (polys[:, :, 1] >= ymin)\
& (polys[:, :, 1] <= ymax)
selected_polys = np.where(
np.sum(poly_axis_in_area, axis=1) == 4)[0]
else:
selected_polys = []
if len(selected_polys) == 0:
# no text in this area
if crop_background:
im = im[ymin:ymax + 1, xmin:xmax + 1, :]
polys = []
tags = []
return im, polys, tags
else:
continue
im = im[ymin:ymax + 1, xmin:xmax + 1, :]
polys = polys[selected_polys]
tags = tags[selected_polys]
polys[:, :, 0] -= xmin
polys[:, :, 1] -= ymin
return im, polys, tags
return im, polys, tags
def crop_background_infor(self, im, text_polys, text_tags):
im, text_polys, text_tags = self.crop_area(
im, text_polys, text_tags, crop_background=True)
if len(text_polys) > 0:
return None
# pad and resize image
input_size = self.input_size
im, ratio = self.preprocess(im)
score_map = np.zeros((input_size, input_size), dtype=np.float32)
geo_map = np.zeros((input_size, input_size, 9), dtype=np.float32)
training_mask = np.ones((input_size, input_size), dtype=np.float32)
return im, score_map, geo_map, training_mask
def crop_foreground_infor(self, im, text_polys, text_tags):
im, text_polys, text_tags = self.crop_area(
im, text_polys, text_tags, crop_background=False)
if text_polys.shape[0] == 0:
return None
# skip the sample if every remaining text region is ignored
if np.sum((text_tags * 1.0)) >= text_tags.size:
return None
# pad and resize image
input_size = self.input_size
im, ratio = self.preprocess(im)
text_polys[:, :, 0] *= ratio
text_polys[:, :, 1] *= ratio
_, _, new_h, new_w = im.shape
# print(im.shape)
# self.draw_img_polys(im, text_polys)
score_map, geo_map, training_mask = self.generate_quad(
(new_h, new_w), text_polys, text_tags)
return im, score_map, geo_map, training_mask
def __call__(self, data):
im = data['image']
text_polys = data['polys']
text_tags = data['ignore_tags']
if im is None:
return None
if text_polys.shape[0] == 0:
return None
#add rotate cases
if np.random.rand() < 0.5:
im, text_polys = self.rotate_im_poly(im, text_polys)
h, w, _ = im.shape
text_polys, text_tags = self.check_and_validate_polys(text_polys,
text_tags, h, w)
if text_polys.shape[0] == 0:
return None
# random scale this image
rd_scale = np.random.choice(self.random_scale)
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
text_polys *= rd_scale
if np.random.rand() < self.background_ratio:
outs = self.crop_background_infor(im, text_polys, text_tags)
else:
outs = self.crop_foreground_infor(im, text_polys, text_tags)
if outs is None:
return None
im, score_map, geo_map, training_mask = outs
score_map = score_map[np.newaxis, ::4, ::4].astype(np.float32)
geo_map = np.swapaxes(geo_map, 1, 2)
geo_map = np.swapaxes(geo_map, 1, 0)
geo_map = geo_map[:, ::4, ::4].astype(np.float32)
training_mask = training_mask[np.newaxis, ::4, ::4]
training_mask = training_mask.astype(np.float32)
data['image'] = im[0]
data['score_map'] = score_map
data['geo_map'] = geo_map
data['training_mask'] = training_mask
# print(im.shape, score_map.shape, geo_map.shape, training_mask.shape)
return data
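A minimal end-to-end sketch of this operator (hypothetical import path and data; the random rotate/crop may legitimately reject a sample, in which case `transform` returns `None`):

```python
import numpy as np
from ppocr.data.imaug import transform, EASTProcessTrain

op = EASTProcessTrain(image_shape=[512, 512], background_ratio=0.125,
                      min_crop_side_ratio=0.1, min_text_size=10)
data = {
    'image': np.random.randint(0, 255, (720, 1280, 3)).astype(np.float32),
    'polys': np.array([[[10, 10], [200, 10], [200, 60], [10, 60]]], dtype=np.float32),
    'ignore_tags': np.array([False]),
}
out = transform(data, [op])
if out is not None:
    # labels are emitted at 1/4 of the 512x512 padded resolution
    print(out['image'].shape, out['score_map'].shape, out['geo_map'].shape)
    # (3, 512, 512) (1, 128, 128) (9, 128, 128)
```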
\ No newline at end of file
...@@ -52,6 +52,7 @@ class DetLabelEncode(object):
txt_tags.append(True)
else:
txt_tags.append(False)
boxes = self.expand_points_num(boxes)
boxes = np.array(boxes, dtype=np.float32)
txt_tags = np.array(txt_tags, dtype=np.bool)
...@@ -70,6 +71,17 @@ class DetLabelEncode(object):
rect[3] = pts[np.argmax(diff)]
return rect
def expand_points_num(self, boxes):
max_points_num = 0
for box in boxes:
if len(box) > max_points_num:
max_points_num = len(box)
ex_boxes = []
for box in boxes:
ex_box = box + [box[-1]] * (max_points_num - len(box))
ex_boxes.append(ex_box)
return ex_boxes
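`expand_points_num` pads every box to the point count of the longest box by repeating its final vertex, so the mixed list can be stacked into one rectangular array. A hypothetical input:

```python
boxes = [
    [[0, 0], [4, 0], [4, 2], [0, 2]],                  # 4-point quad
    [[0, 0], [2, 0], [4, 0], [4, 2], [2, 2], [0, 2]],  # 6-point polygon
]
# after expand_points_num, the quad ends with [0, 2] repeated twice more,
# and np.array(boxes, dtype=np.float32) has shape (2, 6, 2)
```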
class BaseRecLabelEncode(object):
""" Convert between text-label and text-index """
...@@ -83,7 +95,7 @@ class BaseRecLabelEncode(object):
'ch', 'en', 'en_sensitive', 'french', 'german', 'japan', 'korean'
]
assert character_type in support_character_type, "Only {} are supported now but get {}".format(
support_character_type, character_type)
self.max_text_len = max_text_length
if character_type == "en":
......
...@@ -122,26 +122,37 @@ class DetResizeForTest(object):
if 'limit_side_len' in kwargs:
self.limit_side_len = kwargs['limit_side_len']
self.limit_type = kwargs.get('limit_type', 'min')
if 'resize_long' in kwargs:
self.resize_type = 2
self.resize_long = kwargs.get('resize_long', 960)
else:
self.limit_side_len = 736
self.limit_type = 'min'
def __call__(self, data):
img = data['image']
src_h, src_w, _ = img.shape
if self.resize_type == 0:
# img, shape = self.resize_image_type0(img)
img, [ratio_h, ratio_w] = self.resize_image_type0(img)
elif self.resize_type == 2:
img, [ratio_h, ratio_w] = self.resize_image_type2(img)
else:
# img, shape = self.resize_image_type1(img)
img, [ratio_h, ratio_w] = self.resize_image_type1(img)
data['image'] = img
data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
return data
def resize_image_type1(self, img):
resize_h, resize_w = self.image_shape
ori_h, ori_w = img.shape[:2]  # (h, w, c)
ratio_h = float(resize_h) / ori_h
ratio_w = float(resize_w) / ori_w
img = cv2.resize(img, (int(resize_w), int(resize_h)))
# return img, np.array([ori_h, ori_w])
return img, [ratio_h, ratio_w]
def resize_image_type0(self, img):
"""
...@@ -184,4 +195,31 @@ class DetResizeForTest(object):
except:
print(img.shape, resize_w, resize_h)
sys.exit(0)
ratio_h = resize_h / float(h)
ratio_w = resize_w / float(w)
# return img, np.array([h, w])
return img, [ratio_h, ratio_w]
def resize_image_type2(self, img):
h, w, _ = img.shape
resize_w = w
resize_h = h
# Fix the longer side
if resize_h > resize_w:
ratio = float(self.resize_long) / resize_h
else:
ratio = float(self.resize_long) / resize_w
resize_h = int(resize_h * ratio)
resize_w = int(resize_w * ratio)
max_stride = 128
resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
img = cv2.resize(img, (int(resize_w), int(resize_h)))
ratio_h = resize_h / float(h)
ratio_w = resize_w / float(w)
return img, [ratio_h, ratio_w]
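`resize_image_type2` caps the longer side at `resize_long` and then rounds both sides up to a multiple of 128 (the `max_stride` in the code), so the returned ratios are relative to the final, stride-aligned size. Worked numbers for a 720x1280 input with `resize_long=960`:

```python
# ratio = 960 / 1280 = 0.75       -> 540 x 960
# round up to multiples of 128    -> 640 x 1024
# ratio_h = 640 / 720 ~= 0.889, ratio_w = 1024 / 1280 = 0.8
```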
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import math
import cv2
import numpy as np
import json
import sys
import os
__all__ = ['SASTProcessTrain']
class SASTProcessTrain(object):
def __init__(self,
image_shape = [512, 512],
min_crop_size = 24,
min_crop_side_ratio = 0.3,
min_text_size = 10,
max_text_size = 512,
**kwargs):
self.input_size = image_shape[1]
self.min_crop_size = min_crop_size
self.min_crop_side_ratio = min_crop_side_ratio
self.min_text_size = min_text_size
self.max_text_size = max_text_size
def quad_area(self, poly):
"""
compute the signed area of a quad via the shoelace formula
:param poly: (4, 2) array of vertices
:return: signed area (sign encodes orientation)
"""
edge = [
(poly[1][0] - poly[0][0]) * (poly[1][1] + poly[0][1]),
(poly[2][0] - poly[1][0]) * (poly[2][1] + poly[1][1]),
(poly[3][0] - poly[2][0]) * (poly[3][1] + poly[2][1]),
(poly[0][0] - poly[3][0]) * (poly[0][1] + poly[3][1])
]
return np.sum(edge) / 2.
def gen_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
rect = cv2.minAreaRect(poly.astype(np.int32))  # (center (x, y), (width, height), angle of rotation)
box = np.array(cv2.boxPoints(rect))
# rotate the box indices so that its first point best matches poly's first point
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = np.linalg.norm(box[(i + 0) % 4] - poly[0]) + \
np.linalg.norm(box[(i + 1) % 4] - poly[point_num // 2 - 1]) + \
np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2]) + \
np.linalg.norm(box[(i + 3) % 4] - poly[-1])
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad
def check_and_validate_polys(self, polys, tags, im_size):
"""
check so that the text poly is in the same direction,
and also filter some invalid polygons
:param polys:
:param tags:
:return:
"""
(h, w) = im_size
if polys.shape[0] == 0:
return polys, np.array([]), np.array([])
polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1)
polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1)
validated_polys = []
validated_tags = []
hv_tags = []
for poly, tag in zip(polys, tags):
quad = self.gen_quad_from_poly(poly)
p_area = self.quad_area(quad)
if abs(p_area) < 1:
print('invalid poly')
continue
if p_area > 0:
if not tag:
print('poly in wrong direction')
tag = True  # reversed cases should be ignored
# reverse the point order (assumes 16-point polys)
poly = poly[(0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1), :]
quad = quad[(0, 3, 2, 1), :]
len_w = np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(quad[3] - quad[2])
len_h = np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[1] - quad[2])
hv_tag = 1
if len_w * 2.0 < len_h:
hv_tag = 0
validated_polys.append(poly)
validated_tags.append(tag)
hv_tags.append(hv_tag)
return np.array(validated_polys), np.array(validated_tags), np.array(hv_tags)
def crop_area(self, im, polys, tags, hv_tags, crop_background=False, max_tries=25):
"""
make random crop from the input image
:param im:
:param polys:
:param tags:
:param crop_background:
:param max_tries: 50 -> 25
:return:
"""
h, w, _ = im.shape
pad_h = h // 10
pad_w = w // 10
h_array = np.zeros((h + pad_h * 2), dtype=np.int32)
w_array = np.zeros((w + pad_w * 2), dtype=np.int32)
for poly in polys:
poly = np.round(poly, decimals=0).astype(np.int32)
minx = np.min(poly[:, 0])
maxx = np.max(poly[:, 0])
w_array[minx + pad_w: maxx + pad_w] = 1
miny = np.min(poly[:, 1])
maxy = np.max(poly[:, 1])
h_array[miny + pad_h: maxy + pad_h] = 1
# ensure the cropped area does not cross a text region
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return im, polys, tags, hv_tags
for i in range(max_tries):
xx = np.random.choice(w_axis, size=2)
xmin = np.min(xx) - pad_w
xmax = np.max(xx) - pad_w
xmin = np.clip(xmin, 0, w - 1)
xmax = np.clip(xmax, 0, w - 1)
yy = np.random.choice(h_axis, size=2)
ymin = np.min(yy) - pad_h
ymax = np.max(yy) - pad_h
ymin = np.clip(ymin, 0, h - 1)
ymax = np.clip(ymax, 0, h - 1)
# if xmax - xmin < ARGS.min_crop_side_ratio * w or \
# ymax - ymin < ARGS.min_crop_side_ratio * h:
if xmax - xmin < self.min_crop_size or \
ymax - ymin < self.min_crop_size:
# area too small
continue
if polys.shape[0] != 0:
poly_axis_in_area = (polys[:, :, 0] >= xmin) & (polys[:, :, 0] <= xmax) \
& (polys[:, :, 1] >= ymin) & (polys[:, :, 1] <= ymax)
selected_polys = np.where(np.sum(poly_axis_in_area, axis=1) == 4)[0]
else:
selected_polys = []
if len(selected_polys) == 0:
# no text in this area: return the same four values as the other paths
# (the stray, undefined `txts` has been removed)
if crop_background:
return im[ymin: ymax + 1, xmin: xmax + 1, :], \
polys[selected_polys], tags[selected_polys], hv_tags[selected_polys]
else:
continue
im = im[ymin: ymax + 1, xmin: xmax + 1, :]
polys = polys[selected_polys]
tags = tags[selected_polys]
hv_tags = hv_tags[selected_polys]
polys[:, :, 0] -= xmin
polys[:, :, 1] -= ymin
return im, polys, tags, hv_tags
return im, polys, tags, hv_tags
def generate_direction_map(self, poly_quads, direction_map):
"""
Fill direction_map with each quad's reading-direction vector (scaled
to the average quad width) and the inverse average quad height.
"""
width_list = []
height_list = []
for quad in poly_quads:
quad_w = (np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(quad[2] - quad[3])) / 2.0
quad_h = (np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[2] - quad[1])) / 2.0
width_list.append(quad_w)
height_list.append(quad_h)
norm_width = max(sum(width_list) / (len(width_list) + 1e-6), 1.0)
average_height = max(sum(height_list) / (len(height_list) + 1e-6), 1.0)
for quad in poly_quads:
direct_vector_full = ((quad[1] + quad[2]) - (quad[0] + quad[3])) / 2.0
direct_vector = direct_vector_full / (np.linalg.norm(direct_vector_full) + 1e-6) * norm_width
direction_label = tuple(map(float, [direct_vector[0], direct_vector[1], 1.0 / (average_height + 1e-6)]))
cv2.fillPoly(direction_map, quad.round().astype(np.int32)[np.newaxis, :, :], direction_label)
return direction_map
def calculate_average_height(self, poly_quads):
"""
Average quad height over the poly, clipped below at 1.0.
"""
height_list = []
for quad in poly_quads:
quad_h = (np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[2] - quad[1])) / 2.0
height_list.append(quad_h)
average_height = max(sum(height_list) / len(height_list), 1.0)
return average_height
def generate_tcl_label(self, hw, polys, tags, ds_ratio,
tcl_ratio=0.3, shrink_ratio_of_width=0.15):
"""
Generate the TCL score map, TBO map and training mask.
"""
h, w = hw
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
score_map = np.zeros((h, w,), dtype=np.float32)
tbo_map = np.zeros((h, w, 5), dtype=np.float32)
training_mask = np.ones((h, w,), dtype=np.float32)
direction_map = np.ones((h, w, 3)) * np.array([0, 0, 1]).reshape([1, 1, 3]).astype(np.float32)
for poly_idx, poly_tag in enumerate(zip(polys, tags)):
poly = poly_tag[0]
tag = poly_tag[1]
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(poly)
min_area_quad_h = 0.5 * (np.linalg.norm(min_area_quad[0] - min_area_quad[3]) +
np.linalg.norm(min_area_quad[1] - min_area_quad[2]))
min_area_quad_w = 0.5 * (np.linalg.norm(min_area_quad[0] - min_area_quad[1]) +
np.linalg.norm(min_area_quad[2] - min_area_quad[3]))
if min(min_area_quad_h, min_area_quad_w) < self.min_text_size * ds_ratio \
or min(min_area_quad_h, min_area_quad_w) > self.max_text_size * ds_ratio:
continue
if tag:
# continue
cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0.15)
else:
tcl_poly = self.poly2tcl(poly, tcl_ratio)
tcl_quads = self.poly2quads(tcl_poly)
poly_quads = self.poly2quads(poly)
# stcl map
stcl_quads, quad_index = self.shrink_poly_along_width(tcl_quads, shrink_ratio_of_width=shrink_ratio_of_width,
expand_height_ratio=1.0 / tcl_ratio)
# generate tcl map
cv2.fillPoly(score_map, np.round(stcl_quads).astype(np.int32), 1.0)
# generate tbo map
for idx, quad in enumerate(stcl_quads):
quad_mask = np.zeros((h, w), dtype=np.float32)
quad_mask = cv2.fillPoly(quad_mask, np.round(quad[np.newaxis, :, :]).astype(np.int32), 1.0)
tbo_map = self.gen_quad_tbo(poly_quads[quad_index[idx]], quad_mask, tbo_map)
return score_map, tbo_map, training_mask
def generate_tvo_and_tco(self, hw, polys, tags, tcl_ratio=0.3, ds_ratio=0.25):
"""
Generate the TVO and TCO maps.
"""
h, w = hw
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
poly_mask = np.zeros((h, w), dtype=np.float32)
tvo_map = np.ones((9, h, w), dtype=np.float32)
tvo_map[0:-1:2] = np.tile(np.arange(0, w), (h, 1))
tvo_map[1:-1:2] = np.tile(np.arange(0, w), (h, 1)).T
poly_tv_xy_map = np.zeros((8, h, w), dtype=np.float32)
# tco map
tco_map = np.ones((3, h, w), dtype=np.float32)
tco_map[0] = np.tile(np.arange(0, w), (h, 1))
tco_map[1] = np.tile(np.arange(0, w), (h, 1)).T
poly_tc_xy_map = np.zeros((2, h, w), dtype=np.float32)
poly_short_edge_map = np.ones((h, w), dtype=np.float32)
for poly, poly_tag in zip(polys, tags):
if poly_tag == True:
continue
# adjust point order for vertical poly
poly = self.adjust_point(poly)
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(poly)
min_area_quad_h = 0.5 * (np.linalg.norm(min_area_quad[0] - min_area_quad[3]) +
np.linalg.norm(min_area_quad[1] - min_area_quad[2]))
min_area_quad_w = 0.5 * (np.linalg.norm(min_area_quad[0] - min_area_quad[1]) +
np.linalg.norm(min_area_quad[2] - min_area_quad[3]))
# generate tcl map and text, 128 * 128
tcl_poly = self.poly2tcl(poly, tcl_ratio)
# generate poly_tv_xy_map
for idx in range(4):
cv2.fillPoly(poly_tv_xy_map[2 * idx],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(min(max(min_area_quad[idx, 0], 0), w)))
cv2.fillPoly(poly_tv_xy_map[2 * idx + 1],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(min(max(min_area_quad[idx, 1], 0), h)))
# generate poly_tc_xy_map
for idx in range(2):
cv2.fillPoly(poly_tc_xy_map[idx],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32), float(center_point[idx]))
# generate poly_short_edge_map
cv2.fillPoly(poly_short_edge_map,
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(max(min(min_area_quad_h, min_area_quad_w), 1.0)))
# generate poly_mask and training_mask
cv2.fillPoly(poly_mask, np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32), 1)
tvo_map *= poly_mask
tvo_map[:8] -= poly_tv_xy_map
tvo_map[-1] /= poly_short_edge_map
tvo_map = tvo_map.transpose((1, 2, 0))
tco_map *= poly_mask
tco_map[:2] -= poly_tc_xy_map
tco_map[-1] /= poly_short_edge_map
tco_map = tco_map.transpose((1, 2, 0))
return tvo_map, tco_map
def adjust_point(self, poly):
"""
adjust point order.
"""
point_num = poly.shape[0]
if point_num == 4:
len_1 = np.linalg.norm(poly[0] - poly[1])
len_2 = np.linalg.norm(poly[1] - poly[2])
len_3 = np.linalg.norm(poly[2] - poly[3])
len_4 = np.linalg.norm(poly[3] - poly[0])
if (len_1 + len_3) * 1.5 < (len_2 + len_4):
poly = poly[[1, 2, 3, 0], :]
elif point_num > 4:
vector_1 = poly[0] - poly[1]
vector_2 = poly[1] - poly[2]
cos_theta = np.dot(vector_1, vector_2) / (np.linalg.norm(vector_1) * np.linalg.norm(vector_2) + 1e-6)
theta = np.arccos(np.round(cos_theta, decimals=4))
if abs(theta) > (70 / 180 * math.pi):
index = list(range(1, point_num)) + [0]
poly = poly[np.array(index), :]
return poly
def gen_min_area_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
if point_num == 4:
min_area_quad = poly
center_point = np.sum(poly, axis=0) / 4
else:
rect = cv2.minAreaRect(poly.astype(np.int32)) # (center (x,y), (width, height), angle of rotation)
center_point = rect[0]
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = np.linalg.norm(box[(i + 0) % 4] - poly[0]) + \
np.linalg.norm(box[(i + 1) % 4] - poly[point_num // 2 - 1]) + \
np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2]) + \
np.linalg.norm(box[(i + 3) % 4] - poly[-1])
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad, center_point
def shrink_quad_along_width(self, quad, begin_width_ratio=0., end_width_ratio=1.):
"""
Generate shrink_quad_along_width.
"""
ratio_pair = np.array([[begin_width_ratio], [end_width_ratio]], dtype=np.float32)
p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair
p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair
return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]])
def shrink_poly_along_width(self, quads, shrink_ratio_of_width, expand_height_ratio=1.0):
"""
Shrink the poly along its width, cutting shrink_ratio_of_width of its
total length from each end.
"""
upper_edge_list = []
def get_cut_info(edge_len_list, cut_len):
for idx, edge_len in enumerate(edge_len_list):
cut_len -= edge_len
if cut_len <= 0.000001:
ratio = (cut_len + edge_len_list[idx]) / edge_len_list[idx]
return idx, ratio
for quad in quads:
upper_edge_len = np.linalg.norm(quad[0] - quad[1])
upper_edge_list.append(upper_edge_len)
# length of left edge and right edge.
left_length = np.linalg.norm(quads[0][0] - quads[0][3]) * expand_height_ratio
right_length = np.linalg.norm(quads[-1][1] - quads[-1][2]) * expand_height_ratio
shrink_length = min(left_length, right_length, sum(upper_edge_list)) * shrink_ratio_of_width
# shrinking length
upper_len_left = shrink_length
upper_len_right = sum(upper_edge_list) - shrink_length
left_idx, left_ratio = get_cut_info(upper_edge_list, upper_len_left)
left_quad = self.shrink_quad_along_width(quads[left_idx], begin_width_ratio=left_ratio, end_width_ratio=1)
right_idx, right_ratio = get_cut_info(upper_edge_list, upper_len_right)
right_quad = self.shrink_quad_along_width(quads[right_idx], begin_width_ratio=0, end_width_ratio=right_ratio)
out_quad_list = []
if left_idx == right_idx:
out_quad_list.append([left_quad[0], right_quad[1], right_quad[2], left_quad[3]])
else:
out_quad_list.append(left_quad)
for idx in range(left_idx + 1, right_idx):
out_quad_list.append(quads[idx])
out_quad_list.append(right_quad)
return np.array(out_quad_list), list(range(left_idx, right_idx + 1))
def vector_angle(self, A, B):
"""
Calculate the angle between vector AB and x-axis positive direction.
"""
AB = np.array([B[1] - A[1], B[0] - A[0]])
return np.arctan2(*AB)
def theta_line_cross_point(self, theta, point):
"""
Calculate the line through given point and angle in ax + by + c =0 form.
"""
x, y = point
cos = np.cos(theta)
sin = np.sin(theta)
return [sin, -cos, cos * y - sin * x]
def line_cross_two_point(self, A, B):
"""
Calculate the line through given points A and B in ax + by + c = 0 form.
"""
angle = self.vector_angle(A, B)
return self.theta_line_cross_point(angle, A)
def average_angle(self, poly):
"""
Calculate the average angle between left and right edge in given poly.
"""
p0, p1, p2, p3 = poly
angle30 = self.vector_angle(p3, p0)
angle21 = self.vector_angle(p2, p1)
return (angle30 + angle21) / 2
def line_cross_point(self, line1, line2):
"""
line1 and line2 in 0=ax+by+c form, compute the cross point of line1 and line2
"""
a1, b1, c1 = line1
a2, b2, c2 = line2
d = a1 * b2 - a2 * b1
if d == 0:
#print("line1", line1)
#print("line2", line2)
print('Cross point does not exist')
return np.array([0, 0], dtype=np.float32)
else:
x = (b1 * c2 - b2 * c1) / d
y = (a2 * c1 - a1 * c2) / d
return np.array([x, y], dtype=np.float32)
def quad2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point. (4, 2)
"""
ratio_pair = np.array([[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32)
p0_3 = poly[0] + (poly[3] - poly[0]) * ratio_pair
p1_2 = poly[1] + (poly[2] - poly[1]) * ratio_pair
return np.array([p0_3[0], p1_2[0], p1_2[1], p0_3[1]])
def poly2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point.
"""
ratio_pair = np.array([[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32)
tcl_poly = np.zeros_like(poly)
point_num = poly.shape[0]
for idx in range(point_num // 2):
point_pair = poly[idx] + (poly[point_num - 1 - idx] - poly[idx]) * ratio_pair
tcl_poly[idx] = point_pair[0]
tcl_poly[point_num - 1 - idx] = point_pair[1]
return tcl_poly
def gen_quad_tbo(self, quad, tcl_mask, tbo_map):
"""
Generate tbo_map for the given quad.
"""
# upper and lower line function: ax + by + c = 0;
up_line = self.line_cross_two_point(quad[0], quad[1])
lower_line = self.line_cross_two_point(quad[3], quad[2])
quad_h = 0.5 * (np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[1] - quad[2]))
quad_w = 0.5 * (np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(quad[2] - quad[3]))
# average angle of left and right line.
angle = self.average_angle(quad)
xy_in_poly = np.argwhere(tcl_mask == 1)
for y, x in xy_in_poly:
point = (x, y)
line = self.theta_line_cross_point(angle, point)
cross_point_upper = self.line_cross_point(up_line, line)
cross_point_lower = self.line_cross_point(lower_line, line)
##FIX, offset reverse
upper_offset_x, upper_offset_y = cross_point_upper - point
lower_offset_x, lower_offset_y = cross_point_lower - point
tbo_map[y, x, 0] = upper_offset_y
tbo_map[y, x, 1] = upper_offset_x
tbo_map[y, x, 2] = lower_offset_y
tbo_map[y, x, 3] = lower_offset_x
tbo_map[y, x, 4] = 1.0 / max(min(quad_h, quad_w), 1.0) * 2
return tbo_map
def poly2quads(self, poly):
"""
Split poly into quads.
"""
quad_list = []
point_num = poly.shape[0]
# point pair
point_pair_list = []
for idx in range(point_num // 2):
point_pair = [poly[idx], poly[point_num - 1 - idx]]
point_pair_list.append(point_pair)
quad_num = point_num // 2 - 1
for idx in range(quad_num):
# reshape and adjust to clock-wise
quad_list.append((np.array(point_pair_list)[[idx, idx + 1]]).reshape(4, 2)[[0, 2, 3, 1]])
return np.array(quad_list)
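`poly2quads` pairs the i-th point with its mirror on the opposite edge and emits one clockwise quad per adjacent pair of point pairs, so a 2N-point poly becomes N-1 quads. A hypothetical 8-point poly:

```python
import numpy as np

poly = np.array([[0, 0], [2, 0], [4, 0], [6, 0],   # upper edge, left to right
                 [6, 2], [4, 2], [2, 2], [0, 2]],  # lower edge, right to left
                dtype=np.float32)
quads = SASTProcessTrain().poly2quads(poly)        # shape (3, 4, 2)
# quads[0] == [[0, 0], [2, 0], [2, 2], [0, 2]]     # clockwise in image coords
```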
def __call__(self, data):
im = data['image']
text_polys = data['polys']
text_tags = data['ignore_tags']
if im is None:
return None
if text_polys.shape[0] == 0:
return None
h, w, _ = im.shape
text_polys, text_tags, hv_tags = self.check_and_validate_polys(text_polys, text_tags, (h, w))
if text_polys.shape[0] == 0:
return None
# set a random aspect ratio while keeping the area fixed
asp_scales = np.arange(1.0, 1.55, 0.1)
asp_scale = np.random.choice(asp_scales)
if np.random.rand() < 0.5:
asp_scale = 1.0 / asp_scale
asp_scale = math.sqrt(asp_scale)
asp_wx = asp_scale
asp_hy = 1.0 / asp_scale
im = cv2.resize(im, dsize=None, fx=asp_wx, fy=asp_hy)
text_polys[:, :, 0] *= asp_wx
text_polys[:, :, 1] *= asp_hy
h, w, _ = im.shape
if max(h, w) > 2048:
rd_scale = 2048.0 / max(h, w)
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
text_polys *= rd_scale
h, w, _ = im.shape
if min(h, w) < 16:
return None
#no background
im, text_polys, text_tags, hv_tags = self.crop_area(im, \
text_polys, text_tags, hv_tags, crop_background=False)
if text_polys.shape[0] == 0:
return None
# skip the sample if every remaining text region is ignored
if np.sum((text_tags * 1.0)) >= text_tags.size:
return None
new_h, new_w, _ = im.shape
if (new_h is None) or (new_w is None):
return None
#resize image
std_ratio = float(self.input_size) / max(new_w, new_h)
rand_scales = np.array([0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0])
rz_scale = std_ratio * np.random.choice(rand_scales)
im = cv2.resize(im, dsize=None, fx=rz_scale, fy=rz_scale)
text_polys[:, :, 0] *= rz_scale
text_polys[:, :, 1] *= rz_scale
# random gaussian blur
if np.random.rand() < 0.1 * 0.5:
ks = np.random.permutation(5)[0] + 1
ks = int(ks / 2) * 2 + 1
im = cv2.GaussianBlur(im, ksize=(ks, ks), sigmaX=0, sigmaY=0)
# random brightening
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 + np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# random darkening
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 - np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# Padding the im to [input_size, input_size]
new_h, new_w, _ = im.shape
if min(new_w, new_h) < self.input_size * 0.5:
return None
im_padded = np.ones((self.input_size, self.input_size, 3), dtype=np.float32)
im_padded[:, :, 2] = 0.485 * 255
im_padded[:, :, 1] = 0.456 * 255
im_padded[:, :, 0] = 0.406 * 255
# Random the start position
del_h = self.input_size - new_h
del_w = self.input_size - new_w
sh, sw = 0, 0
if del_h > 1:
sh = int(np.random.rand() * del_h)
if del_w > 1:
sw = int(np.random.rand() * del_w)
# Padding
im_padded[sh: sh + new_h, sw: sw + new_w, :] = im.copy()
text_polys[:, :, 0] += sw
text_polys[:, :, 1] += sh
score_map, border_map, training_mask = self.generate_tcl_label((self.input_size, self.input_size),
text_polys, text_tags, 0.25)
# SAST head
tvo_map, tco_map = self.generate_tvo_and_tco((self.input_size, self.input_size), text_polys, text_tags, tcl_ratio=0.3, ds_ratio=0.25)
# print("test--------tvo_map shape:", tvo_map.shape)
im_padded[:, :, 2] -= 0.485 * 255
im_padded[:, :, 1] -= 0.456 * 255
im_padded[:, :, 0] -= 0.406 * 255
im_padded[:, :, 2] /= (255.0 * 0.229)
im_padded[:, :, 1] /= (255.0 * 0.224)
im_padded[:, :, 0] /= (255.0 * 0.225)
im_padded = im_padded.transpose((2, 0, 1))
data['image'] = im_padded[::-1, :, :]
data['score_map'] = score_map[np.newaxis, :, :]
data['border_map'] = border_map.transpose((2, 0, 1))
data['training_mask'] = training_mask[np.newaxis, :, :]
data['tvo_map'] = tvo_map.transpose((2, 0, 1))
data['tco_map'] = tco_map.transpose((2, 0, 1))
return data
\ No newline at end of file
...@@ -18,6 +18,8 @@ import copy
def build_loss(config):
# det loss
from .det_db_loss import DBLoss
from .det_east_loss import EASTLoss
from .det_sast_loss import SASTLoss
# rec loss
from .rec_ctc_loss import CTCLoss
...@@ -25,7 +27,7 @@ def build_loss(config):
# cls loss
from .cls_loss import ClsLoss
support_dict = ['DBLoss', 'EASTLoss', 'SASTLoss', 'CTCLoss', 'ClsLoss']
config = copy.deepcopy(config)
module_name = config.pop('name')
......
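With the losses registered, a training config's `Loss` section maps directly onto this factory; a hedged sketch of the call site, assuming the package layout above:

```python
from ppocr.losses import build_loss

loss_class = build_loss({'name': 'EASTLoss'})  # remaining keys become kwargs
# in the train loop: preds = model(images); loss = loss_class(preds, batch)['loss']
```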
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import nn
from .det_basic_loss import DiceLoss
class EASTLoss(nn.Layer):
"""
EAST loss: dice loss on the score map plus a normalized
smooth-L1 loss on the 8-channel geometry map.
"""
def __init__(self,
eps=1e-6,
**kwargs):
super(EASTLoss, self).__init__()
self.dice_loss = DiceLoss(eps=eps)
def forward(self, predicts, labels):
l_score, l_geo, l_mask = labels[1:]
f_score = predicts['f_score']
f_geo = predicts['f_geo']
dice_loss = self.dice_loss(f_score, l_score, l_mask)
# smooth_l1_loss
channels = 8
l_geo_split = paddle.split(
l_geo, num_or_sections=channels + 1, axis=1)
f_geo_split = paddle.split(f_geo, num_or_sections=channels, axis=1)
smooth_l1 = 0
for i in range(0, channels):
geo_diff = l_geo_split[i] - f_geo_split[i]
abs_geo_diff = paddle.abs(geo_diff)
smooth_l1_sign = paddle.less_than(abs_geo_diff, l_score)
smooth_l1_sign = paddle.cast(smooth_l1_sign, dtype='float32')
in_loss = abs_geo_diff * abs_geo_diff * smooth_l1_sign + \
(abs_geo_diff - 0.5) * (1.0 - smooth_l1_sign)
out_loss = l_geo_split[-1] / channels * in_loss * l_score
smooth_l1 += out_loss
smooth_l1_loss = paddle.mean(smooth_l1 * l_score)
dice_loss = dice_loss * 0.01
total_loss = dice_loss + smooth_l1_loss
losses = {"loss":total_loss, \
"dice_loss":dice_loss,\
"smooth_l1_loss":smooth_l1_loss}
return losses
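Written out, the loss implemented above is, with $s$ the ground-truth score map, $n$ the short-edge-norm channel, and $g_i$, $\hat g_i$ the eight label/predicted geometry channels:

$$
L = 0.01\,L_{\mathrm{dice}} + \operatorname{mean}\Big(s \odot \sum_{i=1}^{8} \tfrac{n}{8}\,\phi(g_i - \hat g_i)\Big), \qquad \phi(d) = \begin{cases} d^2 & |d| < 1 \\ |d| - 0.5 & \text{otherwise} \end{cases}
$$

Note that the code compares `abs_geo_diff` against `l_score` rather than a literal 1; inside text pixels $s = 1$, so the threshold is 1 there, and elsewhere the term is zeroed by the $s$ mask anyway (since $s \in \{0, 1\}$, the double multiplication by `l_score` is idempotent).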
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import nn
import numpy as np
from .det_basic_loss import DiceLoss
class SASTLoss(nn.Layer):
"""
SAST loss: dice loss on the TCL score map plus norm-weighted
smooth-L1 losses on the border (TBO), TVO and TCO maps.
"""
def __init__(self,
eps=1e-6,
**kwargs):
super(SASTLoss, self).__init__()
self.dice_loss = DiceLoss(eps=eps)
def forward(self, predicts, labels):
"""
tcl_pos: N x 128 x 3
tcl_mask: N x 128 x 1
tcl_label: N x X list or LoDTensor
"""
f_score = predicts['f_score']
f_border = predicts['f_border']
f_tvo = predicts['f_tvo']
f_tco = predicts['f_tco']
l_score, l_border, l_mask, l_tvo, l_tco = labels[1:]
#score_loss
intersection = paddle.sum(f_score * l_score * l_mask)
union = paddle.sum(f_score * l_mask) + paddle.sum(l_score * l_mask)
score_loss = 1.0 - 2 * intersection / (union + 1e-5)
#border loss
l_border_split, l_border_norm = paddle.split(l_border, num_or_sections=[4, 1], axis=1)
f_border_split = f_border
border_ex_shape = l_border_norm.shape * np.array([1, 4, 1, 1])
l_border_norm_split = paddle.expand(x=l_border_norm, shape=border_ex_shape)
l_border_score = paddle.expand(x=l_score, shape=border_ex_shape)
l_border_mask = paddle.expand(x=l_mask, shape=border_ex_shape)
border_diff = l_border_split - f_border_split
abs_border_diff = paddle.abs(border_diff)
border_sign = abs_border_diff < 1.0
border_sign = paddle.cast(border_sign, dtype='float32')
border_sign.stop_gradient = True
border_in_loss = 0.5 * abs_border_diff * abs_border_diff * border_sign + \
(abs_border_diff - 0.5) * (1.0 - border_sign)
border_out_loss = l_border_norm_split * border_in_loss
border_loss = paddle.sum(border_out_loss * l_border_score * l_border_mask) / \
(paddle.sum(l_border_score * l_border_mask) + 1e-5)
#tvo_loss
l_tvo_split, l_tvo_norm = paddle.split(l_tvo, num_or_sections=[8, 1], axis=1)
f_tvo_split = f_tvo
tvo_ex_shape = l_tvo_norm.shape * np.array([1, 8, 1, 1])
l_tvo_norm_split = paddle.expand(x=l_tvo_norm, shape=tvo_ex_shape)
l_tvo_score = paddle.expand(x=l_score, shape=tvo_ex_shape)
l_tvo_mask = paddle.expand(x=l_mask, shape=tvo_ex_shape)
#
tvo_geo_diff = l_tvo_split - f_tvo_split
abs_tvo_geo_diff = paddle.abs(tvo_geo_diff)
tvo_sign = abs_tvo_geo_diff < 1.0
tvo_sign = paddle.cast(tvo_sign, dtype='float32')
tvo_sign.stop_gradient = True
tvo_in_loss = 0.5 * abs_tvo_geo_diff * abs_tvo_geo_diff * tvo_sign + \
(abs_tvo_geo_diff - 0.5) * (1.0 - tvo_sign)
tvo_out_loss = l_tvo_norm_split * tvo_in_loss
tvo_loss = paddle.sum(tvo_out_loss * l_tvo_score * l_tvo_mask) / \
(paddle.sum(l_tvo_score * l_tvo_mask) + 1e-5)
#tco_loss
l_tco_split, l_tco_norm = paddle.split(l_tco, num_or_sections=[2, 1], axis=1)
f_tco_split = f_tco
tco_ex_shape = l_tco_norm.shape * np.array([1, 2, 1, 1])
l_tco_norm_split = paddle.expand(x=l_tco_norm, shape=tco_ex_shape)
l_tco_score = paddle.expand(x=l_score, shape=tco_ex_shape)
l_tco_mask = paddle.expand(x=l_mask, shape=tco_ex_shape)
tco_geo_diff = l_tco_split - f_tco_split
abs_tco_geo_diff = paddle.abs(tco_geo_diff)
tco_sign = abs_tco_geo_diff < 1.0
tco_sign = paddle.cast(tco_sign, dtype='float32')
tco_sign.stop_gradient = True
tco_in_loss = 0.5 * abs_tco_geo_diff * abs_tco_geo_diff * tco_sign + \
(abs_tco_geo_diff - 0.5) * (1.0 - tco_sign)
tco_out_loss = l_tco_norm_split * tco_in_loss
tco_loss = paddle.sum(tco_out_loss * l_tco_score * l_tco_mask) / \
(paddle.sum(l_tco_score * l_tco_mask) + 1e-5)
# total loss
tvo_lw, tco_lw = 1.5, 1.5
score_lw, border_lw = 1.0, 1.0
total_loss = score_loss * score_lw + border_loss * border_lw + \
tvo_loss * tvo_lw + tco_loss * tco_lw
losses = {'loss':total_loss, "score_loss":score_loss,\
"border_loss":border_loss, 'tvo_loss':tvo_loss, 'tco_loss':tco_loss}
return losses
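So the total objective reduces to

$$
L_{\mathrm{total}} = 1.0\,L_{\mathrm{score}} + 1.0\,L_{\mathrm{border}} + 1.5\,L_{\mathrm{tvo}} + 1.5\,L_{\mathrm{tco}},
$$

where each regression term is a norm-weighted smooth-L1 averaged over the pixels where `l_score * l_mask` is positive.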
\ No newline at end of file
...@@ -19,6 +19,7 @@ def build_backbone(config, model_type):
if model_type == 'det':
from .det_mobilenet_v3 import MobileNetV3
from .det_resnet_vd import ResNet
from .det_resnet_vd_sast import ResNet_SAST
support_dict = ['MobileNetV3', 'ResNet', 'ResNet_SAST']
elif model_type == 'rec' or model_type == 'cls':
from .rec_mobilenet_v3 import MobileNetV3
......
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
__all__ = ["ResNet_SAST"]
class ConvBNLayer(nn.Layer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
groups=1,
is_vd_mode=False,
act=None,
name=None, ):
super(ConvBNLayer, self).__init__()
self.is_vd_mode = is_vd_mode
self._pool2d_avg = nn.AvgPool2D(
kernel_size=2, stride=2, padding=0, ceil_mode=True)
self._conv = nn.Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
self._batch_norm = nn.BatchNorm(
out_channels,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def forward(self, inputs):
if self.is_vd_mode:
inputs = self._pool2d_avg(inputs)
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class BottleneckBlock(nn.Layer):
def __init__(self,
in_channels,
out_channels,
stride,
shortcut=True,
if_first=False,
name=None):
super(BottleneckBlock, self).__init__()
self.conv0 = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
act='relu',
name=name + "_branch2a")
self.conv1 = ConvBNLayer(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
act='relu',
name=name + "_branch2b")
self.conv2 = ConvBNLayer(
in_channels=out_channels,
out_channels=out_channels * 4,
kernel_size=1,
act=None,
name=name + "_branch2c")
if not shortcut:
self.short = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels * 4,
kernel_size=1,
stride=1,
is_vd_mode=False if if_first else True,
name=name + "_branch1")
self.shortcut = shortcut
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = paddle.add(x=short, y=conv2)
y = F.relu(y)
return y
class BasicBlock(nn.Layer):
def __init__(self,
in_channels,
out_channels,
stride,
shortcut=True,
if_first=False,
name=None):
super(BasicBlock, self).__init__()
self.stride = stride
self.conv0 = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
act='relu',
name=name + "_branch2a")
self.conv1 = ConvBNLayer(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
act=None,
name=name + "_branch2b")
if not shortcut:
self.short = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
is_vd_mode=False if if_first else True,
name=name + "_branch1")
self.shortcut = shortcut
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = paddle.add(x=short, y=conv1)
y = F.relu(y)
return y
class ResNet_SAST(nn.Layer):
def __init__(self, in_channels=3, layers=50, **kwargs):
super(ResNet_SAST, self).__init__()
self.layers = layers
supported_layers = [18, 34, 50, 101, 152, 200]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(
supported_layers, layers)
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
# depth = [3, 4, 6, 3]
depth = [3, 4, 6, 3, 3]  # SAST appends a fifth stage
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
# num_channels = [64, 256, 512,
# 1024] if layers >= 50 else [64, 64, 128, 256]
# num_filters = [64, 128, 256, 512]
num_channels = [64, 256, 512,
1024, 2048] if layers >= 50 else [64, 64, 128, 256]
num_filters = [64, 128, 256, 512, 512]
self.conv1_1 = ConvBNLayer(
in_channels=in_channels,
out_channels=32,
kernel_size=3,
stride=2,
act='relu',
name="conv1_1")
self.conv1_2 = ConvBNLayer(
in_channels=32,
out_channels=32,
kernel_size=3,
stride=1,
act='relu',
name="conv1_2")
self.conv1_3 = ConvBNLayer(
in_channels=32,
out_channels=64,
kernel_size=3,
stride=1,
act='relu',
name="conv1_3")
self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
self.stages = []
self.out_channels = [3, 64]
if layers >= 50:
for block in range(len(depth)):
block_list = []
shortcut = False
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name = "res" + str(block + 2) + "a"
else:
conv_name = "res" + str(block + 2) + "b" + str(i)
else:
conv_name = "res" + str(block + 2) + chr(97 + i)
bottleneck_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
BottleneckBlock(
in_channels=num_channels[block]
if i == 0 else num_filters[block] * 4,
out_channels=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
shortcut=shortcut,
if_first=block == i == 0,
name=conv_name))
shortcut = True
block_list.append(bottleneck_block)
self.out_channels.append(num_filters[block] * 4)
self.stages.append(nn.Sequential(*block_list))
else:
for block in range(len(depth)):
block_list = []
shortcut = False
for i in range(depth[block]):
conv_name = "res" + str(block + 2) + chr(97 + i)
basic_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
BasicBlock(
in_channels=num_channels[block]
if i == 0 else num_filters[block],
out_channels=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
shortcut=shortcut,
if_first=block == i == 0,
name=conv_name))
shortcut = True
block_list.append(basic_block)
self.out_channels.append(num_filters[block])
self.stages.append(nn.Sequential(*block_list))
def forward(self, inputs):
out = [inputs]
y = self.conv1_1(inputs)
y = self.conv1_2(y)
y = self.conv1_3(y)
out.append(y)
y = self.pool2d_max(y)
for block in self.stages:
y = block(y)
out.append(y)
return out
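# A minimal usage sketch (a hedged example, assuming Paddle 2.x dygraph mode
# and the ResNet_SAST class above). For layers=50 the backbone returns seven
# feature maps: the raw input, the stem output, and one tensor per residual
# stage, with strides 1/2/4/8/16/32/64 and channels matching out_channels.
def _demo_resnet_sast():
    import paddle
    model = ResNet_SAST(in_channels=3, layers=50)
    feats = model(paddle.rand([1, 3, 512, 512]))
    print(model.out_channels)  # [3, 64, 256, 512, 1024, 2048, 2048]
    print([tuple(f.shape) for f in feats])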
...@@ -18,13 +18,15 @@ __all__ = ['build_head']
def build_head(config):
    # det head
    from .det_db_head import DBHead
    from .det_east_head import EASTHead
    from .det_sast_head import SASTHead
    # rec head
    from .rec_ctc_head import CTCHead
    # cls head
    from .cls_head import ClsHead
    support_dict = ['DBHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead']
    module_name = config.pop('name')
    assert module_name in support_dict, Exception('head only support {}'.format(
......
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
if_act=True,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.conv = nn.Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
self.bn = nn.BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(name="bn_" + name + "_scale"),
bias_attr=ParamAttr(name="bn_" + name + "_offset"),
moving_mean_name="bn_" + name + "_mean",
moving_variance_name="bn_" + name + "_variance")
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class EASTHead(nn.Layer):
"""
"""
def __init__(self, in_channels, model_name, **kwargs):
super(EASTHead, self).__init__()
self.model_name = model_name
if self.model_name == "large":
num_outputs = [128, 64, 1, 8]
else:
num_outputs = [64, 32, 1, 8]
self.det_conv1 = ConvBNLayer(
in_channels=in_channels,
out_channels=num_outputs[0],
kernel_size=3,
stride=1,
padding=1,
if_act=True,
act='relu',
name="det_head1")
self.det_conv2 = ConvBNLayer(
in_channels=num_outputs[0],
out_channels=num_outputs[1],
kernel_size=3,
stride=1,
padding=1,
if_act=True,
act='relu',
name="det_head2")
self.score_conv = ConvBNLayer(
in_channels=num_outputs[1],
out_channels=num_outputs[2],
kernel_size=1,
stride=1,
padding=0,
if_act=False,
act=None,
name="f_score")
self.geo_conv = ConvBNLayer(
in_channels=num_outputs[1],
out_channels=num_outputs[3],
kernel_size=1,
stride=1,
padding=0,
if_act=False,
act=None,
name="f_geo")
def forward(self, x):
f_det = self.det_conv1(x)
f_det = self.det_conv2(f_det)
f_score = self.score_conv(f_det)
f_score = F.sigmoid(f_score)
f_geo = self.geo_conv(f_det)
f_geo = (F.sigmoid(f_geo) - 0.5) * 2 * 800
pred = {'f_score': f_score, 'f_geo': f_geo}
return pred
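# A hedged sketch of the head's I/O (assumes a stride-4, 64-channel feature
# map from the "small" EASTFPN). f_score is a sigmoid probability map; f_geo
# is squashed into [-800, 800] pixels by (sigmoid(x) - 0.5) * 2 * 800, i.e.
# each pixel predicts offsets to the four corners of its text quad.
def _demo_east_head():
    import paddle
    head = EASTHead(in_channels=64, model_name="small")
    pred = head(paddle.rand([1, 64, 128, 128]))
    print(pred['f_score'].shape)  # [1, 1, 128, 128]
    print(pred['f_geo'].shape)    # [1, 8, 128, 128]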
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
groups=1,
if_act=True,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.conv = nn.Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
self.bn = nn.BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(name="bn_" + name + "_scale"),
bias_attr=ParamAttr(name="bn_" + name + "_offset"),
moving_mean_name="bn_" + name + "_mean",
moving_variance_name="bn_" + name + "_variance")
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class SAST_Header1(nn.Layer):
def __init__(self, in_channels, **kwargs):
super(SAST_Header1, self).__init__()
out_channels = [64, 64, 128]
self.score_conv = nn.Sequential(
ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_score1'),
ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_score2'),
ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_score3'),
ConvBNLayer(out_channels[2], 1, 3, 1, act=None, name='f_score4')
)
self.border_conv = nn.Sequential(
ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_border1'),
ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_border2'),
ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_border3'),
ConvBNLayer(out_channels[2], 4, 3, 1, act=None, name='f_border4')
)
def forward(self, x):
f_score = self.score_conv(x)
f_score = F.sigmoid(f_score)
f_border = self.border_conv(x)
return f_score, f_border
class SAST_Header2(nn.Layer):
def __init__(self, in_channels, **kwargs):
super(SAST_Header2, self).__init__()
out_channels = [64, 64, 128]
self.tvo_conv = nn.Sequential(
ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_tvo1'),
ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_tvo2'),
ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_tvo3'),
ConvBNLayer(out_channels[2], 8, 3, 1, act=None, name='f_tvo4')
)
self.tco_conv = nn.Sequential(
ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_tco1'),
ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_tco2'),
ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_tco3'),
ConvBNLayer(out_channels[2], 2, 3, 1, act=None, name='f_tco4')
)
def forward(self, x):
f_tvo = self.tvo_conv(x)
f_tco = self.tco_conv(x)
return f_tvo, f_tco
class SASTHead(nn.Layer):
"""
"""
def __init__(self, in_channels, **kwargs):
super(SASTHead, self).__init__()
self.head1 = SAST_Header1(in_channels)
self.head2 = SAST_Header2(in_channels)
def forward(self, x):
f_score, f_border = self.head1(x)
f_tvo, f_tco = self.head2(x)
predicts = {}
predicts['f_score'] = f_score
predicts['f_border'] = f_border
predicts['f_tvo'] = f_tvo
predicts['f_tco'] = f_tco
return predicts
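# A minimal sketch (hedged example, assuming the 128-channel fused map that
# SASTFPN produces). The four maps are the text center-line score (1ch),
# border offsets (4ch), vertex offsets (8ch) and centroid offsets (2ch)
# later consumed by SASTPostProcess.
def _demo_sast_head():
    import paddle
    head = SASTHead(in_channels=128)
    preds = head(paddle.rand([1, 128, 128, 128]))
    for name, tensor in preds.items():
        print(name, tensor.shape)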
...@@ -16,8 +16,10 @@ __all__ = ['build_neck']
def build_neck(config):
    from .db_fpn import DBFPN
    from .east_fpn import EASTFPN
    from .sast_fpn import SASTFPN
    from .rnn import SequenceEncoder
    support_dict = ['DBFPN', 'EASTFPN', 'SASTFPN', 'SequenceEncoder']
    module_name = config.pop('name')
    assert module_name in support_dict, Exception('neck only support {}'.format(
......
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
if_act=True,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.conv = nn.Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
self.bn = nn.BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(name="bn_" + name + "_scale"),
bias_attr=ParamAttr(name="bn_" + name + "_offset"),
moving_mean_name="bn_" + name + "_mean",
moving_variance_name="bn_" + name + "_variance")
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class DeConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
if_act=True,
act=None,
name=None):
super(DeConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.deconv = nn.Conv2DTranspose(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
self.bn = nn.BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(name="bn_" + name + "_scale"),
bias_attr=ParamAttr(name="bn_" + name + "_offset"),
moving_mean_name="bn_" + name + "_mean",
moving_variance_name="bn_" + name + "_variance")
def forward(self, x):
x = self.deconv(x)
x = self.bn(x)
return x
class EASTFPN(nn.Layer):
def __init__(self, in_channels, model_name, **kwargs):
super(EASTFPN, self).__init__()
self.model_name = model_name
if self.model_name == "large":
self.out_channels = 128
else:
self.out_channels = 64
self.in_channels = in_channels[::-1]
self.h1_conv = ConvBNLayer(
in_channels=self.out_channels+self.in_channels[1],
out_channels=self.out_channels,
kernel_size=3,
stride=1,
padding=1,
if_act=True,
act='relu',
name="unet_h_1")
self.h2_conv = ConvBNLayer(
in_channels=self.out_channels+self.in_channels[2],
out_channels=self.out_channels,
kernel_size=3,
stride=1,
padding=1,
if_act=True,
act='relu',
name="unet_h_2")
self.h3_conv = ConvBNLayer(
in_channels=self.out_channels+self.in_channels[3],
out_channels=self.out_channels,
kernel_size=3,
stride=1,
padding=1,
if_act=True,
act='relu',
name="unet_h_3")
self.g0_deconv = DeConvBNLayer(
in_channels=self.in_channels[0],
out_channels=self.out_channels,
kernel_size=4,
stride=2,
padding=1,
if_act=True,
act='relu',
name="unet_g_0")
self.g1_deconv = DeConvBNLayer(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=4,
stride=2,
padding=1,
if_act=True,
act='relu',
name="unet_g_1")
self.g2_deconv = DeConvBNLayer(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=4,
stride=2,
padding=1,
if_act=True,
act='relu',
name="unet_g_2")
self.g3_conv = ConvBNLayer(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=3,
stride=1,
padding=1,
if_act=True,
act='relu',
name="unet_g_3")
def forward(self, x):
f = x[::-1]
h = f[0]
g = self.g0_deconv(h)
h = paddle.concat([g, f[1]], axis=1)
h = self.h1_conv(h)
g = self.g1_deconv(h)
h = paddle.concat([g, f[2]], axis=1)
h = self.h2_conv(h)
g = self.g2_deconv(h)
h = paddle.concat([g, f[3]], axis=1)
h = self.h3_conv(h)
g = self.g3_conv(h)
return g
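# A hedged shape walkthrough of the U-Net style fusion above. The channel
# list here is hypothetical; EASTFPN only requires four feature maps at
# strides 4/8/16/32, which it fuses top-down back to stride 4.
def _demo_east_fpn():
    import paddle
    fpn = EASTFPN(in_channels=[24, 56, 112, 480], model_name="small")
    feats = [
        paddle.rand([1, 24, 128, 128]),  # stride 4
        paddle.rand([1, 56, 64, 64]),    # stride 8
        paddle.rand([1, 112, 32, 32]),   # stride 16
        paddle.rand([1, 480, 16, 16]),   # stride 32
    ]
    print(fpn(feats).shape)  # [1, 64, 128, 128]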
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
groups=1,
if_act=True,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.conv = nn.Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
self.bn = nn.BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(name="bn_" + name + "_scale"),
bias_attr=ParamAttr(name="bn_" + name + "_offset"),
moving_mean_name="bn_" + name + "_mean",
moving_variance_name="bn_" + name + "_variance")
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class DeConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
groups=1,
if_act=True,
act=None,
name=None):
super(DeConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.deconv = nn.Conv2DTranspose(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
self.bn = nn.BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(name="bn_" + name + "_scale"),
bias_attr=ParamAttr(name="bn_" + name + "_offset"),
moving_mean_name="bn_" + name + "_mean",
moving_variance_name="bn_" + name + "_variance")
def forward(self, x):
x = self.deconv(x)
x = self.bn(x)
return x
class FPN_Up_Fusion(nn.Layer):
def __init__(self, in_channels):
super(FPN_Up_Fusion, self).__init__()
in_channels = in_channels[::-1]
out_channels = [256, 256, 192, 192, 128]
self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 1, 1, act=None, name='fpn_up_h0')
self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 1, 1, act=None, name='fpn_up_h1')
self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 1, 1, act=None, name='fpn_up_h2')
self.h3_conv = ConvBNLayer(in_channels[3], out_channels[3], 1, 1, act=None, name='fpn_up_h3')
self.h4_conv = ConvBNLayer(in_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_h4')
self.g0_conv = DeConvBNLayer(out_channels[0], out_channels[1], 4, 2, act=None, name='fpn_up_g0')
self.g1_conv = nn.Sequential(
ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_up_g1_1'),
DeConvBNLayer(out_channels[1], out_channels[2], 4, 2, act=None, name='fpn_up_g1_2')
)
self.g2_conv = nn.Sequential(
ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_up_g2_1'),
DeConvBNLayer(out_channels[2], out_channels[3], 4, 2, act=None, name='fpn_up_g2_2')
)
self.g3_conv = nn.Sequential(
ConvBNLayer(out_channels[3], out_channels[3], 3, 1, act='relu', name='fpn_up_g3_1'),
DeConvBNLayer(out_channels[3], out_channels[4], 4, 2, act=None, name='fpn_up_g3_2')
)
self.g4_conv = nn.Sequential(
ConvBNLayer(out_channels[4], out_channels[4], 3, 1, act='relu', name='fpn_up_fusion_1'),
ConvBNLayer(out_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_fusion_2')
)
def _add_relu(self, x1, x2):
x = paddle.add(x=x1, y=x2)
x = F.relu(x)
return x
def forward(self, x):
f = x[2:][::-1]
h0 = self.h0_conv(f[0])
h1 = self.h1_conv(f[1])
h2 = self.h2_conv(f[2])
h3 = self.h3_conv(f[3])
h4 = self.h4_conv(f[4])
g0 = self.g0_conv(h0)
g1 = self._add_relu(g0, h1)
g1 = self.g1_conv(g1)
g2 = self.g2_conv(self._add_relu(g1, h2))
g3 = self.g3_conv(self._add_relu(g2, h3))
g4 = self.g4_conv(self._add_relu(g3, h4))
return g4
class FPN_Down_Fusion(nn.Layer):
def __init__(self, in_channels):
super(FPN_Down_Fusion, self).__init__()
out_channels = [32, 64, 128]
self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 3, 1, act=None, name='fpn_down_h0')
self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 3, 1, act=None, name='fpn_down_h1')
self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 3, 1, act=None, name='fpn_down_h2')
self.g0_conv = ConvBNLayer(out_channels[0], out_channels[1], 3, 2, act=None, name='fpn_down_g0')
self.g1_conv = nn.Sequential(
ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_down_g1_1'),
ConvBNLayer(out_channels[1], out_channels[2], 3, 2, act=None, name='fpn_down_g1_2')
)
self.g2_conv = nn.Sequential(
ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_down_fusion_1'),
ConvBNLayer(out_channels[2], out_channels[2], 1, 1, act=None, name='fpn_down_fusion_2')
)
def forward(self, x):
f = x[:3]
h0 = self.h0_conv(f[0])
h1 = self.h1_conv(f[1])
h2 = self.h2_conv(f[2])
g0 = self.g0_conv(h0)
g1 = paddle.add(x=g0, y=h1)
g1 = F.relu(g1)
g1 = self.g1_conv(g1)
g2 = paddle.add(x=g1, y=h2)
g2 = F.relu(g2)
g2 = self.g2_conv(g2)
return g2
class Cross_Attention(nn.Layer):
def __init__(self, in_channels):
super(Cross_Attention, self).__init__()
self.theta_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_theta')
self.phi_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_phi')
self.g_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_g')
self.fh_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_weight')
self.fh_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_sc')
self.fv_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_weight')
self.fv_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_sc')
self.f_attn_conv = ConvBNLayer(in_channels * 2, in_channels, 1, 1, act='relu', name='f_attn')
def _cal_fweight(self, f, shape):
f_theta, f_phi, f_g = f
#flatten
f_theta = paddle.transpose(f_theta, [0, 2, 3, 1])
f_theta = paddle.reshape(f_theta, [shape[0] * shape[1], shape[2], 128])
f_phi = paddle.transpose(f_phi, [0, 2, 3, 1])
f_phi = paddle.reshape(f_phi, [shape[0] * shape[1], shape[2], 128])
f_g = paddle.transpose(f_g, [0, 2, 3, 1])
f_g = paddle.reshape(f_g, [shape[0] * shape[1], shape[2], 128])
#correlation
f_attn = paddle.matmul(f_theta, paddle.transpose(f_phi, [0, 2, 1]))
#scale
f_attn = f_attn / (128**0.5)
f_attn = F.softmax(f_attn)
#weighted sum
f_weight = paddle.matmul(f_attn, f_g)
f_weight = paddle.reshape(
f_weight, [shape[0], shape[1], shape[2], 128])
return f_weight
def forward(self, f_common):
f_shape = paddle.shape(f_common)
# print('f_shape: ', f_shape)
f_theta = self.theta_conv(f_common)
f_phi = self.phi_conv(f_common)
f_g = self.g_conv(f_common)
######## horizon ########
fh_weight = self._cal_fweight([f_theta, f_phi, f_g],
[f_shape[0], f_shape[2], f_shape[3]])
fh_weight = paddle.transpose(fh_weight, [0, 3, 1, 2])
fh_weight = self.fh_weight_conv(fh_weight)
#short cut
fh_sc = self.fh_sc_conv(f_common)
f_h = F.relu(fh_weight + fh_sc)
######## vertical ########
fv_theta = paddle.transpose(f_theta, [0, 1, 3, 2])
fv_phi = paddle.transpose(f_phi, [0, 1, 3, 2])
fv_g = paddle.transpose(f_g, [0, 1, 3, 2])
fv_weight = self._cal_fweight([fv_theta, fv_phi, fv_g],
[f_shape[0], f_shape[3], f_shape[2]])
fv_weight = paddle.transpose(fv_weight, [0, 3, 2, 1])
fv_weight = self.fv_weight_conv(fv_weight)
#short cut
fv_sc = self.fv_sc_conv(f_common)
f_v = F.relu(fv_weight + fv_sc)
######## merge ########
f_attn = paddle.concat([f_h, f_v], axis=1)
f_attn = self.f_attn_conv(f_attn)
return f_attn
class SASTFPN(nn.Layer):
def __init__(self, in_channels, with_cab=False, **kwargs):
super(SASTFPN, self).__init__()
self.in_channels = in_channels
self.with_cab = with_cab
self.FPN_Down_Fusion = FPN_Down_Fusion(self.in_channels)
self.FPN_Up_Fusion = FPN_Up_Fusion(self.in_channels)
self.out_channels = 128
self.cross_attention = Cross_Attention(self.out_channels)
def forward(self, x):
#down fpn
f_down = self.FPN_Down_Fusion(x)
#up fpn
f_up = self.FPN_Up_Fusion(x)
#fusion
f_common = paddle.add(x=f_down, y=f_up)
f_common = F.relu(f_common)
if self.with_cab:
# print('enhance f_common with CAB.')
f_common = self.cross_attention(f_common)
return f_common
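# A minimal end-to-end sketch (hedged example, assuming the ResNet_SAST
# backbone from earlier in this commit is importable). The down and up
# branches are fused into a single stride-4, 128-channel map; with_cab=True
# additionally runs the cross-attention block on the fused feature.
def _demo_sast_fpn():
    import paddle
    backbone = ResNet_SAST(in_channels=3, layers=50)
    fpn = SASTFPN(in_channels=backbone.out_channels, with_cab=True)
    feats = backbone(paddle.rand([1, 3, 512, 512]))
    print(fpn(feats).shape)  # [1, 128, 128, 128]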
...@@ -24,11 +24,13 @@ __all__ = ['build_post_process']
def build_post_process(config, global_config=None):
    from .db_postprocess import DBPostProcess
    from .east_postprocess import EASTPostProcess
    from .sast_postprocess import SASTPostProcess
    from .rec_postprocess import CTCLabelDecode, AttnLabelDecode
    from .cls_postprocess import ClsPostProcess
    support_dict = [
        'DBPostProcess', 'EASTPostProcess', 'SASTPostProcess',
        'CTCLabelDecode', 'AttnLabelDecode', 'ClsPostProcess'
    ]
    config = copy.deepcopy(config)
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .locality_aware_nms import nms_locality
import cv2
import os
import sys
# __dir__ = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(__dir__)
# sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
class EASTPostProcess(object):
"""
The post process for EAST.
"""
def __init__(self,
score_thresh=0.8,
cover_thresh=0.1,
nms_thresh=0.2,
**kwargs):
self.score_thresh = score_thresh
self.cover_thresh = cover_thresh
self.nms_thresh = nms_thresh
# The C++ locality-aware NMS is faster, but it only supports Python 3.5.
self.is_python35 = False
if sys.version_info.major == 3 and sys.version_info.minor == 5:
self.is_python35 = True
def restore_rectangle_quad(self, origin, geometry):
"""
Restore text quadrangles from per-pixel corner offsets.
"""
# quad
origin_concat = np.concatenate(
(origin, origin, origin, origin), axis=1) # (n, 8)
pred_quads = origin_concat - geometry
pred_quads = pred_quads.reshape((-1, 4, 2)) # (n, 4, 2)
return pred_quads
def detect(self,
score_map,
geo_map,
score_thresh=0.8,
cover_thresh=0.1,
nms_thresh=0.2):
"""
restore text boxes from score map and geo map
"""
score_map = score_map[0]
geo_map = np.swapaxes(geo_map, 1, 0)
geo_map = np.swapaxes(geo_map, 1, 2)
# filter the score map
xy_text = np.argwhere(score_map > score_thresh)
if len(xy_text) == 0:
return []
# sort the text boxes via the y axis
xy_text = xy_text[np.argsort(xy_text[:, 0])]
#restore quad proposals
text_box_restored = self.restore_rectangle_quad(
xy_text[:, ::-1] * 4, geo_map[xy_text[:, 0], xy_text[:, 1], :])
boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
boxes[:, :8] = text_box_restored.reshape((-1, 8))
boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
if self.is_python35:
import lanms
boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh)
else:
boxes = nms_locality(boxes.astype(np.float64), nms_thresh)
if boxes.shape[0] == 0:
return []
# Here we filter some low-score boxes by the average score map;
# this differs from the original paper.
for i, box in enumerate(boxes):
mask = np.zeros_like(score_map, dtype=np.uint8)
cv2.fillPoly(mask, box[:8].reshape(
(-1, 4, 2)).astype(np.int32) // 4, 1)
boxes[i, 8] = cv2.mean(score_map, mask)[0]
boxes = boxes[boxes[:, 8] > cover_thresh]
return boxes
def sort_poly(self, p):
"""
Sort polygons.
"""
min_axis = np.argmin(np.sum(p, axis=1))
p = p[[min_axis, (min_axis + 1) % 4,\
(min_axis + 2) % 4, (min_axis + 3) % 4]]
if abs(p[0, 0] - p[1, 0]) > abs(p[0, 1] - p[1, 1]):
return p
else:
return p[[0, 3, 2, 1]]
def __call__(self, outs_dict, shape_list):
score_list = outs_dict['f_score']
geo_list = outs_dict['f_geo']
img_num = len(shape_list)
dt_boxes_list = []
for ino in range(img_num):
score = score_list[ino].numpy()
geo = geo_list[ino].numpy()
boxes = self.detect(
score_map=score,
geo_map=geo,
score_thresh=self.score_thresh,
cover_thresh=self.cover_thresh,
nms_thresh=self.nms_thresh)
boxes_norm = []
if len(boxes) > 0:
h, w = score.shape[1:]
src_h, src_w, ratio_h, ratio_w = shape_list[ino]
boxes = boxes[:, :8].reshape((-1, 4, 2))
boxes[:, :, 0] /= ratio_w
boxes[:, :, 1] /= ratio_h
for i_box, box in enumerate(boxes):
box = self.sort_poly(box.astype(np.int32))
if np.linalg.norm(box[0] - box[1]) < 5 \
or np.linalg.norm(box[3] - box[0]) < 5:
continue
boxes_norm.append(box)
dt_boxes_list.append({'points': np.array(boxes_norm)})
return dt_boxes_list
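# A hedged I/O sketch: an all-zero score map yields no detections, so this
# only demonstrates the expected input layout (NCHW paddle tensors plus a
# per-image shape_list of [src_h, src_w, ratio_h, ratio_w]) and the
# [{'points': ndarray}] output structure. Real inputs come from EASTHead.
def _demo_east_postprocess():
    import paddle
    post = EASTPostProcess(score_thresh=0.8, cover_thresh=0.1, nms_thresh=0.2)
    outs = {
        'f_score': paddle.zeros([1, 1, 32, 32]),
        'f_geo': paddle.zeros([1, 8, 32, 32]),
    }
    shape_list = [[128., 128., 1.0, 1.0]]
    print(post(outs, shape_list))  # [{'points': array([], ...)}]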
"""
Locality aware nms.
"""
import numpy as np
from shapely.geometry import Polygon
def intersection(g, p):
"""
Intersection over union (IoU) of two quads.
"""
g = Polygon(g[:8].reshape((4, 2)))
p = Polygon(p[:8].reshape((4, 2)))
g = g.buffer(0)
p = p.buffer(0)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter / union
def intersection_iog(g, p):
"""
Intersection over the area of the second polygon (IoG).
"""
g = Polygon(g[:8].reshape((4, 2)))
p = Polygon(p[:8].reshape((4, 2)))
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
#union = g.area + p.area - inter
union = p.area
if union == 0:
print("p_area is very small")
return 0
else:
return inter / union
def weighted_merge(g, p):
"""
Weighted merge.
"""
g[:8] = (g[8] * g[:8] + p[8] * p[:8]) / (g[8] + p[8])
g[8] = (g[8] + p[8])
return g
def standard_nms(S, thres):
"""
Standard nms.
"""
order = np.argsort(S[:, 8])[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = np.array([intersection(S[i], S[t]) for t in order[1:]])
inds = np.where(ovr <= thres)[0]
order = order[inds + 1]
return S[keep]
def standard_nms_inds(S, thres):
"""
Standard NMS, returns the kept indices.
"""
order = np.argsort(S[:, 8])[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = np.array([intersection(S[i], S[t]) for t in order[1:]])
inds = np.where(ovr <= thres)[0]
order = order[inds + 1]
return keep
def nms(S, thres):
"""
nms.
"""
order = np.argsort(S[:, 8])[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = np.array([intersection(S[i], S[t]) for t in order[1:]])
inds = np.where(ovr <= thres)[0]
order = order[inds + 1]
return keep
def soft_nms(boxes_in, Nt_thres=0.3, threshold=0.8, sigma=0.5, method=2):
"""
soft_nms
:para boxes_in, N x 9 (coords + score)
:para threshould, eliminate cases min score(0.001)
:para Nt_thres, iou_threshi
:para sigma, gaussian weght
:method, linear or gaussian
"""
boxes = boxes_in.copy()
N = boxes.shape[0]
if N is None or N < 1:
return np.array([])
pos, maxpos = 0, 0
weight = 0.0
inds = np.arange(N)
tbox, sbox = boxes[0].copy(), boxes[0].copy()
for i in range(N):
maxscore = boxes[i, 8]
maxpos = i
tbox = boxes[i].copy()
ti = inds[i]
pos = i + 1
#get max box
while pos < N:
if maxscore < boxes[pos, 8]:
maxscore = boxes[pos, 8]
maxpos = pos
pos = pos + 1
#add max box as a detection
boxes[i, :] = boxes[maxpos, :]
inds[i] = inds[maxpos]
#swap
boxes[maxpos, :] = tbox
inds[maxpos] = ti
tbox = boxes[i].copy()
pos = i + 1
#NMS iteration
while pos < N:
sbox = boxes[pos].copy()
ts_iou_val = intersection(tbox, sbox)
if ts_iou_val > 0:
if method == 1:
if ts_iou_val > Nt_thres:
weight = 1 - ts_iou_val
else:
weight = 1
elif method == 2:
weight = np.exp(-1.0 * ts_iou_val**2 / sigma)
else:
if ts_iou_val > Nt_thres:
weight = 0
else:
weight = 1
boxes[pos, 8] = weight * boxes[pos, 8]
# if the box score falls below the threshold, discard the box by
# swapping in the last box and shrinking N
if boxes[pos, 8] < threshold:
boxes[pos, :] = boxes[N - 1, :]
inds[pos] = inds[N - 1]
N = N - 1
pos = pos - 1
pos = pos + 1
return boxes[:N]
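# A small worked example (hedged, not from the original code). With
# method=2 the overlapping lower-score quad is Gaussian down-weighted below
# `threshold` and dropped, while the distant quad survives with its score.
def _demo_soft_nms():
    import numpy as np
    boxes = np.array([
        [0, 0, 10, 0, 10, 10, 0, 10, 0.95],
        [1, 1, 11, 1, 11, 11, 1, 11, 0.90],
        [50, 50, 60, 50, 60, 60, 50, 60, 0.85],
    ])
    kept = soft_nms(boxes, Nt_thres=0.3, threshold=0.8, sigma=0.5, method=2)
    print(kept[:, 8])  # [0.95 0.85]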
def nms_locality(polys, thres=0.3):
"""
locality aware nms of EAST
:param polys: an N*9 numpy array; the first 8 values of each row are coordinates, the last is the score
:return: boxes after nms
"""
S = []
p = None
for g in polys:
if p is not None and intersection(g, p) > thres:
p = weighted_merge(g, p)
else:
if p is not None:
S.append(p)
p = g
if p is not None:
S.append(p)
if len(S) == 0:
return np.array([])
return standard_nms(np.array(S), thres)
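# A tiny usage example (hedged): two heavily overlapping quads are
# weight-merged into a single candidate before the final standard NMS pass,
# which is the "locality aware" part of the algorithm.
def _demo_nms_locality():
    import numpy as np
    polys = np.array([
        [0, 0, 10, 0, 10, 10, 0, 10, 0.9],
        [1, 1, 11, 1, 11, 11, 1, 11, 0.8],
    ], dtype=np.float64)
    print(nms_locality(polys, thres=0.3).shape)  # (1, 9): one merged box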
if __name__ == '__main__':
# 343,350,448,135,474,143,369,359
print(
Polygon(np.array([[343, 350], [448, 135], [474, 143], [369, 359]]))
.area)
...@@ -27,7 +27,7 @@ class BaseRecLabelDecode(object):
        'ch', 'en', 'en_sensitive', 'french', 'german', 'japan', 'korean'
    ]
    assert character_type in support_character_type, "Only {} are supported now but get {}".format(
        support_character_type, character_type)
    if character_type == "en":
        self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(__file__)
sys.path.append(__dir__)
sys.path.append(os.path.join(__dir__, '..'))
import numpy as np
from .locality_aware_nms import nms_locality
# import lanms
import cv2
import time
class SASTPostProcess(object):
"""
The post process for SAST.
"""
def __init__(self,
score_thresh=0.5,
nms_thresh=0.2,
sample_pts_num=2,
shrink_ratio_of_width=0.3,
expand_scale=1.0,
tcl_map_thresh=0.5,
**kwargs):
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.sample_pts_num = sample_pts_num
self.shrink_ratio_of_width = shrink_ratio_of_width
self.expand_scale = expand_scale
self.tcl_map_thresh = tcl_map_thresh
# The C++ locality-aware NMS is faster, but it only supports Python 3.5.
self.is_python35 = False
if sys.version_info.major == 3 and sys.version_info.minor == 5:
self.is_python35 = True
def point_pair2poly(self, point_pair_list):
"""
Transfer vertical point pairs into polygon points in clockwise order.
"""
# construct poly
point_num = len(point_pair_list) * 2
point_list = [0] * point_num
for idx, point_pair in enumerate(point_pair_list):
point_list[idx] = point_pair[0]
point_list[point_num - 1 - idx] = point_pair[1]
return np.array(point_list).reshape(-1, 2)
def shrink_quad_along_width(self, quad, begin_width_ratio=0., end_width_ratio=1.):
"""
Generate shrink_quad_along_width.
"""
ratio_pair = np.array([[begin_width_ratio], [end_width_ratio]], dtype=np.float32)
p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair
p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair
return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]])
def expand_poly_along_width(self, poly, shrink_ratio_of_width=0.3):
"""
expand poly along width.
"""
point_num = poly.shape[0]
left_quad = np.array([poly[0], poly[1], poly[-2], poly[-1]], dtype=np.float32)
left_ratio = -shrink_ratio_of_width * np.linalg.norm(left_quad[0] - left_quad[3]) / \
(np.linalg.norm(left_quad[0] - left_quad[1]) + 1e-6)
left_quad_expand = self.shrink_quad_along_width(left_quad, left_ratio, 1.0)
right_quad = np.array([poly[point_num // 2 - 2], poly[point_num // 2 - 1],
poly[point_num // 2], poly[point_num // 2 + 1]], dtype=np.float32)
right_ratio = 1.0 + \
shrink_ratio_of_width * np.linalg.norm(right_quad[0] - right_quad[3]) / \
(np.linalg.norm(right_quad[0] - right_quad[1]) + 1e-6)
right_quad_expand = self.shrink_quad_along_width(right_quad, 0.0, right_ratio)
poly[0] = left_quad_expand[0]
poly[-1] = left_quad_expand[-1]
poly[point_num // 2 - 1] = right_quad_expand[1]
poly[point_num // 2] = right_quad_expand[2]
return poly
def restore_quad(self, tcl_map, tcl_map_thresh, tvo_map):
"""Restore quad."""
xy_text = np.argwhere(tcl_map[:, :, 0] > tcl_map_thresh)
xy_text = xy_text[:, ::-1] # (n, 2)
# Sort the text boxes via the y axis
xy_text = xy_text[np.argsort(xy_text[:, 1])]
scores = tcl_map[xy_text[:, 1], xy_text[:, 0], 0]
scores = scores[:, np.newaxis]
# Restore
point_num = int(tvo_map.shape[-1] / 2)
assert point_num == 4
tvo_map = tvo_map[xy_text[:, 1], xy_text[:, 0], :]
xy_text_tile = np.tile(xy_text, (1, point_num)) # (n, point_num * 2)
quads = xy_text_tile - tvo_map
return scores, quads, xy_text
def quad_area(self, quad):
"""
compute area of a quad.
"""
edge = [
(quad[1][0] - quad[0][0]) * (quad[1][1] + quad[0][1]),
(quad[2][0] - quad[1][0]) * (quad[2][1] + quad[1][1]),
(quad[3][0] - quad[2][0]) * (quad[3][1] + quad[2][1]),
(quad[0][0] - quad[3][0]) * (quad[0][1] + quad[3][1])
]
return np.sum(edge) / 2.
def nms(self, dets):
if self.is_python35:
import lanms
dets = lanms.merge_quadrangle_n9(dets, self.nms_thresh)
else:
dets = nms_locality(dets, self.nms_thresh)
return dets
def cluster_by_quads_tco(self, tcl_map, tcl_map_thresh, quads, tco_map):
"""
Cluster pixels in tcl_map based on quads.
"""
instance_count = quads.shape[0] + 1 # contain background
instance_label_map = np.zeros(tcl_map.shape[:2], dtype=np.int32)
if instance_count == 1:
return instance_count, instance_label_map
# predict text center
xy_text = np.argwhere(tcl_map[:, :, 0] > tcl_map_thresh)
n = xy_text.shape[0]
xy_text = xy_text[:, ::-1] # (n, 2)
tco = tco_map[xy_text[:, 1], xy_text[:, 0], :] # (n, 2)
pred_tc = xy_text - tco
# get gt text center
m = quads.shape[0]
gt_tc = np.mean(quads, axis=1) # (m, 2)
pred_tc_tile = np.tile(pred_tc[:, np.newaxis, :], (1, m, 1)) # (n, m, 2)
gt_tc_tile = np.tile(gt_tc[np.newaxis, :, :], (n, 1, 1)) # (n, m, 2)
dist_mat = np.linalg.norm(pred_tc_tile - gt_tc_tile, axis=2) # (n, m)
xy_text_assign = np.argmin(dist_mat, axis=1) + 1 # (n,)
instance_label_map[xy_text[:, 1], xy_text[:, 0]] = xy_text_assign
return instance_count, instance_label_map
def estimate_sample_pts_num(self, quad, xy_text):
"""
Estimate sample points number.
"""
eh = (np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[1] - quad[2])) / 2.0
ew = (np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(quad[2] - quad[3])) / 2.0
dense_sample_pts_num = max(2, int(ew))
dense_xy_center_line = xy_text[np.linspace(0, xy_text.shape[0] - 1, dense_sample_pts_num,
endpoint=True, dtype=np.float32).astype(np.int32)]
dense_xy_center_line_diff = dense_xy_center_line[1:] - dense_xy_center_line[:-1]
estimate_arc_len = np.sum(np.linalg.norm(dense_xy_center_line_diff, axis=1))
sample_pts_num = max(2, int(estimate_arc_len / eh))
return sample_pts_num
def detect_sast(self, tcl_map, tvo_map, tbo_map, tco_map, ratio_w, ratio_h, src_w, src_h,
shrink_ratio_of_width=0.3, tcl_map_thresh=0.5, offset_expand=1.0, out_strid=4.0):
"""
First resize the tcl_map, tvo_map and tbo_map to the input size, then restore the polys.
"""
# restore quad
scores, quads, xy_text = self.restore_quad(tcl_map, tcl_map_thresh, tvo_map)
dets = np.hstack((quads, scores)).astype(np.float32, copy=False)
dets = self.nms(dets)
if dets.shape[0] == 0:
return []
quads = dets[:, :-1].reshape(-1, 4, 2)
# Compute quad area
quad_areas = []
for quad in quads:
quad_areas.append(-self.quad_area(quad))
# instance segmentation
# instance_count, instance_label_map = cv2.connectedComponents(tcl_map.astype(np.uint8), connectivity=8)
instance_count, instance_label_map = self.cluster_by_quads_tco(tcl_map, tcl_map_thresh, quads, tco_map)
# restore single poly with tcl instance.
poly_list = []
for instance_idx in range(1, instance_count):
xy_text = np.argwhere(instance_label_map == instance_idx)[:, ::-1]
quad = quads[instance_idx - 1]
q_area = quad_areas[instance_idx - 1]
if q_area < 5:
continue
# filter out very thin quads
len1 = float(np.linalg.norm(quad[0] - quad[1]))
len2 = float(np.linalg.norm(quad[1] - quad[2]))
min_len = min(len1, len2)
if min_len < 3:
continue
# filter small CC
if xy_text.shape[0] <= 0:
continue
# filter low confidence instance
xy_text_scores = tcl_map[xy_text[:, 1], xy_text[:, 0], 0]
if np.sum(xy_text_scores) / quad_areas[instance_idx - 1] < 0.1:
# if np.sum(xy_text_scores) / quad_areas[instance_idx - 1] < 0.05:
continue
# sort xy_text
left_center_pt = np.array([[(quad[0, 0] + quad[-1, 0]) / 2.0,
(quad[0, 1] + quad[-1, 1]) / 2.0]]) # (1, 2)
right_center_pt = np.array([[(quad[1, 0] + quad[2, 0]) / 2.0,
(quad[1, 1] + quad[2, 1]) / 2.0]]) # (1, 2)
proj_unit_vec = (right_center_pt - left_center_pt) / \
(np.linalg.norm(right_center_pt - left_center_pt) + 1e-6)
proj_value = np.sum(xy_text * proj_unit_vec, axis=1)
xy_text = xy_text[np.argsort(proj_value)]
# Sample pts in tcl map
if self.sample_pts_num == 0:
sample_pts_num = self.estimate_sample_pts_num(quad, xy_text)
else:
sample_pts_num = self.sample_pts_num
xy_center_line = xy_text[np.linspace(0, xy_text.shape[0] - 1, sample_pts_num,
endpoint=True, dtype=np.float32).astype(np.int32)]
point_pair_list = []
for x, y in xy_center_line:
# get corresponding offset
offset = tbo_map[y, x, :].reshape(2, 2)
if offset_expand != 1.0:
offset_length = np.linalg.norm(offset, axis=1, keepdims=True)
expand_length = np.clip(offset_length * (offset_expand - 1), a_min=0.5, a_max=3.0)
offset_detal = offset / offset_length * expand_length
offset = offset + offset_detal
# original point
ori_yx = np.array([y, x], dtype=np.float32)
point_pair = (ori_yx + offset)[:, ::-1]* out_strid / np.array([ratio_w, ratio_h]).reshape(-1, 2)
point_pair_list.append(point_pair)
# ndarray: (n, 2); expand poly along width
detected_poly = self.point_pair2poly(point_pair_list)
detected_poly = self.expand_poly_along_width(detected_poly, shrink_ratio_of_width)
detected_poly[:, 0] = np.clip(detected_poly[:, 0], a_min=0, a_max=src_w)
detected_poly[:, 1] = np.clip(detected_poly[:, 1], a_min=0, a_max=src_h)
poly_list.append(detected_poly)
return poly_list
def __call__(self, outs_dict, shape_list):
score_list = outs_dict['f_score']
border_list = outs_dict['f_border']
tvo_list = outs_dict['f_tvo']
tco_list = outs_dict['f_tco']
img_num = len(shape_list)
poly_lists = []
for ino in range(img_num):
p_score = score_list[ino].transpose((1,2,0)).numpy()
p_border = border_list[ino].transpose((1,2,0)).numpy()
p_tvo = tvo_list[ino].transpose((1,2,0)).numpy()
p_tco = tco_list[ino].transpose((1,2,0)).numpy()
src_h, src_w, ratio_h, ratio_w = shape_list[ino]
poly_list = self.detect_sast(p_score, p_tvo, p_border, p_tco, ratio_w, ratio_h, src_w, src_h,
shrink_ratio_of_width=self.shrink_ratio_of_width,
tcl_map_thresh=self.tcl_map_thresh, offset_expand=self.expand_scale)
poly_lists.append({'points': np.array(poly_list)})
return poly_lists
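# A hedged I/O sketch: an all-zero score map yields no text instances, so
# this only shows the expected tensor layout (NCHW paddle tensors) and the
# [{'points': ndarray}] output structure. Real maps come from SASTHead.
def _demo_sast_postprocess():
    import paddle
    post = SASTPostProcess(score_thresh=0.5, sample_pts_num=2)
    outs = {
        'f_score': paddle.zeros([1, 1, 32, 32]),
        'f_border': paddle.zeros([1, 4, 32, 32]),
        'f_tvo': paddle.zeros([1, 8, 32, 32]),
        'f_tco': paddle.zeros([1, 2, 32, 32]),
    }
    shape_list = [[128., 128., 1.0, 1.0]]
    print(post(outs, shape_list))  # [{'points': array([], ...)}]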
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import string
import re
from .check import check_config_params
import sys
class CharacterOps(object):
""" Convert between text-label and text-index """
def __init__(self, config):
self.character_type = config['character_type']
self.loss_type = config['loss_type']
self.max_text_len = config['max_text_length']
if self.character_type == "en":
self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
dict_character = list(self.character_str)
elif self.character_type == "ch":
character_dict_path = config['character_dict_path']
add_space = False
if 'use_space_char' in config:
add_space = config['use_space_char']
self.character_str = ""
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
for line in lines:
line = line.decode('utf-8').strip("\n").strip("\r\n")
self.character_str += line
if add_space:
self.character_str += " "
dict_character = list(self.character_str)
elif self.character_type == "en_sensitive":
# same with ASTER setting (use 94 char).
self.character_str = string.printable[:-6]
dict_character = list(self.character_str)
else:
self.character_str = None
assert self.character_str is not None, \
"Nonsupport type of the character: {}".format(self.character_str)
self.beg_str = "sos"
self.end_str = "eos"
if self.loss_type == "attention":
dict_character = [self.beg_str, self.end_str] + dict_character
elif self.loss_type == "srn":
dict_character = dict_character + [self.beg_str, self.end_str]
self.dict = {}
for i, char in enumerate(dict_character):
self.dict[char] = i
self.character = dict_character
def encode(self, text):
"""convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
output:
text: concatenated text index for CTCLoss.
[sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
length: length of each text. [batch_size]
"""
if self.character_type == "en":
text = text.lower()
text_list = []
for char in text:
if char not in self.dict:
continue
text_list.append(self.dict[char])
text = np.array(text_list)
return text
def decode(self, text_index, is_remove_duplicate=False):
""" convert text-index into text-label. """
char_list = []
char_num = self.get_char_num()
if self.loss_type == "attention":
beg_idx = self.get_beg_end_flag_idx("beg")
end_idx = self.get_beg_end_flag_idx("end")
ignored_tokens = [beg_idx, end_idx]
else:
ignored_tokens = [char_num]
for idx in range(len(text_index)):
if text_index[idx] in ignored_tokens:
continue
if is_remove_duplicate:
if idx > 0 and text_index[idx - 1] == text_index[idx]:
continue
char_list.append(self.character[int(text_index[idx])])
text = ''.join(char_list)
return text
def get_char_num(self):
return len(self.character)
def get_beg_end_flag_idx(self, beg_or_end):
if self.loss_type == "attention":
if beg_or_end == "beg":
idx = np.array(self.dict[self.beg_str])
elif beg_or_end == "end":
idx = np.array(self.dict[self.end_str])
else:
assert False, "Unsupport type %s in get_beg_end_flag_idx"\
% beg_or_end
return idx
else:
err = "error in get_beg_end_flag_idx when using the loss %s"\
% (self.loss_type)
assert False, err
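# A round-trip example (hedged, assuming CTC-style decoding with the
# built-in English alphabet). encode() lower-cases and maps characters to
# indices; decode() maps them back, optionally squashing CTC duplicates.
def _demo_character_ops():
    ops = CharacterOps({
        'character_type': 'en',
        'loss_type': 'ctc',
        'max_text_length': 25,
    })
    ids = ops.encode("Hello123")
    print(ops.decode(ids))  # hello123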
def cal_predicts_accuracy(char_ops,
preds,
preds_lod,
labels,
labels_lod,
is_remove_duplicate=False):
acc_num = 0
img_num = 0
for ino in range(len(labels_lod) - 1):
beg_no = preds_lod[ino]
end_no = preds_lod[ino + 1]
preds_text = preds[beg_no:end_no].reshape(-1)
preds_text = char_ops.decode(preds_text, is_remove_duplicate)
beg_no = labels_lod[ino]
end_no = labels_lod[ino + 1]
labels_text = labels[beg_no:end_no].reshape(-1)
labels_text = char_ops.decode(labels_text, is_remove_duplicate)
img_num += 1
if preds_text == labels_text:
acc_num += 1
acc = acc_num * 1.0 / img_num
return acc, acc_num, img_num
def cal_predicts_accuracy_srn(char_ops,
preds,
labels,
max_text_len,
is_debug=False):
acc_num = 0
img_num = 0
char_num = char_ops.get_char_num()
total_len = preds.shape[0]
img_num = int(total_len / max_text_len)
for i in range(img_num):
cur_label = []
cur_pred = []
for j in range(max_text_len):
if labels[j + i * max_text_len] != int(char_num-1): #0
cur_label.append(labels[j + i * max_text_len][0])
else:
break
for j in range(max_text_len + 1):
if j < len(cur_label) and preds[j + i * max_text_len][
0] != cur_label[j]:
break
elif j == len(cur_label) and j == max_text_len:
acc_num += 1
break
elif j == len(cur_label) and preds[j + i * max_text_len][0] == int(char_num-1):
acc_num += 1
break
acc = acc_num * 1.0 / img_num
return acc, acc_num, img_num
def convert_rec_attention_infer_res(preds):
img_num = preds.shape[0]
target_lod = [0]
convert_ids = []
for ino in range(img_num):
end_pos = np.where(preds[ino, :] == 1)[0]
if len(end_pos) <= 1:
text_list = preds[ino, 1:]
else:
text_list = preds[ino, 1:end_pos[1]]
target_lod.append(target_lod[ino] + len(text_list))
convert_ids = convert_ids + list(text_list)
convert_ids = np.array(convert_ids)
convert_ids = convert_ids.reshape((-1, 1))
return convert_ids, target_lod
def convert_rec_label_to_lod(ori_labels):
img_num = len(ori_labels)
target_lod = [0]
convert_ids = []
for ino in range(img_num):
target_lod.append(target_lod[ino] + len(ori_labels[ino]))
convert_ids = convert_ids + list(ori_labels[ino])
convert_ids = np.array(convert_ids)
convert_ids = convert_ids.reshape((-1, 1))
return convert_ids, target_lod
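# A small example (hedged) of the label-to-LoD conversion used by Paddle's
# CTC pipeline: per-image labels are flattened into one column while the
# LoD list records each sample's start offset.
def _demo_convert_rec_label_to_lod():
    ids, lod = convert_rec_label_to_lod([[1, 2, 3], [4, 5]])
    print(ids.ravel().tolist())  # [1, 2, 3, 4, 5]
    print(lod)                   # [0, 3, 5]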
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import logging
logger = logging.getLogger(__name__)
def check_config_params(config, config_name, params):
for param in params:
if param not in config:
err = "param %s didn't find in %s!" % (param, config_name)
assert False, err
return
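# A one-line usage sketch (hedged): the helper simply asserts that every
# required key is present in the given config dict.
def _demo_check_config_params():
    cfg = {'character_type': 'en', 'loss_type': 'ctc', 'max_text_length': 25}
    check_config_params(cfg, 'rec_config',
                        ['character_type', 'loss_type', 'max_text_length'])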
...@@ -230,10 +230,10 @@ def draw_ocr_box_txt(image,
            box[2][1], box[3][0], box[3][1]
        ],
        outline=color)
    box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][1])**2)
    box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][1])**2)
    if box_height > 2 * box_width:
        font_size = max(int(box_width * 0.9), 10)
        font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
...@@ -260,7 +260,6 @@ def str_count(s):
    Count the number of Chinese characters,
    a single English character and a single number
    equal to half the length of Chinese characters.
    args:
        s(string): the input of string
    return(int):
...@@ -295,7 +294,6 @@ def text_visual(texts,
        img_w(int): the width of blank img
        font_path: the path of font which is used to draw text
    return(array):
    """
    if scores is not None:
        assert len(texts) == len(
......