diff --git a/README.md b/README.md index ad1ebd96e91ed963396dcc4afb445298b09f7d61..df53191a12b2a64290db25ba4c6c600f2e489628 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ PaddleOCR support a variety of cutting-edge algorithms related to OCR, and devel - [PP-Structure 🔥](./ppstructure/README.md) - [Quick Start](./ppstructure/docs/quickstart_en.md) - [Model Zoo](./ppstructure/docs/models_list_en.md) - - [Model training](./doc/doc_en/training_en.md) + - [Model training](./doc/doc_en/training_en.md) - [Layout Parser](./ppstructure/layout/README.md) - [Table Recognition](./ppstructure/table/README.md) - [DocVQA](./ppstructure/vqa/README.md) @@ -121,9 +121,9 @@ PaddleOCR support a variety of cutting-edge algorithms related to OCR, and devel - [Other Data Annotation Tools](./doc/doc_en/data_annotation_en.md) - [Other Data Synthesis Tools](./doc/doc_en/data_synthesis_en.md) - Datasets - - [General OCR Datasets(Chinese/English)](./doc/doc_en/datasets_en.md) - - [HandWritten_OCR_Datasets(Chinese)](./doc/doc_en/handwritten_datasets_en.md) - - [Various OCR Datasets(multilingual)](./doc/doc_en/vertical_and_multilingual_datasets_en.md) + - [General OCR Datasets(Chinese/English)](doc/doc_en/dataset/datasets_en.md) + - [HandWritten_OCR_Datasets(Chinese)](doc/doc_en/dataset/handwritten_datasets_en.md) + - [Various OCR Datasets(multilingual)](doc/doc_en/dataset/vertical_and_multilingual_datasets_en.md) - [Code Structure](./doc/doc_en/tree_en.md) - [Visualization](#Visualization) - [Community](#Community) @@ -170,4 +170,4 @@ More details, please refer to [Multilingual OCR Development Plan](https://github ## License -This project is released under Apache 2.0 license \ No newline at end of file +This project is released under Apache 2.0 license diff --git a/README_ch.md b/README_ch.md index d3b26ee9d99c839ac9823dd635dabe41f13f5d31..e2e6835c884da16928e7d36419c6daa6895dd9e1 100755 --- a/README_ch.md +++ b/README_ch.md @@ -128,12 +128,12 @@ PaddleOCR旨在打造一套丰富、领先、且实用的OCR工具库,助力 - [其它数据标注工具](./doc/doc_ch/data_annotation.md) - [其它数据合成工具](./doc/doc_ch/data_synthesis.md) - 数据集 - - [通用中英文OCR数据集](./doc/doc_ch/datasets.md) - - [手写中文OCR数据集](./doc/doc_ch/handwritten_datasets.md) - - [垂类多语言OCR数据集](./doc/doc_ch/vertical_and_multilingual_datasets.md) - - [版面分析数据集](./doc/doc_ch/layout_datasets.md) - - [表格识别数据集](./doc/doc_ch/table_datasets.md) - - [DocVQA数据集](./doc/doc_ch/docvqa_datasets.md) + - [通用中英文OCR数据集](doc/doc_ch/dataset/datasets.md) + - [手写中文OCR数据集](doc/doc_ch/dataset/handwritten_datasets.md) + - [垂类多语言OCR数据集](doc/doc_ch/dataset/vertical_and_multilingual_datasets.md) + - [版面分析数据集](doc/doc_ch/dataset/layout_datasets.md) + - [表格识别数据集](doc/doc_ch/dataset/table_datasets.md) + - [DocVQA数据集](doc/doc_ch/dataset/docvqa_datasets.md) - [代码组织结构](./doc/doc_ch/tree.md) - [效果展示](#效果展示) - [《动手学OCR》电子书📚](./doc/doc_ch/ocr_book.md) @@ -160,13 +160,13 @@ PaddleOCR旨在打造一套丰富、领先、且实用的OCR工具库,助力 - +
PP-OCRv2 英文模型
@@ -176,12 +176,12 @@ PaddleOCR旨在打造一套丰富、领先、且实用的OCR工具库,助力
PP-OCRv2 其他语言模型
@@ -196,8 +196,8 @@ PaddleOCR旨在打造一套丰富、领先、且实用的OCR工具库,助力
- RE(关系提取)
diff --git a/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml b/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml new file mode 100644 index 0000000000000000000000000000000000000000..1ffeba07995860f964e22b8b9d2538320d80f651 --- /dev/null +++ b/configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml @@ -0,0 +1,99 @@ +Global: + debug: false + use_gpu: true + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v3_rotnet + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 25 + infer_mode: false + use_space_char: true + save_res_path: ./output/rec/predicts_chinese_lite_v2.0.txt +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: L2 + factor: 1.0e-05 +Architecture: + model_type: cls + algorithm: CLS + Transform: null + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + Head: + name: ClsHead + class_dim: 4 + +Loss: + name: ClsLoss + main_indicator: acc + +PostProcess: + name: ClsPostProcess + +Metric: + name: ClsMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecAug: + use_tia: False + - RandAugment: + - SSLRotateResize: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: ["image", "label"] + loader: + collate_fn: "SSLRotateCollate" + shuffle: true + batch_size_per_card: 32 + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - SSLRotateResize: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: ["image", "label"] + loader: + collate_fn: "SSLRotateCollate" + shuffle: false + drop_last: false + batch_size_per_card: 64 + num_workers: 8 +profiler_options: null diff --git a/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml b/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e77577c17abe2111c501d96ce6b1087ac44f8d6 --- /dev/null +++ b/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml @@ -0,0 +1,234 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/ch_PP-OCR_v3_det/ + save_epoch_step: 100 + eval_batch_step: + - 0 + - 400 + cal_metric_during_train: false + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true + +Architecture: + name: DistillationModel + algorithm: Distillation + model_type: det + Models: + Student: + model_type: det + algorithm: DB + Transform: null + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: true + Neck: + name: RSEFPN + out_channels: 96 + shortcut: True + Head: + name: DBHead + k: 50 + Student2: + model_type: det + algorithm: DB + Transform: null + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: true + Neck: + name: RSEFPN + out_channels: 96 + shortcut: True + Head: + name: DBHead + k: 50 + Teacher: + 
freeze_params: true + return_all_feats: false + model_type: det + algorithm: DB + Backbone: + name: ResNet + in_channels: 3 + layers: 50 + Neck: + name: LKPAN + out_channels: 256 + Head: + name: DBHead + kernel_list: [7,2,2] + k: 50 + +Loss: + name: CombinedLoss + loss_config_list: + - DistillationDilaDBLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + - ["Student2", "Teacher"] + key: maps + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + - DistillationDMLLoss: + model_name_pairs: + - ["Student", "Student2"] + maps_name: "thrink_maps" + weight: 1.0 + # act: None + model_name_pairs: ["Student", "Student2"] + key: maps + - DistillationDBLoss: + weight: 1.0 + model_name_list: ["Student", "Student2"] + # key: maps + # name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: L2 + factor: 5.0e-05 + +PostProcess: + name: DistillationDBPostProcess + model_name: ["Student"] + key: head_out + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DistillationMetric + base_metric_name: DetMetric + main_indicator: hmean + key: "Student" + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - CopyPaste: + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 960 + - 960 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 8 + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: null + - NormalizeImage: + scale: 1./255. 
+ mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 diff --git a/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml b/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml new file mode 100644 index 0000000000000000000000000000000000000000..0e8af776479ea26f834ca9ddc169f80b3982e86d --- /dev/null +++ b/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_student.yml @@ -0,0 +1,163 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/ch_PP-OCR_V3_det/ + save_epoch_step: 100 + eval_batch_step: + - 0 + - 400 + cal_metric_during_train: false + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: True + Neck: + name: RSEFPN + out_channels: 96 + shortcut: True + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: L2 + factor: 5.0e-05 +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 +Metric: + name: DetMetric + main_indicator: hmean +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 960 + - 960 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 8 + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: null + - NormalizeImage: + scale: 1./255. 
+ mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 diff --git a/deploy/README.md b/deploy/README.md index 033662a7535e10c2c468436d4f01f06d84996fb7..69e2438996a1329e801f842ef78d2d6e115c5831 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -22,9 +22,11 @@ PP-OCR has supported muti deployment schemes. Click the link to get the specific - [Python Inference](../doc/doc_en/inference_ppocr_en.md) - [C++ Inference](./cpp_infer/readme.md) -- [Serving](./pdserving/README.md) -- [Paddle-Lite](./lite/readme.md) +- [Serving (Python/C++)](./pdserving/README.md) +- [Paddle-Lite (ARM CPU/OpenCL ARM GPU/Metal ARM GPU)](./lite/readme.md) - [Paddle.js](./paddlejs/README.md) +- [Jetson Inference]() +- [XPU Inference]() - [Paddle2ONNX](./paddle2onnx/readme.md) If you need the deployment tutorial of academic algorithm models other than PP-OCR, please directly enter the main page of corresponding algorithms, [entrance](../doc/doc_en/algorithm_overview_en.md)。 \ No newline at end of file diff --git a/deploy/README_ch.md b/deploy/README_ch.md index 96b49ddd9b94bff877ae1ac1d0e6f6e90612ec85..63ae59537316480a302dca7c3714db3c1003553e 100644 --- a/deploy/README_ch.md +++ b/deploy/README_ch.md @@ -22,9 +22,11 @@ PP-OCR模型已打通多种场景部署方案,点击链接获取具体的使 - [Python 推理](../doc/doc_ch/inference_ppocr.md) - [C++ 推理](./cpp_infer/readme_ch.md) -- [Serving 服务化部署](./pdserving/README_CN.md) -- [Paddle-Lite 端侧部署](./lite/readme_ch.md) -- [Paddle.js 服务化部署](./paddlejs/README_ch.md) +- [Serving 服务化部署(Python/C++)](./pdserving/README_CN.md) +- [Paddle-Lite 端侧部署(ARM CPU/OpenCL ARM GPU/Metal ARM GPU)](./lite/readme_ch.md) +- [Paddle.js 部署](./paddlejs/README_ch.md) +- [Jetson 推理]() +- [XPU 推理]() - [Paddle2ONNX 推理](./paddle2onnx/readme_ch.md) 需要PP-OCR以外的学术算法模型的推理部署,请直接进入相应算法主页面,[入口](../doc/doc_ch/algorithm_overview.md)。 \ No newline at end of file diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py index 822fd5da4c30a934d0e590ab1067f9f9188213c2..90f79dab34a5f20d4556ae4b10ad1d4e1f8b7f0d 100755 --- a/deploy/slim/quantization/export_model.py +++ b/deploy/slim/quantization/export_model.py @@ -35,17 +35,7 @@ from ppocr.metrics import build_metric import tools.program as program from paddleslim.dygraph.quant import QAT from ppocr.data import build_dataloader - - -def export_single_model(quanter, model, infer_shape, save_path, logger): - quanter.save_quantized_model( - model, - save_path, - input_spec=[ - paddle.static.InputSpec( - shape=[None] + infer_shape, dtype='float32') - ]) - logger.info('inference QAT model is saved to {}'.format(save_path)) +from tools.export_model import export_single_model def main(): @@ -84,17 +74,54 @@ def main(): config['Global']) # build model - # for rec algorithm if hasattr(post_process_class, 'character'): char_num = len(getattr(post_process_class, 'character')) if config['Architecture']["algorithm"] in ["Distillation", ]: # distillation model for key in config['Architecture']["Models"]: - config['Architecture']["Models"][key]["Head"][ - 'out_channels'] = char_num + if config['Architecture']['Models'][key]['Head'][ + 'name'] == 'MultiHead': # for multi head + if config['PostProcess'][ + 'name'] == 'DistillationSARLabelDecode': + char_num = char_num - 2 + # update SARLoss params + assert list(config['Loss']['loss_config_list'][-1].keys())[ + 0] == 
'DistillationSARLoss' + config['Loss']['loss_config_list'][-1][ + 'DistillationSARLoss']['ignore_index'] = char_num + 1 + out_channels_list = {} + out_channels_list['CTCLabelDecode'] = char_num + out_channels_list['SARLabelDecode'] = char_num + 2 + config['Architecture']['Models'][key]['Head'][ + 'out_channels_list'] = out_channels_list + else: + config['Architecture']["Models"][key]["Head"][ + 'out_channels'] = char_num + elif config['Architecture']['Head'][ + 'name'] == 'MultiHead': # for multi head + if config['PostProcess']['name'] == 'SARLabelDecode': + char_num = char_num - 2 + # update SARLoss params + assert list(config['Loss']['loss_config_list'][1].keys())[ + 0] == 'SARLoss' + if config['Loss']['loss_config_list'][1]['SARLoss'] is None: + config['Loss']['loss_config_list'][1]['SARLoss'] = { + 'ignore_index': char_num + 1 + } + else: + config['Loss']['loss_config_list'][1]['SARLoss'][ + 'ignore_index'] = char_num + 1 + out_channels_list = {} + out_channels_list['CTCLabelDecode'] = char_num + out_channels_list['SARLabelDecode'] = char_num + 2 + config['Architecture']['Head'][ + 'out_channels_list'] = out_channels_list else: # base rec model config['Architecture']["Head"]['out_channels'] = char_num + if config['PostProcess']['name'] == 'SARLabelDecode': # for SAR model + config['Loss']['ignore_index'] = char_num - 1 + model = build_model(config['Architecture']) # get QAT model @@ -120,21 +147,22 @@ def main(): for k, v in metric.items(): logger.info('{}:{}'.format(k, v)) - infer_shape = [3, 32, 100] if model_type == "rec" else [3, 640, 640] - save_path = config["Global"]["save_inference_dir"] arch_config = config["Architecture"] + + arch_config = config["Architecture"] + if arch_config["algorithm"] in ["Distillation", ]: # distillation model + archs = list(arch_config["Models"].values()) for idx, name in enumerate(model.model_name_list): model.model_list[idx].eval() sub_model_save_path = os.path.join(save_path, name, "inference") - export_single_model(quanter, model.model_list[idx], infer_shape, - sub_model_save_path, logger) + export_single_model(model.model_list[idx], archs[idx], + sub_model_save_path, logger, quanter) else: save_path = os.path.join(save_path, "inference") - model.eval() - export_single_model(quanter, model, infer_shape, save_path, logger) + export_single_model(model, arch_config, save_path, logger, quanter) if __name__ == "__main__": diff --git a/deploy/slim/quantization/quant.py b/deploy/slim/quantization/quant.py index 355ba77f83121d07a52b1b8645bc6d4893373c42..f7acb185add5d40b749e7442111891869dfaeb22 100755 --- a/deploy/slim/quantization/quant.py +++ b/deploy/slim/quantization/quant.py @@ -112,10 +112,48 @@ def main(config, device, logger, vdl_writer): if config['Architecture']["algorithm"] in ["Distillation", ]: # distillation model for key in config['Architecture']["Models"]: - config['Architecture']["Models"][key]["Head"][ - 'out_channels'] = char_num + if config['Architecture']['Models'][key]['Head'][ + 'name'] == 'MultiHead': # for multi head + if config['PostProcess'][ + 'name'] == 'DistillationSARLabelDecode': + char_num = char_num - 2 + # update SARLoss params + assert list(config['Loss']['loss_config_list'][-1].keys())[ + 0] == 'DistillationSARLoss' + config['Loss']['loss_config_list'][-1][ + 'DistillationSARLoss']['ignore_index'] = char_num + 1 + out_channels_list = {} + out_channels_list['CTCLabelDecode'] = char_num + out_channels_list['SARLabelDecode'] = char_num + 2 + config['Architecture']['Models'][key]['Head'][ + 'out_channels_list'] = 
out_channels_list + else: + config['Architecture']["Models"][key]["Head"][ + 'out_channels'] = char_num + elif config['Architecture']['Head'][ + 'name'] == 'MultiHead': # for multi head + if config['PostProcess']['name'] == 'SARLabelDecode': + char_num = char_num - 2 + # update SARLoss params + assert list(config['Loss']['loss_config_list'][1].keys())[ + 0] == 'SARLoss' + if config['Loss']['loss_config_list'][1]['SARLoss'] is None: + config['Loss']['loss_config_list'][1]['SARLoss'] = { + 'ignore_index': char_num + 1 + } + else: + config['Loss']['loss_config_list'][1]['SARLoss'][ + 'ignore_index'] = char_num + 1 + out_channels_list = {} + out_channels_list['CTCLabelDecode'] = char_num + out_channels_list['SARLabelDecode'] = char_num + 2 + config['Architecture']['Head'][ + 'out_channels_list'] = out_channels_list else: # base rec model config['Architecture']["Head"]['out_channels'] = char_num + + if config['PostProcess']['name'] == 'SARLabelDecode': # for SAR model + config['Loss']['ignore_index'] = char_num - 1 model = build_model(config['Architecture']) pre_best_model_dict = dict() diff --git a/doc/datasets/table_PubTabNet_demo/PMC524509_007_00.png b/doc/datasets/table_PubTabNet_demo/PMC524509_007_00.png new file mode 100755 index 0000000000000000000000000000000000000000..5b9d631cba434e4bd6ac6fe2108b7f6c081c4811 Binary files /dev/null and b/doc/datasets/table_PubTabNet_demo/PMC524509_007_00.png differ diff --git a/doc/datasets/table_PubTabNet_demo/PMC535543_007_01.png b/doc/datasets/table_PubTabNet_demo/PMC535543_007_01.png new file mode 100755 index 0000000000000000000000000000000000000000..e808de72d62325ae4cbd009397b7beaeed0d88fc Binary files /dev/null and b/doc/datasets/table_PubTabNet_demo/PMC535543_007_01.png differ diff --git a/doc/datasets/table_tal_demo/1.jpg b/doc/datasets/table_tal_demo/1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7ddd6d1db59ca27a0461ab93b3672aeec4a8941 Binary files /dev/null and b/doc/datasets/table_tal_demo/1.jpg differ diff --git a/doc/datasets/table_tal_demo/2.jpg b/doc/datasets/table_tal_demo/2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7ddd6d1db59ca27a0461ab93b3672aeec4a8941 Binary files /dev/null and b/doc/datasets/table_tal_demo/2.jpg differ diff --git a/doc/doc_ch/algorithm_det_db.md b/doc/doc_ch/algorithm_det_db.md index 7f94ceaee06ac41a42c785f26bffa30005a98355..90837c2ac1ebbc04ee47cbb74ed6466352710e88 100644 --- a/doc/doc_ch/algorithm_det_db.md +++ b/doc/doc_ch/algorithm_det_db.md @@ -25,8 +25,8 @@ |模型|骨干网络|配置文件|precision|recall|Hmean|下载链接| | --- | --- | --- | --- | --- | --- | --- | -|DB|ResNet50_vd|configs/det/det_r50_vd_db.yml|86.41%|78.72%|82.38%|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)| -|DB|MobileNetV3|configs/det/det_mv3_db.yml|77.29%|73.08%|75.12%|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)| +|DB|ResNet50_vd|[configs/det/det_r50_vd_db.yml](../../configs/det/det_r50_vd_db.yml)|86.41%|78.72%|82.38%|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)| +|DB|MobileNetV3|[configs/det/det_mv3_db.yml](../../configs/det/det_mv3_db.yml)|77.29%|73.08%|75.12%|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)| diff --git a/doc/doc_ch/algorithm_det_fcenet.md b/doc/doc_ch/algorithm_det_fcenet.md new file mode 100644 index 0000000000000000000000000000000000000000..bd2e734204d32bbf575ddea9f889953a72582c59 --- /dev/null +++ 
b/doc/doc_ch/algorithm_det_fcenet.md @@ -0,0 +1,104 @@ +# FCENet + +- [1. 算法简介](#1) +- [2. 环境配置](#2) +- [3. 模型训练、评估、预测](#3) + - [3.1 训练](#3-1) + - [3.2 评估](#3-2) + - [3.3 预测](#3-3) +- [4. 推理部署](#4) + - [4.1 Python推理](#4-1) + - [4.2 C++推理](#4-2) + - [4.3 Serving服务化部署](#4-3) + - [4.4 更多推理部署](#4-4) +- [5. FAQ](#5) + + +## 1. 算法简介 + +论文信息: +> [Fourier Contour Embedding for Arbitrary-Shaped Text Detection](https://arxiv.org/abs/2104.10442) +> Yiqin Zhu and Jianyong Chen and Lingyu Liang and Zhanghui Kuang and Lianwen Jin and Wayne Zhang +> CVPR, 2021 + +在CTW1500文本检测公开数据集上,算法复现效果如下: + +| 模型 |骨干网络|配置文件|precision|recall|Hmean|下载链接| +|-----| --- | --- | --- | --- | --- | --- | +| FCE | ResNet50_dcn | [configs/det/det_r50_vd_dcn_fce_ctw.yml](../../configs/det/det_r50_vd_dcn_fce_ctw.yml)| 88.39%|82.18%|85.27%|[训练模型](https://paddleocr.bj.bcebos.com/contribution/det_r50_dcn_fce_ctw_v2.0_train.tar)| + + +## 2. 环境配置 +请先参考[《运行环境准备》](./environment.md)配置PaddleOCR运行环境,参考[《项目克隆》](./clone.md)克隆项目代码。 + + + +## 3. 模型训练、评估、预测 + +上述FCE模型使用CTW1500文本检测公开数据集训练得到,数据集下载可参考 [ocr_datasets](./dataset/ocr_datasets.md)。 + +数据下载完成后,请参考[文本检测训练教程](./detection.md)进行训练。PaddleOCR对代码进行了模块化,训练不同的检测模型只需要**更换配置文件**即可。 + + + +## 4. 推理部署 + + +### 4.1 Python推理 +首先将FCE文本检测训练过程中保存的模型,转换成inference model。以基于Resnet50_vd_dcn骨干网络,在CTW1500英文数据集训练的模型为例( [模型下载地址](https://paddleocr.bj.bcebos.com/contribution/det_r50_dcn_fce_ctw_v2.0_train.tar) ),可以使用如下命令进行转换: + +```shell +python3 tools/export_model.py -c configs/det/det_r50_vd_dcn_fce_ctw.yml -o Global.pretrained_model=./det_r50_dcn_fce_ctw_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_fce +``` + +FCE文本检测模型推理,执行非弯曲文本检测,可以执行如下命令: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_model_dir="./inference/det_fce/" --det_algorithm="FCE" --det_fce_box_type=quad +``` + +可视化文本检测结果默认保存到`./inference_results`文件夹里面,结果文件的名称前缀为'det_res'。结果示例如下: + +![](../imgs_results/det_res_img_10_fce.jpg) + +如果想执行弯曲文本检测,可以执行如下命令: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img623.jpg" --det_model_dir="./inference/det_fce/" --det_algorithm="FCE" --det_fce_box_type=poly +``` + +可视化文本检测结果默认保存到`./inference_results`文件夹里面,结果文件的名称前缀为'det_res'。结果示例如下: + +![](../imgs_results/det_res_img623_fce.jpg) + +**注意**:由于CTW1500数据集只有1000张训练图像,且主要针对英文场景,所以上述模型对中文文本图像检测效果会比较差。 + + +### 4.2 C++推理 + +由于后处理暂未使用CPP编写,FCE文本检测模型暂不支持CPP推理。 + + +### 4.3 Serving服务化部署 + +暂未支持 + + +### 4.4 更多推理部署 + +暂未支持 + + +## 5. FAQ + + +## 引用 + +```bibtex +@InProceedings{zhu2021fourier, + title={Fourier Contour Embedding for Arbitrary-Shaped Text Detection}, + author={Yiqin Zhu and Jianyong Chen and Lingyu Liang and Zhanghui Kuang and Lianwen Jin and Wayne Zhang}, + year={2021}, + booktitle = {CVPR} +} +``` diff --git a/doc/doc_ch/algorithm_det_psenet.md b/doc/doc_ch/algorithm_det_psenet.md new file mode 100644 index 0000000000000000000000000000000000000000..58d8ccf97292f4e988861b618697fb0e7694fbab --- /dev/null +++ b/doc/doc_ch/algorithm_det_psenet.md @@ -0,0 +1,106 @@ +# PSENet + +- [1. 算法简介](#1) +- [2. 环境配置](#2) +- [3. 模型训练、评估、预测](#3) + - [3.1 训练](#3-1) + - [3.2 评估](#3-2) + - [3.3 预测](#3-3) +- [4. 推理部署](#4) + - [4.1 Python推理](#4-1) + - [4.2 C++推理](#4-2) + - [4.3 Serving服务化部署](#4-3) + - [4.4 更多推理部署](#4-4) +- [5. FAQ](#5) + + +## 1. 
算法简介 + +论文信息: +> [Shape robust text detection with progressive scale expansion network](https://arxiv.org/abs/1903.12473) +> Wang, Wenhai and Xie, Enze and Li, Xiang and Hou, Wenbo and Lu, Tong and Yu, Gang and Shao, Shuai +> CVPR, 2019 + +在ICDAR2015文本检测公开数据集上,算法复现效果如下: + +|模型|骨干网络|配置文件|precision|recall|Hmean|下载链接| +| --- | --- | --- | --- | --- | --- | --- | +|PSE| ResNet50_vd | [configs/det/det_r50_vd_pse.yml](../../configs/det/det_r50_vd_pse.yml)| 85.81% |79.53%|82.55%|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_vd_pse_v2.0_train.tar)| +|PSE| MobileNetV3| [configs/det/det_mv3_pse.yml](../../configs/det/det_mv3_pse.yml) | 82.20% |70.48%|75.89%|[训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_mv3_pse_v2.0_train.tar)| + + +## 2. 环境配置 +请先参考[《运行环境准备》](./environment.md)配置PaddleOCR运行环境,参考[《项目克隆》](./clone.md)克隆项目代码。 + + + +## 3. 模型训练、评估、预测 + +上述PSE模型使用ICDAR2015文本检测公开数据集训练得到,数据集下载可参考 [ocr_datasets](./dataset/ocr_datasets.md)。 + +数据下载完成后,请参考[文本检测训练教程](./detection.md)进行训练。PaddleOCR对代码进行了模块化,训练不同的检测模型只需要**更换配置文件**即可。 + + + +## 4. 推理部署 + + +### 4.1 Python推理 +首先将PSE文本检测训练过程中保存的模型,转换成inference model。以基于Resnet50_vd骨干网络,在ICDAR2015英文数据集训练的模型为例( [模型下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_vd_pse_v2.0_train.tar) ),可以使用如下命令进行转换: + +```shell +python3 tools/export_model.py -c configs/det/det_r50_vd_pse.yml -o Global.pretrained_model=./det_r50_vd_pse_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_pse +``` + +PSE文本检测模型推理,执行非弯曲文本检测,可以执行如下命令: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_model_dir="./inference/det_pse/" --det_algorithm="PSE" --det_pse_box_type=quad +``` + +可视化文本检测结果默认保存到`./inference_results`文件夹里面,结果文件的名称前缀为'det_res'。结果示例如下: + +![](../imgs_results/det_res_img_10_pse.jpg) + +如果想执行弯曲文本检测,可以执行如下命令: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_model_dir="./inference/det_pse/" --det_algorithm="PSE" --det_pse_box_type=poly +``` + +可视化文本检测结果默认保存到`./inference_results`文件夹里面,结果文件的名称前缀为'det_res'。结果示例如下: + +![](../imgs_results/det_res_img_10_pse_poly.jpg) + +**注意**:由于ICDAR2015数据集只有1000张训练图像,且主要针对英文场景,所以上述模型对中文或弯曲文本图像检测效果会比较差。 + + +### 4.2 C++推理 + +由于后处理暂未使用CPP编写,PSE文本检测模型暂不支持CPP推理。 + + +### 4.3 Serving服务化部署 + +暂未支持 + + +### 4.4 更多推理部署 + +暂未支持 + + +## 5. 
FAQ + + +## 引用 + +```bibtex +@inproceedings{wang2019shape, + title={Shape robust text detection with progressive scale expansion network}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Hou, Wenbo and Lu, Tong and Yu, Gang and Shao, Shuai}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={9336--9345}, + year={2019} +} +``` diff --git a/doc/doc_ch/datasets.md b/doc/doc_ch/dataset/datasets.md similarity index 90% rename from doc/doc_ch/datasets.md rename to doc/doc_ch/dataset/datasets.md index d365fd711aff2dffcd30dd06028734cc707d5df0..aad4f50b2d8baa369cf6f2576a24127a23cb5c48 100644 --- a/doc/doc_ch/datasets.md +++ b/doc/doc_ch/dataset/datasets.md @@ -6,17 +6,17 @@ - [中文文档文字识别](#中文文档文字识别) - [ICDAR2019-ArT](#ICDAR2019-ArT) -除了开源数据,用户还可使用合成工具自行合成,可参考[数据合成工具](./data_synthesis.md); +除了开源数据,用户还可使用合成工具自行合成,可参考[数据合成工具](../data_synthesis.md); -如果需要标注自己的数据,可参考[数据标注工具](./data_annotation.md)。 +如果需要标注自己的数据,可参考[数据标注工具](../data_annotation.md)。 #### 1、ICDAR2019-LSVT - **数据来源**:https://ai.baidu.com/broad/introduction?dataset=lsvt - **数据简介**: 共45w中文街景图像,包含5w(2w测试+3w训练)全标注数据(文本坐标+文本内容),40w弱标注数据(仅文本内容),如下图所示: - ![](../datasets/LSVT_1.jpg) + ![](../../datasets/LSVT_1.jpg) (a) 全标注数据 - ![](../datasets/LSVT_2.jpg) + ![](../../datasets/LSVT_2.jpg) (b) 弱标注数据 - **下载地址**:https://ai.baidu.com/broad/download?dataset=lsvt - **说明**:其中,test数据集的label目前没有开源,如要评估结果,可以去官网提交:https://rrc.cvc.uab.es/?ch=16 @@ -25,16 +25,16 @@ #### 2、ICDAR2017-RCTW-17 - **数据来源**:https://rctw.vlrlab.net/ - **数据简介**:共包含12,000+图像,大部分图片是通过手机摄像头在野外采集的。有些是截图。这些图片展示了各种各样的场景,包括街景、海报、菜单、室内场景和手机应用程序的截图。 - ![](../datasets/rctw.jpg) + ![](../../datasets/rctw.jpg) - **下载地址**:https://rctw.vlrlab.net/dataset/ -#### 3、中文街景文字识别 +#### 3、中文街景文字识别 - **数据来源**:https://aistudio.baidu.com/aistudio/competition/detail/8 - **数据简介**:ICDAR2019-LSVT行识别任务,共包括29万张图片,其中21万张图片作为训练集(带标注),8万张作为测试集(无标注)。数据集采自中国街景,并由街景图片中的文字行区域(例如店铺标牌、地标等等)截取出来而形成。所有图像都经过一些预处理,将文字区域利用仿射变化,等比映射为一张高为48像素的图片,如图所示: - ![](../datasets/ch_street_rec_1.png) + ![](../../datasets/ch_street_rec_1.png) (a) 标注:魅派集成吊顶 - ![](../datasets/ch_street_rec_2.png) + ![](../../datasets/ch_street_rec_2.png) (b) 标注:母婴用品连锁 - **下载地址** https://aistudio.baidu.com/aistudio/datasetdetail/8429 @@ -48,15 +48,15 @@ https://aistudio.baidu.com/aistudio/datasetdetail/8429 - 包含汉字、英文字母、数字和标点共5990个字符(字符集合:https://github.com/YCG09/chinese_ocr/blob/master/train/char_std_5990.txt ) - 每个样本固定10个字符,字符随机截取自语料库中的句子 - 图片分辨率统一为280x32 - ![](../datasets/ch_doc1.jpg) - ![](../datasets/ch_doc3.jpg) + ![](../../datasets/ch_doc1.jpg) + ![](../../datasets/ch_doc3.jpg) - **下载地址**:https://pan.baidu.com/s/1QkI7kjah8SPHwOQ40rS1Pw (密码:lu7m) #### 5、ICDAR2019-ArT - **数据来源**:https://ai.baidu.com/broad/introduction?dataset=art - **数据简介**:共包含10,166张图像,训练集5603图,测试集4563图。由Total-Text、SCUT-CTW1500、Baidu Curved Scene Text (ICDAR2019-LSVT部分弯曲数据) 三部分组成,包含水平、多方向和弯曲等多种形状的文本。 - ![](../datasets/ArT.jpg) + ![](../../datasets/ArT.jpg) - **下载地址**:https://ai.baidu.com/broad/download?dataset=art ## 参考文献 diff --git a/doc/doc_ch/docvqa_datasets.md b/doc/doc_ch/dataset/docvqa_datasets.md similarity index 100% rename from doc/doc_ch/docvqa_datasets.md rename to doc/doc_ch/dataset/docvqa_datasets.md diff --git a/doc/doc_ch/handwritten_datasets.md b/doc/doc_ch/dataset/handwritten_datasets.md similarity index 95% rename from doc/doc_ch/handwritten_datasets.md rename to doc/doc_ch/dataset/handwritten_datasets.md index 46e85e4f9dc22e4732f654f9a1ef2a715a498fcf..6485870cdf3ede140c55e7fccce28741b22ab04d 100644 --- 
a/doc/doc_ch/handwritten_datasets.md +++ b/doc/doc_ch/dataset/handwritten_datasets.md @@ -9,7 +9,7 @@ - **数据简介**: * 包含在线和离线两类手写数据,`HWDB1.0~1.2`总共有3895135个手写单字样本,分属7356类(7185个汉字和171个英文字母、数字、符号);`HWDB2.0~2.2`总共有5091页图像,分割为52230个文本行和1349414个文字。所有文字和文本样本均存为灰度图像。部分单字样本图片如下所示。 - ![](../datasets/CASIA_0.jpg) + ![](../../datasets/CASIA_0.jpg) - **下载地址**:http://www.nlpr.ia.ac.cn/databases/handwriting/Download.html - **使用建议**:数据为单字,白色背景,可以大量合成文字行进行训练。白色背景可以处理成透明状态,方便添加各种背景。对于需要语义的情况,建议从真实语料出发,抽取单字组成文字行 @@ -22,7 +22,7 @@ - **数据简介**: NIST19数据集适用于手写文档和字符识别的模型训练,从3600位作者的手写样本表格中提取得到,总共包含81万张字符图片。其中9张图片示例如下。 - ![](../datasets/nist_demo.png) + ![](../../datasets/nist_demo.png) - **下载地址**: [https://www.nist.gov/srd/nist-special-database-19](https://www.nist.gov/srd/nist-special-database-19) diff --git a/doc/doc_ch/layout_datasets.md b/doc/doc_ch/dataset/layout_datasets.md similarity index 89% rename from doc/doc_ch/layout_datasets.md rename to doc/doc_ch/dataset/layout_datasets.md index 45ac3a1127014eed420bb816f9ee07963efff533..e7055b4e607aae358a9ec1e93f3640b2b68ea4a1 100644 --- a/doc/doc_ch/layout_datasets.md +++ b/doc/doc_ch/dataset/layout_datasets.md @@ -27,7 +27,7 @@ #### 2、CDLA数据集 - **数据来源**:https://github.com/buptlihang/CDLA -- **数据简介**:publaynet数据集的训练集合中包含5000张图像,验证集合中包含1000张图像。总共包含10个类别,分别是: `Text, Title, Figure, Figure caption, Table, Table caption, Header, Footer, Reference, Equation`。部分图像以及标注框可视化如下所示。 +- **数据简介**:CDLA据集的训练集合中包含5000张图像,验证集合中包含1000张图像。总共包含10个类别,分别是: `Text, Title, Figure, Figure caption, Table, Table caption, Header, Footer, Reference, Equation`。部分图像以及标注框可视化如下所示。
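The quantization-aware training entry points touched earlier in this diff (`deploy/slim/quantization/quant.py` and `deploy/slim/quantization/export_model.py`) follow the same `-c` config / `-o` override convention as the other PaddleOCR tools, and with the distillation architecture the export step writes one inference model per sub-model. A minimal usage sketch with the new `ch_PP-OCRv3_det_cml.yml` config; the pretrained-weight and output paths below are illustrative placeholders, not values taken from this PR:

```shell
# Quantization-aware training of the PP-OCRv3 detection CML model
# (pretrained_model / save_model_dir are illustrative placeholders)
python3 deploy/slim/quantization/quant.py \
    -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \
    -o Global.pretrained_model=./pretrain_models/det_cml_pretrained/best_accuracy \
       Global.save_model_dir=./output/det_ppocrv3_quant

# Export the quantized checkpoints; for the Distillation architecture each
# sub-model (Student / Student2 / Teacher) is saved to its own inference dir
python3 deploy/slim/quantization/export_model.py \
    -c configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml \
    -o Global.checkpoints=./output/det_ppocrv3_quant/best_accuracy \
       Global.save_inference_dir=./inference/det_ppocrv3_quant
```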
diff --git a/doc/doc_ch/dataset/ocr_datasets.md b/doc/doc_ch/dataset/ocr_datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..c6ff2e170f7c30a29e98ed2b1349cae2b84cf441 --- /dev/null +++ b/doc/doc_ch/dataset/ocr_datasets.md @@ -0,0 +1,164 @@ +# OCR数据集 + +- [1. 文本检测](#1-文本检测) + - [1.1 PaddleOCR 文字检测数据格式](#11-paddleocr-文字检测数据格式) + - [1.2 公开数据集](#12-公开数据集) + - [1.2.1 ICDAR 2015](#121-icdar-2015) +- [2. 文本识别](#2-文本识别) + - [2.1 PaddleOCR 文字识别数据格式](#21-paddleocr-文字识别数据格式) + - [2.2 公开数据集](#22-公开数据集) + - [2.1 ICDAR 2015](#21-icdar-2015) +- [3. 数据存放路径](#3-数据存放路径) + +这里整理了OCR中常用的公开数据集,持续更新中,欢迎各位小伙伴贡献数据集~ + +## 1. 文本检测 + +### 1.1 PaddleOCR 文字检测数据格式 + + +PaddleOCR 中的文本检测算法支持的标注文件格式如下,中间用"\t"分隔: +``` +" 图像文件名 json.dumps编码的图像标注信息" +ch4_test_images/img_61.jpg [{"transcription": "MASA", "points": [[310, 104], [416, 141], [418, 216], [312, 179]]}, {...}] +``` +json.dumps编码前的图像标注信息是包含多个字典的list,字典中的 `points` 表示文本框的四个点的坐标(x, y),从左上角的点开始顺时针排列。 +`transcription` 表示当前文本框的文字,**当其内容为“###”时,表示该文本框无效,在训练时会跳过。** + +如果您想在我们未提供的数据集上训练,可以按照上述形式构建标注文件。 + +### 1.2 公开数据集 + +| 数据集名称 |图片下载地址| PaddleOCR 标注下载地址 | +|---|---|---| +| ICDAR 2015 |https://rrc.cvc.uab.es/?ch=4&com=downloads| [train](https://paddleocr.bj.bcebos.com/dataset/train_icdar2015_label.txt) / [test](https://paddleocr.bj.bcebos.com/dataset/test_icdar2015_label.txt) | +| ctw1500 |https://paddleocr.bj.bcebos.com/dataset/ctw1500.zip| 图片下载地址中已包含 | +| total text |https://paddleocr.bj.bcebos.com/dataset/total_text.tar| 图片下载地址中已包含 | + +#### 1.2.1 ICDAR 2015 +ICDAR 2015 数据集包含1000张训练图像和500张测试图像。ICDAR 2015 数据集可以从上表中链接下载,首次下载需注册。 +注册完成登陆后,下载下图中红色框标出的部分,其中, `Training Set Images`下载的内容保存在`icdar_c4_train_imgs`文件夹下,`Test Set Images` 下载的内容保存早`ch4_test_images`文件夹下 + +

+ +

+ +将下载到的数据集解压到工作目录下,假设解压在 PaddleOCR/train_data/下。然后从上表中下载转换好的标注文件。 + +PaddleOCR 也提供了数据格式转换脚本,可以将官网 label 转换支持的数据格式。 数据转换工具在 `ppocr/utils/gen_label.py`, 这里以训练集为例: + +``` +# 将官网下载的标签文件转换为 train_icdar2015_label.txt +python gen_label.py --mode="det" --root_path="/path/to/icdar_c4_train_imgs/" \ + --input_path="/path/to/ch4_training_localization_transcription_gt" \ + --output_label="/path/to/train_icdar2015_label.txt" +``` + +解压数据集和下载标注文件后,PaddleOCR/train_data/ 有两个文件夹和两个文件,按照如下方式组织icdar2015数据集: +``` +/PaddleOCR/train_data/icdar2015/text_localization/ + └─ icdar_c4_train_imgs/ icdar 2015 数据集的训练数据 + └─ ch4_test_images/ icdar 2015 数据集的测试数据 + └─ train_icdar2015_label.txt icdar 2015 数据集的训练标注 + └─ test_icdar2015_label.txt icdar 2015 数据集的测试标注 +``` + +## 2. 文本识别 + +### 2.1 PaddleOCR 文字识别数据格式 + +PaddleOCR 中的文字识别算法支持两种数据格式: + + - `lmdb` 用于训练以lmdb格式存储的数据集,使用 [lmdb_dataset.py](../../../ppocr/data/lmdb_dataset.py) 进行读取; + - `通用数据` 用于训练以文本文件存储的数据集,使用 [simple_dataset.py](../../../ppocr/data/simple_dataset.py)进行读取。 + +下面以通用数据集为例, 介绍如何准备数据集: + +* 训练集 + +建议将训练图片放入同一个文件夹,并用一个txt文件(rec_gt_train.txt)记录图片路径和标签,txt文件里的内容如下: + +**注意:** txt文件中默认请将图片路径和图片标签用 \t 分割,如用其他方式分割将造成训练报错。 + +``` +" 图像文件名 图像标注信息 " + +train_data/rec/train/word_001.jpg 简单可依赖 +train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 +... +``` + +最终训练集应有如下文件结构: +``` +|-train_data + |-rec + |- rec_gt_train.txt + |- train + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... +``` + +除上述单张图像为一行格式之外,PaddleOCR也支持对离线增广后的数据进行训练,为了防止相同样本在同一个batch中被多次采样,我们可以将相同标签对应的图片路径写在一行中,以列表的形式给出,在训练中,PaddleOCR会随机选择列表中的一张图片进行训练。对应地,标注文件的格式如下。 + +``` +["11.jpg", "12.jpg"] 简单可依赖 +["21.jpg", "22.jpg", "23.jpg"] 用科技让复杂的世界更简单 +3.jpg ocr +``` + +上述示例标注文件中,"11.jpg"和"12.jpg"的标签相同,都是`简单可依赖`,在训练的时候,对于该行标注,会随机选择其中的一张图片进行训练。 + + +- 验证集 + +同训练集类似,验证集也需要提供一个包含所有图片的文件夹(test)和一个rec_gt_test.txt,验证集的结构如下所示: + +``` +|-train_data + |-rec + |- rec_gt_test.txt + |- test + |- word_001.jpg + |- word_002.jpg + |- word_003.jpg + | ... +``` + + +### 2.2 公开数据集 + +| 数据集名称 | 图片下载地址 | PaddleOCR 标注下载地址 | +|---|---|---------------------------------------------------------------------| +| en benchmark(MJ, SJ, IIIT, SVT, IC03, IC13, IC15, SVTP, and CUTE.) | [DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) | LMDB格式,可直接用[lmdb_dataset.py](../../../ppocr/data/lmdb_dataset.py)加载 | +|ICDAR 2015| http://rrc.cvc.uab.es/?ch=4&com=downloads | [train](https://paddleocr.bj.bcebos.com/dataset/rec_gt_train.txt)/ [test](https://paddleocr.bj.bcebos.com/dataset/rec_gt_test.txt) | +| 多语言数据集 |[百度网盘](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA) 提取码:frgi
[google drive](https://drive.google.com/file/d/18cSWX7wXSy4G0tbKJ0d9PuIaiwRLHpjA/view) | 图片下载地址中已包含 | + +#### 2.1 ICDAR 2015 + +ICDAR 2015 数据集可以在上表中链接下载,用于快速验证。也可以从上表中下载 en benchmark 所需的lmdb格式数据集。 + +下载完图片后从上表中下载转换好的标注文件。 + +PaddleOCR 也提供了数据格式转换脚本,可以将ICDAR官网 label 转换为PaddleOCR支持的数据格式。 数据转换工具在 `ppocr/utils/gen_label.py`, 这里以训练集为例: + +``` +# 将官网下载的标签文件转换为 rec_gt_label.txt +python gen_label.py --mode="rec" --input_path="{path/of/origin/label}" --output_label="rec_gt_label.txt" +``` + +数据样式格式如下,(a)为原始图片,(b)为每张图片对应的 Ground Truth 文本文件: +![](../../datasets/icdar_rec.png) + + +## 3. 数据存放路径 + +PaddleOCR训练数据的默认存储路径是 `PaddleOCR/train_data`,如果您的磁盘上已有数据集,只需创建软链接至数据集目录: + +``` +# linux and mac os +ln -sf /train_data/dataset +# windows +mklink /d /train_data/dataset +``` diff --git a/doc/doc_ch/dataset/table_datasets.md b/doc/doc_ch/dataset/table_datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..ae902b23ccf985d522386b7454c7f76a74917502 --- /dev/null +++ b/doc/doc_ch/dataset/table_datasets.md @@ -0,0 +1,33 @@ +# 表格识别数据集 + +- [数据集汇总](#数据集汇总) +- [1. PubTabNet数据集](#1-pubtabnet数据集) +- [2. 好未来表格识别竞赛数据集](#2-好未来表格识别竞赛数据集) + +这里整理了常用表格识别数据集,持续更新中,欢迎各位小伙伴贡献数据集~ + +## 数据集汇总 + +| 数据集名称 |图片下载地址| PPOCR标注下载地址 | +|---|---|---| +| PubTabNet |https://github.com/ibm-aur-nlp/PubTabNet| jsonl格式,可直接用[pubtab_dataset.py](../../../ppocr/data/pubtab_dataset.py)加载 | +| 好未来表格识别竞赛数据集 |https://ai.100tal.com/dataset| jsonl格式,可直接用[pubtab_dataset.py](../../../ppocr/data/pubtab_dataset.py)加载 | + +## 1. PubTabNet数据集 +- **数据简介**:PubTabNet数据集的训练集合中包含50万张图像,验证集合中包含0.9万张图像。部分图像可视化如下所示。 + + +

+ + +
+ +- **说明**:使用该数据集时,需要遵守[CDLA-Permissive](https://cdla.io/permissive-1-0/)协议。 + +## 2. 好未来表格识别竞赛数据集 +- **数据简介**:好未来表格识别竞赛数据集的训练集合中包含1.6万张图像。验证集未给出可训练的标注。 + +
+ + +
diff --git a/doc/doc_ch/vertical_and_multilingual_datasets.md b/doc/doc_ch/dataset/vertical_and_multilingual_datasets.md similarity index 96% rename from doc/doc_ch/vertical_and_multilingual_datasets.md rename to doc/doc_ch/dataset/vertical_and_multilingual_datasets.md index 802ade5f8eb3b0d3cc8335034a8fda8821464a8b..095b7713e1a13211f81249bf16db44a609e6e668 100644 --- a/doc/doc_ch/vertical_and_multilingual_datasets.md +++ b/doc/doc_ch/dataset/vertical_and_multilingual_datasets.md @@ -22,7 +22,7 @@ * CCPD-Challenge: 至今在车牌检测识别任务中最有挑战性的一些图片 * CCPD-NP: 没有安装车牌的新车图片。 - ![](../datasets/ccpd_demo.png) + ![](../../datasets/ccpd_demo.png) - **下载地址** @@ -46,7 +46,7 @@ * 有效期结束:07/41 * 卡用户拼音:MICHAEL - ![](../datasets/cmb_demo.jpg) + ![](../../datasets/cmb_demo.jpg) - **下载地址**: [https://cdn.kesci.com/cmb2017-2.zip](https://cdn.kesci.com/cmb2017-2.zip) @@ -59,7 +59,7 @@ - **数据简介**: 这是一个数据合成的工具包,可以根据输入的文本,输出验证码图片,使用该工具包生成几张demo图片如下。 - ![](../datasets/captcha_demo.png) + ![](../../datasets/captcha_demo.png) - **下载地址**: 该数据集是生成得到,无下载地址。 diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md index 9dc910c5cdc5fcca522dfa418bb34591a46faf26..a915bc60fb9613f4c80e9bc0ae4bdfa3d630f052 100644 --- a/doc/doc_ch/detection.md +++ b/doc/doc_ch/detection.md @@ -1,75 +1,31 @@ - # 文字检测 本节以icdar2015数据集为例,介绍PaddleOCR中检测模型训练、评估、测试的使用方式。 -- [1. 准备数据和模型](#1--------) - * [1.1 数据准备](#11-----) - * [1.2 下载预训练模型](#12--------) -- [2. 开始训练](#2-----) - * [2.1 启动训练](#21-----) - * [2.2 断点训练](#22-----) - * [2.3 更换Backbone 训练](#23---backbone---) - * [2.4 混合精度训练](#24---amp---) - * [2.5 分布式训练](#25---fleet---) - * [2.6 知识蒸馏训练](#26---distill---) - * [2.7 其他训练环境(Windows/macOS/Linux DCU)](#27---other---) -- [3. 模型评估与预测](#3--------) - * [3.1 指标评估](#31-----) - * [3.2 测试检测效果](#32-------) -- [4. 模型导出与预测](#4--------) +- [1. 准备数据和模型](#1-准备数据和模型) + - [1.1 准备数据集](#11-准备数据集) + - [1.2 下载预训练模型](#12-下载预训练模型) +- [2. 开始训练](#2-开始训练) + - [2.1 启动训练](#21-启动训练) + - [2.2 断点训练](#22-断点训练) + - [2.3 更换Backbone 训练](#23-更换backbone-训练) + - [2.4 混合精度训练](#24-混合精度训练) + - [2.5 分布式训练](#25-分布式训练) + - [2.6 知识蒸馏训练](#26-知识蒸馏训练) + - [2.7 其他训练环境](#27-其他训练环境) +- [3. 模型评估与预测](#3-模型评估与预测) + - [3.1 指标评估](#31-指标评估) + - [3.2 测试检测效果](#32-测试检测效果) +- [4. 模型导出与预测](#4-模型导出与预测) - [5. FAQ](#5-faq) # 1. 准备数据和模型 - -## 1.1 数据准备 - -icdar2015 TextLocalization数据集是文本检测的数据集,包含1000张训练图像和500张测试图像。 -icdar2015数据集可以从[官网](https://rrc.cvc.uab.es/?ch=4&com=downloads)下载到,首次下载需注册。 -注册完成登陆后,下载下图中红色框标出的部分,其中, `Training Set Images`下载的内容保存为`icdar_c4_train_imgs`文件夹下,`Test Set Images` 下载的内容保存为`ch4_test_images`文件夹下 - -

- -

- -将下载到的数据集解压到工作目录下,假设解压在 PaddleOCR/train_data/下。另外,PaddleOCR将零散的标注文件整理成单独的标注文件 -,您可以通过wget的方式进行下载。 -```shell -# 在PaddleOCR路径下 -cd PaddleOCR/ -wget -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/train_icdar2015_label.txt -wget -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/test_icdar2015_label.txt -``` - -PaddleOCR 也提供了数据格式转换脚本,可以将官网 label 转换支持的数据格式。 数据转换工具在 `ppocr/utils/gen_label.py`, 这里以训练集为例: - -``` -# 将官网下载的标签文件转换为 train_icdar2015_label.txt -python gen_label.py --mode="det" --root_path="/path/to/icdar_c4_train_imgs/" \ - --input_path="/path/to/ch4_training_localization_transcription_gt" \ - --output_label="/path/to/train_icdar2015_label.txt" -``` - -解压数据集和下载标注文件后,PaddleOCR/train_data/ 有两个文件夹和两个文件,按照如下方式组织icdar2015数据集: -``` -/PaddleOCR/train_data/icdar2015/text_localization/ - └─ icdar_c4_train_imgs/ icdar数据集的训练数据 - └─ ch4_test_images/ icdar数据集的测试数据 - └─ train_icdar2015_label.txt icdar数据集的训练标注 - └─ test_icdar2015_label.txt icdar数据集的测试标注 -``` +## 1.1 准备数据集 -提供的标注文件格式如下,中间用"\t"分隔: -``` -" 图像文件名 json.dumps编码的图像标注信息" -ch4_test_images/img_61.jpg [{"transcription": "MASA", "points": [[310, 104], [416, 141], [418, 216], [312, 179]]}, {...}] -``` -json.dumps编码前的图像标注信息是包含多个字典的list,字典中的 `points` 表示文本框的四个点的坐标(x, y),从左上角的点开始顺时针排列。 -`transcription` 表示当前文本框的文字,**当其内容为“###”时,表示该文本框无效,在训练时会跳过。** +准备数据集可参考 [ocr_datasets](./dataset/ocr_datasets.md) 。 -如果您想在其他数据集上训练,可以按照上述形式构建标注文件。 ## 1.2 下载预训练模型 @@ -178,7 +134,7 @@ args1: args1 ## 2.4 混合精度训练 如果您想进一步加快训练速度,可以使用[自动混合精度训练](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/basic_concept/amp_cn.html), 以单机单卡为例,命令如下: - + ```shell python3 tools/train.py -c configs/det/det_mv3_db.yml \ -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained \ @@ -197,7 +153,7 @@ python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1 **注意:** 采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通。另外,训练时需要在多个机器上分别启动命令。查看机器ip地址的命令为`ifconfig`。 - + ## 2.6 知识蒸馏训练 @@ -211,12 +167,12 @@ PaddleOCR支持了基于知识蒸馏的检测模型训练过程,更多内容 ## 2.7 其他训练环境 - Windows GPU/CPU - + - macOS - + - Linux DCU - - + + # 3. 
模型评估与预测 diff --git a/doc/doc_ch/ppocr_introduction.md b/doc/doc_ch/ppocr_introduction.md index d9b5a4e0231dcec271c12942dfdb108854b530ae..2e25ebc9501d2e916b86867bf265490aa0971be0 100644 --- a/doc/doc_ch/ppocr_introduction.md +++ b/doc/doc_ch/ppocr_introduction.md @@ -17,6 +17,8 @@ PP-OCR是PaddleOCR自研的实用的超轻量OCR系统。在实现[前沿算法](algorithm.md)的基础上,考虑精度与速度的平衡,进行**模型瘦身**和**深度优化**,使其尽可能满足产业落地需求。 +#### PP-OCR + PP-OCR是一个两阶段的OCR系统,其中文本检测算法选用[DB](algorithm_det_db.md),文本识别算法选用[CRNN](algorithm_rec_crnn.md),并在检测和识别模块之间添加[文本方向分类器](angle_class.md),以应对不同方向的文本识别。 PP-OCR系统pipeline如下: @@ -28,9 +30,13 @@ PP-OCR系统pipeline如下: PP-OCR系统在持续迭代优化,目前已发布PP-OCR和PP-OCRv2两个版本: -[1] PP-OCR从骨干网络选择和调整、预测头部的设计、数据增强、学习率变换策略、正则化参数选择、预训练模型使用以及模型自动裁剪量化8个方面,采用19个有效策略,对各个模块的模型进行效果调优和瘦身(如绿框所示),最终得到整体大小为3.5M的超轻量中英文OCR和2.8M的英文数字OCR。更多细节请参考PP-OCR技术方案 https://arxiv.org/abs/2009.09941 +PP-OCR从骨干网络选择和调整、预测头部的设计、数据增强、学习率变换策略、正则化参数选择、预训练模型使用以及模型自动裁剪量化8个方面,采用19个有效策略,对各个模块的模型进行效果调优和瘦身(如绿框所示),最终得到整体大小为3.5M的超轻量中英文OCR和2.8M的英文数字OCR。更多细节请参考PP-OCR技术方案 https://arxiv.org/abs/2009.09941 + +#### PP-OCRv2 + +PP-OCRv2在PP-OCR的基础上,进一步在5个方面重点优化,检测模型采用CML协同互学习知识蒸馏策略和CopyPaste数据增广策略;识别模型采用LCNet轻量级骨干网络、UDML 改进知识蒸馏策略和[Enhanced CTC loss](./doc/doc_ch/enhanced_ctc_loss.md)损失函数改进(如上图红框所示),进一步在推理速度和预测效果上取得明显提升。更多细节请参考PP-OCRv2[技术报告](https://arxiv.org/abs/2109.03144)。 -[2] PP-OCRv2在PP-OCR的基础上,进一步在5个方面重点优化,检测模型采用CML协同互学习知识蒸馏策略和CopyPaste数据增广策略;识别模型采用LCNet轻量级骨干网络、UDML 改进知识蒸馏策略和[Enhanced CTC loss](./doc/doc_ch/enhanced_ctc_loss.md)损失函数改进(如上图红框所示),进一步在推理速度和预测效果上取得明显提升。更多细节请参考PP-OCRv2[技术报告](https://arxiv.org/abs/2109.03144)。 +#### PP-OCRv3 diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index 6cdd547517ebb8888374b22c1b52314da53eebab..4a1fe7ae956c187a7293d5ace99b70f499f15fd4 100644 --- a/doc/doc_ch/recognition.md +++ b/doc/doc_ch/recognition.md @@ -2,133 +2,31 @@ 本文提供了PaddleOCR文本识别任务的全流程指南,包括数据准备、模型训练、调优、评估、预测,各个阶段的详细说明: -- [文字识别](#文字识别) - - [1. 数据准备](#1-数据准备) - - [1.1 自定义数据集](#11-自定义数据集) - - [1.2 数据下载](#12-数据下载) - - [1.3 字典](#13-字典) - - [1.4 添加空格类别](#14-添加空格类别) - - [2. 启动训练](#2-启动训练) - - [2.1 数据增强](#21-数据增强) - - [2.2 通用模型训练](#22-通用模型训练) - - [2.3 多语言模型训练](#23-多语言模型训练) - - [2.4 知识蒸馏训练](#24-知识蒸馏训练) - - [3 评估](#3-评估) - - [4 预测](#4-预测) - - [5. 转Inference模型测试](#5-转inference模型测试) +- [1. 数据准备](#1-数据准备) + - [1.1 准备数据集](#11-准备数据集) + - [1.2 字典](#12-字典) + - [1.3 添加空格类别](#13-添加空格类别) +- [2. 启动训练](#2-启动训练) + - [2.1 数据增强](#21-数据增强) + - [2.2 通用模型训练](#22-通用模型训练) + - [2.3 多语言模型训练](#23-多语言模型训练) + - [2.4 知识蒸馏训练](#24-知识蒸馏训练) +- [3 评估](#3-评估) +- [4 预测](#4-预测) +- [5. 转Inference模型测试](#5-转inference模型测试) ## 1. 数据准备 +### 1.1 准备数据集 -PaddleOCR 支持两种数据格式: - - `lmdb` 用于训练以lmdb格式存储的数据集(LMDBDataSet); - - `通用数据` 用于训练以文本文件存储的数据集(SimpleDataSet); - -训练数据的默认存储路径是 `PaddleOCR/train_data`,如果您的磁盘上已有数据集,只需创建软链接至数据集目录: - -``` -# linux and mac os -ln -sf /train_data/dataset -# windows -mklink /d /train_data/dataset -``` - - -### 1.1 自定义数据集 -下面以通用数据集为例, 介绍如何准备数据集: - -* 训练集 - -建议将训练图片放入同一个文件夹,并用一个txt文件(rec_gt_train.txt)记录图片路径和标签,txt文件里的内容如下: - -**注意:** txt文件中默认请将图片路径和图片标签用 \t 分割,如用其他方式分割将造成训练报错。 - -``` -" 图像文件名 图像标注信息 " - -train_data/rec/train/word_001.jpg 简单可依赖 -train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 -... -``` - -最终训练集应有如下文件结构: -``` -|-train_data - |-rec - |- rec_gt_train.txt - |- train - |- word_001.png - |- word_002.jpg - |- word_003.jpg - | ... 
-``` - -除上述单张图像为一行格式之外,PaddleOCR也支持对离线增广后的数据进行训练,为了防止相同样本在同一个batch中被多次采样,我们可以将相同标签对应的图片路径写在一行中,以列表的形式给出,在训练中,PaddleOCR会随机选择列表中的一张图片进行训练。对应地,标注文件的格式如下。 - -``` -["11.jpg", "12.jpg"] 简单可依赖 -["21.jpg", "22.jpg", "23.jpg"] 用科技让复杂的世界更简单 -3.jpg ocr -``` - -上述示例标注文件中,"11.jpg"和"12.jpg"的标签相同,都是`简单可依赖`,在训练的时候,对于该行标注,会随机选择其中的一张图片进行训练。 - - -- 验证集 - -同训练集类似,验证集也需要提供一个包含所有图片的文件夹(test)和一个rec_gt_test.txt,验证集的结构如下所示: - -``` -|-train_data - |-rec - |- rec_gt_test.txt - |- test - |- word_001.jpg - |- word_002.jpg - |- word_003.jpg - | ... -``` - - - -### 1.2 数据下载 - -- ICDAR2015 - -若您本地没有数据集,可以在官网下载 [ICDAR2015](http://rrc.cvc.uab.es/?ch=4&com=downloads) 数据,用于快速验证。也可以参考[DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) ,下载 benchmark 所需的lmdb格式数据集。 +准备数据集可参考 [ocr_datasets](./dataset/ocr_datasets.md) 。 如果希望复现SAR的论文指标,需要下载[SynthAdd](https://pan.baidu.com/share/init?surl=uV0LtoNmcxbO-0YA7Ch4dg), 提取码:627x。此外,真实数据集icdar2013, icdar2015, cocotext, IIIT5也作为训练数据的一部分。具体数据细节可以参考论文SAR。 -如果你使用的是icdar2015的公开数据集,PaddleOCR 提供了一份用于训练 ICDAR2015 数据集的标签文件,通过以下方式下载: - -``` -# 训练集标签 -wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_train.txt -# 测试集标签 -wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_test.txt -``` - -PaddleOCR 也提供了数据格式转换脚本,可以将ICDAR官网 label 转换为PaddleOCR支持的数据格式。 数据转换工具在 `ppocr/utils/gen_label.py`, 这里以训练集为例: - -``` -# 将官网下载的标签文件转换为 rec_gt_label.txt -python gen_label.py --mode="rec" --input_path="{path/of/origin/label}" --output_label="rec_gt_label.txt" -``` - -数据样式格式如下,(a)为原始图片,(b)为每张图片对应的 Ground Truth 文本文件: -![](../datasets/icdar_rec.png) - -- 多语言数据集 - -多语言模型的训练数据集均为100w的合成数据,使用了开源合成工具 [text_renderer](https://github.com/Sanster/text_renderer) ,少量的字体可以通过下面两种方式下载。 -* [百度网盘](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA) 提取码:frgi -* [google drive](https://drive.google.com/file/d/18cSWX7wXSy4G0tbKJ0d9PuIaiwRLHpjA/view) - - -### 1.3 字典 +### 1.2 字典 最后需要提供一个字典({word_dict_name}.txt),使模型在训练时,可以将所有出现的字符映射为字典的索引。 @@ -174,7 +72,7 @@ PaddleOCR内置了一部分字典,可以按需使用。 如需自定义dic文件,请在 `configs/rec/rec_icdar15_train.yml` 中添加 `character_dict_path` 字段, 指向您的字典路径。 -### 1.4 添加空格类别 +### 1.3 添加空格类别 如果希望支持识别"空格"类别, 请将yml文件中的 `use_space_char` 字段设置为 `True`。 diff --git a/doc/doc_ch/table_datasets.md b/doc/doc_ch/table_datasets.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/doc/doc_ch/training.md b/doc/doc_ch/training.md index 231b83e64b48d6c6fe1192b34b1d2f11a06f4cd8..d46b9af701ee1c605526c18d6994842b3faf8e14 100644 --- a/doc/doc_ch/training.md +++ b/doc/doc_ch/training.md @@ -81,13 +81,13 @@ Optimizer: - 检测: - 英文数据集,ICDAR2015 - 中文数据集,LSVT街景数据集训练数据3w张图片 - + - 识别: - 英文数据集,MJSynth和SynthText合成数据,数据量上千万。 - 中文数据集,LSVT街景数据集根据真值将图crop出来,并进行位置校准,总共30w张图像。此外基于LSVT的语料,合成数据500w。 - 小语种数据集,使用不同语料和字体,分别生成了100w合成数据集,并使用ICDAR-MLT作为验证集。 -其中,公开数据集都是开源的,用户可自行搜索下载,也可参考[中文数据集](./datasets.md),合成数据暂不开源,用户可使用开源合成工具自行合成,可参考的合成工具包括[text_renderer](https://github.com/Sanster/text_renderer) 、[SynthText](https://github.com/ankush-me/SynthText) 、[TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator) 等。 +其中,公开数据集都是开源的,用户可自行搜索下载,也可参考[中文数据集](dataset/datasets.md),合成数据暂不开源,用户可使用开源合成工具自行合成,可参考的合成工具包括[text_renderer](https://github.com/Sanster/text_renderer) 、[SynthText](https://github.com/ankush-me/SynthText) 、[TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator) 等。 ### 3.2 垂类场景 @@ -120,17 +120,17 @@ 
PaddleOCR主要聚焦通用OCR,如果有垂类需求,您可以用PaddleOCR+ **Q**:训练CRNN识别时,如何选择合适的网络输入shape? A:一般高度采用32,最长宽度的选择,有两种方法: - + (1)统计训练样本图像的宽高比分布。最大宽高比的选取考虑满足80%的训练样本。 - + (2)统计训练样本文字数目。最长字符数目的选取考虑满足80%的训练样本。然后中文字符长宽比近似认为是1,英文认为3:1,预估一个最长宽度。 **Q**:识别训练时,训练集精度已经到达90了,但验证集精度一直在70,涨不上去怎么办? A:训练集精度90,测试集70多的话,应该是过拟合了,有两个可尝试的方法: - + (1)加入更多的增广方式或者调大增广prob的[概率](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/ppocr/data/imaug/rec_img_aug.py#L341),默认为0.4。 - + (2)调大系统的[l2 dcay值](https://github.com/PaddlePaddle/PaddleOCR/blob/a501603d54ff5513fc4fc760319472e59da25424/configs/rec/ch_ppocr_v1.1/rec_chinese_lite_train_v1.1.yml#L47) **Q**: 识别模型训练时,loss能正常下降,但acc一直为0 @@ -141,12 +141,11 @@ PaddleOCR主要聚焦通用OCR,如果有垂类需求,您可以用PaddleOCR+ *** -具体的训练教程可点击下方链接跳转: +具体的训练教程可点击下方链接跳转: -- [文本检测模型训练](./detection.md) +- [文本检测模型训练](./detection.md) - [文本识别模型训练](./recognition.md) - [文本方向分类器训练](./angle_class.md) - [知识蒸馏](./knowledge_distillation.md) - diff --git a/doc/doc_ch/update.md b/doc/doc_ch/update.md index c4c870681c6ccb5ad7702101312e5dbe47e9cb85..9071e673910f8d87762dc8f9dd097d444f36e624 100644 --- a/doc/doc_ch/update.md +++ b/doc/doc_ch/update.md @@ -22,7 +22,7 @@ - 2020.7.15 整理OCR相关数据集、常用数据标注以及合成工具 - 2020.7.9 添加支持空格的识别模型,识别效果,预测及训练方式请参考快速开始和文本识别训练相关文档 - 2020.7.9 添加数据增强、学习率衰减策略,具体参考[配置文件](./config.md) -- 2020.6.8 添加[数据集](./datasets.md),并保持持续更新 +- 2020.6.8 添加[数据集](dataset/datasets.md),并保持持续更新 - 2020.6.5 支持 `attetnion` 模型导出 `inference_model` - 2020.6.5 支持单独预测识别时,输出结果得分 - 2020.5.30 提供超轻量级中文OCR在线体验 diff --git a/doc/doc_en/FAQ_en.md b/doc/doc_en/FAQ_en.md index 5cf82a78720d15ce5b0aac37c409921474923813..5b0884276543645cc5569a99a0e24b40c29581a6 100644 --- a/doc/doc_en/FAQ_en.md +++ b/doc/doc_en/FAQ_en.md @@ -42,7 +42,7 @@ At present, the open source model, dataset and magnitude are as follows: English dataset: MJSynth and SynthText synthetic dataset, the amount of data is tens of millions. Chinese dataset: LSVT street view dataset with cropped text area, a total of 30w images. In addition, the synthesized data based on LSVT corpus is 500w. - Among them, the public datasets are opensourced, users can search and download by themselves, or refer to [Chinese data set](./datasets_en.md), synthetic data is not opensourced, users can use open-source synthesis tools to synthesize data themselves. Current available synthesis tools include [text_renderer](https://github.com/Sanster/text_renderer), [SynthText](https://github.com/ankush-me/SynthText), [TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator), etc. + Among them, the public datasets are opensourced, users can search and download by themselves, or refer to [Chinese data set](dataset/datasets_en.md), synthetic data is not opensourced, users can use open-source synthesis tools to synthesize data themselves. Current available synthesis tools include [text_renderer](https://github.com/Sanster/text_renderer), [SynthText](https://github.com/ankush-me/SynthText), [TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator), etc. 10. 
**Error in using the model with TPS module for prediction** Error message: Input(X) dims[3] and Input(Grid) dims[2] should be equal, but received X dimension[3]\(108) != Grid dimension[2]\(100) diff --git a/doc/doc_en/algorithm_det_db_en.md b/doc/doc_en/algorithm_det_db_en.md index b387a8ec217b351164d7cac878539bab19157a6e..f5f333a039acded88f0f28d302821c5eb10d7402 100644 --- a/doc/doc_en/algorithm_det_db_en.md +++ b/doc/doc_en/algorithm_det_db_en.md @@ -25,8 +25,8 @@ On the ICDAR2015 dataset, the text detection result is as follows: |Model|Backbone|Configuration|Precision|Recall|Hmean|Download| | --- | --- | --- | --- | --- | --- | --- | -|DB|ResNet50_vd|configs/det/det_r50_vd_db.yml|86.41%|78.72%|82.38%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)| -|DB|MobileNetV3|configs/det/det_mv3_db.yml|77.29%|73.08%|75.12%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)| +|DB|ResNet50_vd|[configs/det/det_r50_vd_db.yml](../../configs/det/det_r50_vd_db.yml)|86.41%|78.72%|82.38%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)| +|DB|MobileNetV3|[configs/det/det_mv3_db.yml](../../configs/det/det_mv3_db.yml)|77.29%|73.08%|75.12%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar)| diff --git a/doc/doc_en/algorithm_det_fcenet_en.md b/doc/doc_en/algorithm_det_fcenet_en.md new file mode 100644 index 0000000000000000000000000000000000000000..e15fb9a07ede3296d3de83c134457194d4639a1c --- /dev/null +++ b/doc/doc_en/algorithm_det_fcenet_en.md @@ -0,0 +1,104 @@ +# FCENet + +- [1. Introduction](#1) +- [2. Environment](#2) +- [3. Model Training / Evaluation / Prediction](#3) + - [3.1 Training](#3-1) + - [3.2 Evaluation](#3-2) + - [3.3 Prediction](#3-3) +- [4. Inference and Deployment](#4) + - [4.1 Python Inference](#4-1) + - [4.2 C++ Inference](#4-2) + - [4.3 Serving](#4-3) + - [4.4 More](#4-4) +- [5. FAQ](#5) + + +## 1. Introduction + +Paper: +> [Fourier Contour Embedding for Arbitrary-Shaped Text Detection](https://arxiv.org/abs/2104.10442) +> Yiqin Zhu and Jianyong Chen and Lingyu Liang and Zhanghui Kuang and Lianwen Jin and Wayne Zhang +> CVPR, 2021 + +On the CTW1500 dataset, the text detection result is as follows: + +|Model|Backbone|Configuration|Precision|Recall|Hmean|Download| +| --- | --- | --- | --- | --- | --- | --- | +| FCE | ResNet50_dcn | [configs/det/det_r50_vd_dcn_fce_ctw.yml](../../configs/det/det_r50_vd_dcn_fce_ctw.yml)| 88.39%|82.18%|85.27%|[trained model](https://paddleocr.bj.bcebos.com/contribution/det_r50_dcn_fce_ctw_v2.0_train.tar)| + + +## 2. Environment +Please prepare your environment referring to [prepare the environment](./environment_en.md) and [clone the repo](./clone_en.md). + + + +## 3. Model Training / Evaluation / Prediction + +The above FCE model is trained using the CTW1500 text detection public dataset. For the download of the dataset, please refer to [ocr_datasets](./dataset/ocr_datasets_en.md). + +After the data download is complete, please refer to [Text Detection Training Tutorial](./detection.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. + + +## 4. Inference and Deployment + + +### 4.1 Python Inference +First, convert the model saved in the FCE text detection training process into an inference model. 
Taking the model based on the Resnet50_vd_dcn backbone network and trained on the CTW1500 English dataset as example ([model download link](https://paddleocr.bj.bcebos.com/contribution/det_r50_dcn_fce_ctw_v2.0_train.tar)), you can use the following command to convert: + +```shell +python3 tools/export_model.py -c configs/det/det_r50_vd_dcn_fce_ctw.yml -o Global.pretrained_model=./det_r50_dcn_fce_ctw_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_fce +``` + +FCE text detection model inference, to perform non-curved text detection, you can run the following commands: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_model_dir="./inference/det_fce/" --det_algorithm="FCE" --det_fce_box_type=quad +``` + +The visualized text detection results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'det_res'. Examples of results are as follows: + +![](../imgs_results/det_res_img_10_fce.jpg) + +If you want to perform curved text detection, you can execute the following command: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img623.jpg" --det_model_dir="./inference/det_fce/" --det_algorithm="FCE" --det_fce_box_type=poly +``` + +The visualized text detection results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'det_res'. Examples of results are as follows: + +![](../imgs_results/det_res_img623_fce.jpg) + +**Note**: Since the CTW1500 dataset has only 1,000 training images, mainly for English scenes, the above model has very poor detection result on Chinese or curved text images. + + + +### 4.2 C++ Inference + +Since the post-processing is not written in CPP, the FCE text detection model does not support CPP inference. + + +### 4.3 Serving + +Not supported + + +### 4.4 More + +Not supported + + +## 5. FAQ + + +## Citation + +```bibtex +@InProceedings{zhu2021fourier, + title={Fourier Contour Embedding for Arbitrary-Shaped Text Detection}, + author={Yiqin Zhu and Jianyong Chen and Lingyu Liang and Zhanghui Kuang and Lianwen Jin and Wayne Zhang}, + year={2021}, + booktitle = {CVPR} +} +``` diff --git a/doc/doc_en/algorithm_det_psenet_en.md b/doc/doc_en/algorithm_det_psenet_en.md new file mode 100644 index 0000000000000000000000000000000000000000..d4cb3ea7d1e82a3f9c261c6e44cd6df6b0f6bf1e --- /dev/null +++ b/doc/doc_en/algorithm_det_psenet_en.md @@ -0,0 +1,107 @@ +# PSENet + +- [1. Introduction](#1) +- [2. Environment](#2) +- [3. Model Training / Evaluation / Prediction](#3) + - [3.1 Training](#3-1) + - [3.2 Evaluation](#3-2) + - [3.3 Prediction](#3-3) +- [4. Inference and Deployment](#4) + - [4.1 Python Inference](#4-1) + - [4.2 C++ Inference](#4-2) + - [4.3 Serving](#4-3) + - [4.4 More](#4-4) +- [5. FAQ](#5) + + +## 1. 
Introduction + +Paper: +> [Shape robust text detection with progressive scale expansion network](https://arxiv.org/abs/1903.12473) +> Wang, Wenhai and Xie, Enze and Li, Xiang and Hou, Wenbo and Lu, Tong and Yu, Gang and Shao, Shuai +> CVPR, 2019 + +On the ICDAR2015 dataset, the text detection result is as follows: + +|Model|Backbone|Configuration|Precision|Recall|Hmean|Download| +| --- | --- | --- | --- | --- | --- | --- | +|PSE| ResNet50_vd | [configs/det/det_r50_vd_pse.yml](../../configs/det/det_r50_vd_pse.yml)| 85.81% |79.53%|82.55%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_vd_pse_v2.0_train.tar)| +|PSE| MobileNetV3| [configs/det/det_mv3_pse.yml](../../configs/det/det_mv3_pse.yml) | 82.20% |70.48%|75.89%|[trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_mv3_pse_v2.0_train.tar)| + + + +## 2. Environment +Please prepare your environment referring to [prepare the environment](./environment_en.md) and [clone the repo](./clone_en.md). + + + +## 3. Model Training / Evaluation / Prediction + +The above PSE model is trained using the ICDAR2015 text detection public dataset. For the download of the dataset, please refer to [ocr_datasets](./dataset/ocr_datasets_en.md). + +After the data download is complete, please refer to [Text Detection Training Tutorial](./detection.md) for training. PaddleOCR has modularized the code structure, so that you only need to **replace the configuration file** to train different detection models. + + +## 4. Inference and Deployment + + +### 4.1 Python Inference +First, convert the model saved in the PSE text detection training process into an inference model. Taking the model based on the Resnet50_vd backbone network and trained on the ICDAR2015 English dataset as example ([model download link](https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_vd_pse_v2.0_train.tar)), you can use the following command to convert: + +```shell +python3 tools/export_model.py -c configs/det/det_r50_vd_pse.yml -o Global.pretrained_model=./det_r50_vd_pse_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_pse +``` + +PSE text detection model inference, to perform non-curved text detection, you can run the following commands: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_model_dir="./inference/det_pse/" --det_algorithm="PSE" --det_pse_box_type=quad +``` + +The visualized text detection results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'det_res'. Examples of results are as follows: + +![](../imgs_results/det_res_img_10_pse.jpg) + +If you want to perform curved text detection, you can execute the following command: + +```shell +python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_model_dir="./inference/det_pse/" --det_algorithm="PSE" --det_pse_box_type=poly +``` + +The visualized text detection results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'det_res'. Examples of results are as follows: + +![](../imgs_results/det_res_img_10_pse_poly.jpg) + +**Note**: Since the ICDAR2015 dataset has only 1,000 training images, mainly for English scenes, the above model has very poor detection result on Chinese or curved text images. + + + +### 4.2 C++ Inference + +Since the post-processing is not written in CPP, the PSE text detection model does not support CPP inference. 
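If you need the detection results outside of the Python visualization above, the plain-text file written by `tools/infer/predict_det.py` can be parsed directly instead. The snippet below is only a sketch: the results file name and path (`./inference_results/det_results.txt`) are assumptions, while the per-line format of image name, a tab, and a JSON list of boxes follows the predictor's save logic in this PR.

```python
import json
import numpy as np

# Each line of the saved results file has the form:
#   <image name>\t<JSON list of boxes>
# where every box is a list of [x, y] vertices (4 for quad output, more for poly).
# The file path below is an assumption for illustration.
with open("./inference_results/det_results.txt", "r", encoding="utf-8") as f:
    for line in f:
        image_name, boxes_json = line.rstrip("\n").split("\t", 1)
        boxes = [np.array(box) for box in json.loads(boxes_json)]
        print(image_name, len(boxes), "detected text regions")
```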
+ + +### 4.3 Serving + +Not supported + + +### 4.4 More + +Not supported + + +## 5. FAQ + + +## Citation + +```bibtex +@inproceedings{wang2019shape, + title={Shape robust text detection with progressive scale expansion network}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Hou, Wenbo and Lu, Tong and Yu, Gang and Shao, Shuai}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={9336--9345}, + year={2019} +} +``` diff --git a/doc/doc_en/datasets_en.md b/doc/doc_en/dataset/datasets_en.md similarity index 92% rename from doc/doc_en/datasets_en.md rename to doc/doc_en/dataset/datasets_en.md index 0e6b6f381e9d008add802c5f8a30d5498a4f94b2..d81c058caa5e82097641405ff1ba048e95a2e3d7 100644 --- a/doc/doc_en/datasets_en.md +++ b/doc/doc_en/dataset/datasets_en.md @@ -12,12 +12,12 @@ In addition to opensource data, users can also use synthesis tools to synthesize #### 1. ICDAR2019-LSVT - **Data sources**:https://ai.baidu.com/broad/introduction?dataset=lsvt - **Introduction**: A total of 45w Chinese street view images, including 5w (2w test + 3w training) fully labeled data (text coordinates + text content), 40w weakly labeled data (text content only), as shown in the following figure: - ![](../datasets/LSVT_1.jpg) + ![](../../datasets/LSVT_1.jpg) (a) Fully labeled data - ![](../datasets/LSVT_2.jpg) - + ![](../../datasets/LSVT_2.jpg) + (b) Weakly labeled data - **Download link**:https://ai.baidu.com/broad/download?dataset=lsvt @@ -25,7 +25,7 @@ In addition to opensource data, users can also use synthesis tools to synthesize #### 2. ICDAR2017-RCTW-17 - **Data sources**:https://rctw.vlrlab.net/ - **Introduction**:It contains 12000 + images, most of them are collected in the wild through mobile camera. Some are screenshots. These images show a variety of scenes, including street views, posters, menus, indoor scenes and screenshots of mobile applications. - ![](../datasets/rctw.jpg) + ![](../../datasets/rctw.jpg) - **Download link**:https://rctw.vlrlab.net/dataset/ @@ -33,9 +33,9 @@ In addition to opensource data, users can also use synthesis tools to synthesize - **Data sources**:https://aistudio.baidu.com/aistudio/competition/detail/8 - **Introduction**:A total of 290000 pictures are included, of which 210000 are used as training sets (with labels) and 80000 are used as test sets (without labels). The dataset is collected from the Chinese street view, and is formed by by cutting out the text line area (such as shop signs, landmarks, etc.) in the street view picture. 
All the images are preprocessed: by using affine transform, the text area is proportionally mapped to a picture with a height of 48 pixels, as shown in the figure: - ![](../datasets/ch_street_rec_1.png) + ![](../../datasets/ch_street_rec_1.png) (a) Label: 魅派集成吊顶 - ![](../datasets/ch_street_rec_2.png) + ![](../../datasets/ch_street_rec_2.png) (b) Label: 母婴用品连锁 - **Download link** https://aistudio.baidu.com/aistudio/datasetdetail/8429 @@ -49,13 +49,13 @@ https://aistudio.baidu.com/aistudio/datasetdetail/8429 - 5990 characters including Chinese characters, English letters, numbers and punctuation(Characters set: https://github.com/YCG09/chinese_ocr/blob/master/train/char_std_5990.txt ) - Each sample is fixed with 10 characters, and the characters are randomly intercepted from the sentences in the corpus - Image resolution is 280x32 - ![](../datasets/ch_doc1.jpg) - ![](../datasets/ch_doc3.jpg) + ![](../../datasets/ch_doc1.jpg) + ![](../../datasets/ch_doc3.jpg) - **Download link**:https://pan.baidu.com/s/1QkI7kjah8SPHwOQ40rS1Pw (Password: lu7m) #### 5、ICDAR2019-ArT - **Data source**:https://ai.baidu.com/broad/introduction?dataset=art - **Introduction**:It includes 10166 images, 5603 in training sets and 4563 in test sets. It is composed of three parts: total text, scut-ctw1500 and Baidu curved scene text, including text with various shapes such as horizontal, multi-directional and curved. - ![](../datasets/ArT.jpg) + ![](../../datasets/ArT.jpg) - **Download link**:https://ai.baidu.com/broad/download?dataset=art diff --git a/doc/doc_en/handwritten_datasets_en.md b/doc/doc_en/dataset/handwritten_datasets_en.md similarity index 96% rename from doc/doc_en/handwritten_datasets_en.md rename to doc/doc_en/dataset/handwritten_datasets_en.md index da6008a2acfa684dd6efe37fda42eb0e2c7eb97a..2059549601eba285eb56f27d6b7e721cd05c97f1 100644 --- a/doc/doc_en/handwritten_datasets_en.md +++ b/doc/doc_en/dataset/handwritten_datasets_en.md @@ -9,7 +9,7 @@ Here we have sorted out the commonly used handwritten OCR dataset datasets, whic - **Data introduction**: * It includes online and offline handwritten data,`HWDB1.0~1.2` has totally 3895135 handwritten single character samples, which belong to 7356 categories (7185 Chinese characters and 171 English letters, numbers and symbols);`HWDB2.0~2.2` has totally 5091 pages of images, which are divided into 52230 text lines and 1349414 words. All text and text samples are stored as grayscale images. Some sample words are shown below. - ![](../datasets/CASIA_0.jpg) + ![](../../datasets/CASIA_0.jpg) - **Download address**:http://www.nlpr.ia.ac.cn/databases/handwriting/Download.html - **使用建议**:Data for single character, white background, can form a large number of text lines for training. White background can be processed into transparent state, which is convenient to add various backgrounds. For the case of semantic needs, it is suggested to extract single character from real corpus to form text lines. @@ -22,7 +22,7 @@ Here we have sorted out the commonly used handwritten OCR dataset datasets, whic - **Data introduction**: NIST19 dataset is suitable for handwritten document and character recognition model training. It is extracted from the handwritten sample form of 3600 authors and contains 810000 character images in total. Nine of them are shown below. 
- ![](../datasets/nist_demo.png) + ![](../../datasets/nist_demo.png) - **Download address**: [https://www.nist.gov/srd/nist-special-database-19](https://www.nist.gov/srd/nist-special-database-19) diff --git a/doc/doc_en/dataset/ocr_datasets_en.md b/doc/doc_en/dataset/ocr_datasets_en.md new file mode 100644 index 0000000000000000000000000000000000000000..0b9abd529ddb6d0cf0bc294d74e3249215c8fd45 --- /dev/null +++ b/doc/doc_en/dataset/ocr_datasets_en.md @@ -0,0 +1,157 @@ +# OCR datasets + +- [1. Text detection](#1-text-detection) + - [1.1 PaddleOCR text detection format annotation](#11-paddleocr-text-detection-format-annotation) + - [1.2 Public dataset](#12-public-dataset) + - [1.2.1 ICDAR 2015](#121-icdar-2015) +- [2. Text recognition](#2-text-recognition) + - [2.1 PaddleOCR text recognition format annotation](#21-paddleocr-text-recognition-format-annotation) + - [2.2 Public dataset](#22-public-dataset) + - [2.1 ICDAR 2015](#21-icdar-2015) +- [3. Data storage path](#3-data-storage-path) + +Here is a list of public datasets commonly used in OCR, which are being continuously updated. Welcome to contribute datasets~ + +## 1. Text detection + +### 1.1 PaddleOCR text detection format annotation + +The annotation file formats supported by the PaddleOCR text detection algorithm are as follows, separated by "\t": +``` +" Image file name Image annotation information encoded by json.dumps" +ch4_test_images/img_61.jpg [{"transcription": "MASA", "points": [[310, 104], [416, 141], [418, 216], [312, 179]]}, {...}] +``` +The image annotation after **json.dumps()** encoding is a list containing multiple dictionaries. + +The `points` in the dictionary represent the coordinates (x, y) of the four points of the text box, arranged clockwise from the point at the upper left corner. + +`transcription` represents the text of the current text box. **When its content is "###" it means that the text box is invalid and will be skipped during training.** + +If you want to train PaddleOCR on other datasets, please build the annotation file according to the above format. + +### 1.2 Public dataset +| dataset | Image download link | PaddleOCR format annotation download link | +|---|---|---| +| ICDAR 2015 | https://rrc.cvc.uab.es/?ch=4&com=downloads | [train](https://paddleocr.bj.bcebos.com/dataset/train_icdar2015_label.txt) / [test](https://paddleocr.bj.bcebos.com/dataset/test_icdar2015_label.txt) | +| ctw1500 | https://paddleocr.bj.bcebos.com/dataset/ctw1500.zip | Included in the downloaded image zip | +| total text | https://paddleocr.bj.bcebos.com/dataset/total_text.tar | Included in the downloaded image zip | + +#### 1.2.1 ICDAR 2015 + +The icdar2015 dataset contains train set which has 1000 images obtained with wearable cameras and test set which has 500 images obtained with wearable cameras. The icdar2015 dataset can be downloaded from the link in the table above. Registration is required for downloading. + + +After registering and logging in, download the part marked in the red box in the figure below. And, the content downloaded by `Training Set Images` should be saved as the folder `icdar_c4_train_imgs`, and the content downloaded by `Test Set Images` is saved as the folder `ch4_test_images` + +
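As a short aside before the download figures below, the detection label format described in section 1.1 can also be generated programmatically when you build your own dataset. This is only an illustrative sketch; the image path and output file name are placeholders.

```python
import json

# One annotation entry per text region: the four corner points (clockwise from
# the top-left corner) and the transcription; "###" marks a region that is
# skipped during training.
annotations = [
    {"transcription": "MASA",
     "points": [[310, 104], [416, 141], [418, 216], [312, 179]]},
    {"transcription": "###",
     "points": [[10, 10], [60, 10], [60, 40], [10, 40]]},
]

# Each label line is "<image path>\t<json.dumps of the annotation list>".
line = "ch4_test_images/img_61.jpg" + "\t" + json.dumps(annotations, ensure_ascii=False)
with open("train_data/my_det_label.txt", "a", encoding="utf-8") as f:
    f.write(line + "\n")
```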

+ +

+ +Decompress the downloaded dataset to the working directory, assuming it is decompressed under PaddleOCR/train_data/. Then download the PaddleOCR format annotation file from the table above. + +PaddleOCR also provides a data format conversion script, which can convert the official website label to the PaddleOCR format. The data conversion tool is in `ppocr/utils/gen_label.py`, here is the training set as an example: +``` +# Convert the label file downloaded from the official website to train_icdar2015_label.txt +python gen_label.py --mode="det" --root_path="/path/to/icdar_c4_train_imgs/" \ + --input_path="/path/to/ch4_training_localization_transcription_gt" \ + --output_label="/path/to/train_icdar2015_label.txt" +``` + +After decompressing the data set and downloading the annotation file, PaddleOCR/train_data/ has two folders and two files, which are: +``` +/PaddleOCR/train_data/icdar2015/text_localization/ + └─ icdar_c4_train_imgs/ Training data of icdar dataset + └─ ch4_test_images/ Testing data of icdar dataset + └─ train_icdar2015_label.txt Training annotation of icdar dataset + └─ test_icdar2015_label.txt Test annotation of icdar dataset +``` + + +## 2. Text recognition + +### 2.1 PaddleOCR text recognition format annotation + +The text recognition algorithm in PaddleOCR supports two data formats: + - `lmdb` is used to train data sets stored in lmdb format, use [lmdb_dataset.py](../../../ppocr/data/lmdb_dataset.py) to load; + - `common dataset` is used to train data sets stored in text files, use [simple_dataset.py](../../../ppocr/data/simple_dataset.py) to load. + + +If you want to use your own data for training, please refer to the following to organize your data. + +- Training set + +It is recommended to put the training images in the same folder, and use a txt file (rec_gt_train.txt) to store the image path and label. The contents of the txt file are as follows: + +* Note: by default, the image path and image label are split with \t, if you use other methods to split, it will cause training error + +``` +" Image file name Image annotation " + +train_data/rec/train/word_001.jpg 简单可依赖 +train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 +... +``` + +The final training set should have the following file structure: + +``` +|-train_data + |-rec + |- rec_gt_train.txt + |- train + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... +``` + +- Test set + +Similar to the training set, the test set also needs to be provided a folder containing all images (test) and a rec_gt_test.txt. The structure of the test set is as follows: + +``` +|-train_data + |-rec + |-ic15_data + |- rec_gt_test.txt + |- test + |- word_001.jpg + |- word_002.jpg + |- word_003.jpg + | ... +``` + +### 2.2 Public dataset +| dataset | Image download link | PaddleOCR format annotation download link | +|---|---|---| +| en benchmark(MJ, SJ, IIIT, SVT, IC03, IC13, IC15, SVTP, and CUTE.) | [DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) | LMDB format, which can be loaded directly with [lmdb_dataset.py](../../../ppocr/data/lmdb_dataset.py) | +|ICDAR 2015| http://rrc.cvc.uab.es/?ch=4&com=downloads | [train](https://paddleocr.bj.bcebos.com/dataset/rec_gt_train.txt)/ [test](https://paddleocr.bj.bcebos.com/dataset/rec_gt_test.txt) | +| Multilingual datasets |[Baidu network disk](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA) Extraction code: frgi
[google drive](https://drive.google.com/file/d/18cSWX7wXSy4G0tbKJ0d9PuIaiwRLHpjA/view) | Included in the downloaded image zip | + +#### 2.1 ICDAR 2015 + +The ICDAR 2015 dataset can be downloaded from the link in the table above for quick validation. The lmdb format dataset required by en benchmark can also be downloaded from the table above. + +Then download the PaddleOCR format annotation file from the table above. + +PaddleOCR also provides a data format conversion script, which can convert the ICDAR official website label to the data format supported by PaddleOCR. The data conversion tool is in `ppocr/utils/gen_label.py`, here is the training set as an example: + +``` +# Convert the label file downloaded from the official website to rec_gt_label.txt +python gen_label.py --mode="rec" --input_path="{path/of/origin/label}" --output_label="rec_gt_label.txt" +``` + +The data format is as follows, (a) is the original picture, (b) is the Ground Truth text file corresponding to each picture: + +![](../../datasets/icdar_rec.png) + +## 3. Data storage path + +The default storage path for PaddleOCR training data is `PaddleOCR/train_data`, if you already have a dataset on your disk, just create a soft link to the dataset directory: + +``` +# linux and mac os +ln -sf /train_data/dataset +# windows +mklink /d /train_data/dataset +``` diff --git a/doc/doc_en/dataset/table_datasets_en.md b/doc/doc_en/dataset/table_datasets_en.md new file mode 100644 index 0000000000000000000000000000000000000000..e30147909812a153f311add50f0bef5d1d1e0e32 --- /dev/null +++ b/doc/doc_en/dataset/table_datasets_en.md @@ -0,0 +1,32 @@ +# Table Recognition Datasets + +- [Dataset Summary](#dataset-summary) +- [1. PubTabNet](#1-pubtabnet) +- [2. TAL Table Recognition Competition Dataset](#2-tal-table-recognition-competition-dataset) + +Here are the commonly used table recognition datasets, which are being updated continuously. Welcome to contribute datasets~ + +## Dataset Summary + +| dataset | Image download link | PPOCR format annotation download link | +|---|---|---| +| PubTabNet |https://github.com/ibm-aur-nlp/PubTabNet| jsonl format, which can be loaded directly with [pubtab_dataset.py](../../../ppocr/data/pubtab_dataset.py) | +| TAL Table Recognition Competition Dataset |https://ai.100tal.com/dataset| jsonl format, which can be loaded directly with [pubtab_dataset.py](../../../ppocr/data/pubtab_dataset.py) | + +## 1. PubTabNet +- **Data Introduction**:The training set of the PubTabNet dataset contains 500,000 images and the validation set contains 9000 images. Part of the image visualization is shown below. + +

+ + +
+ +- **illustrate**:When using this dataset, the [CDLA-Permissive](https://cdla.io/permissive-1-0/) protocol is required. + +## 2. TAL Table Recognition Competition Dataset +- **Data Introduction**:The training set of the TAL table recognition competition dataset contains 16,000 images. The validation set does not give trainable annotations. + +
+ + +
diff --git a/doc/doc_en/vertical_and_multilingual_datasets_en.md b/doc/doc_en/dataset/vertical_and_multilingual_datasets_en.md similarity index 97% rename from doc/doc_en/vertical_and_multilingual_datasets_en.md rename to doc/doc_en/dataset/vertical_and_multilingual_datasets_en.md index 9d5ecff7e327213656870845f9897321f6521df6..ea366ef202545a1e0cb7b535e4deda196b37fa8d 100644 --- a/doc/doc_en/vertical_and_multilingual_datasets_en.md +++ b/doc/doc_en/dataset/vertical_and_multilingual_datasets_en.md @@ -22,7 +22,7 @@ Here we have sorted out the commonly used vertical multi-language OCR dataset da * CCPD-Challenge: So far, some of the most challenging images in license plate detection and recognition tasks * CCPD-NP: Pictures of new cars without license plates. - ![](../datasets/ccpd_demo.png) + ![](../../datasets/ccpd_demo.png) - **Download address** @@ -46,7 +46,7 @@ Here we have sorted out the commonly used vertical multi-language OCR dataset da * End of validity: 07/41 * Chinese phonetic alphabet of card users: MICHAEL - ![](../datasets/cmb_demo.jpg) + ![](../../datasets/cmb_demo.jpg) - **Download address**: [https://cdn.kesci.com/cmb2017-2.zip](https://cdn.kesci.com/cmb2017-2.zip) @@ -59,7 +59,7 @@ Here we have sorted out the commonly used vertical multi-language OCR dataset da - **Data introduction**: This is a toolkit for data synthesis. You can output captcha images according to the input text. Use the toolkit to generate several demo images as follows. - ![](../datasets/captcha_demo.png) + ![](../../datasets/captcha_demo.png) - **Download address**: The dataset is generated and has no download address. diff --git a/doc/doc_en/detection_en.md b/doc/doc_en/detection_en.md index 618e20fb5e2a9a7afd67bb7d15646971b88365ee..1693211fb77d1adb6fe7906f01e6d8f7a8b42c17 100644 --- a/doc/doc_en/detection_en.md +++ b/doc/doc_en/detection_en.md @@ -2,63 +2,25 @@ This section uses the icdar2015 dataset as an example to introduce the training, evaluation, and testing of the detection model in PaddleOCR. -- [1. Data and Weights Preparation](#1-data-and-weights-preparatio) - * [1.1 Data Preparation](#11-data-preparation) - * [1.2 Download Pre-trained Model](#12-download-pretrained-model) +- [1. Data and Weights Preparation](#1-data-and-weights-preparation) + - [1.1 Data Preparation](#11-data-preparation) + - [1.2 Download Pre-trained Model](#12-download-pre-trained-model) - [2. Training](#2-training) - * [2.1 Start Training](#21-start-training) - * [2.2 Load Trained Model and Continue Training](#22-load-trained-model-and-continue-training) - * [2.3 Training with New Backbone](#23-training-with-new-backbone) - * [2.4 Training with knowledge distillation](#24) + - [2.1 Start Training](#21-start-training) + - [2.2 Load Trained Model and Continue Training](#22-load-trained-model-and-continue-training) + - [2.3 Training with New Backbone](#23-training-with-new-backbone) + - [2.4 Training with knowledge distillation](#24-training-with-knowledge-distillation) - [3. Evaluation and Test](#3-evaluation-and-test) - * [3.1 Evaluation](#31-evaluation) - * [3.2 Test](#32-test) + - [3.1 Evaluation](#31-evaluation) + - [3.2 Test](#32-test) - [4. Inference](#4-inference) -- [5. FAQ](#2-faq) +- [5. FAQ](#5-faq) ## 1. Data and Weights Preparation ### 1.1 Data Preparation -The icdar2015 dataset contains train set which has 1000 images obtained with wearable cameras and test set which has 500 images obtained with wearable cameras. 
The icdar2015 can be obtained from [official website](https://rrc.cvc.uab.es/?ch=4&com=downloads). Registration is required for downloading. - - -After registering and logging in, download the part marked in the red box in the figure below. And, the content downloaded by `Training Set Images` should be saved as the folder `icdar_c4_train_imgs`, and the content downloaded by `Test Set Images` is saved as the folder `ch4_test_images` - -

- -

- -Decompress the downloaded dataset to the working directory, assuming it is decompressed under PaddleOCR/train_data/. In addition, PaddleOCR organizes many scattered annotation files into two separate annotation files for train and test respectively, which can be downloaded by wget: -```shell -# Under the PaddleOCR path -cd PaddleOCR/ -wget -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/train_icdar2015_label.txt -wget -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/test_icdar2015_label.txt -``` - -After decompressing the data set and downloading the annotation file, PaddleOCR/train_data/ has two folders and two files, which are: -``` -/PaddleOCR/train_data/icdar2015/text_localization/ - └─ icdar_c4_train_imgs/ Training data of icdar dataset - └─ ch4_test_images/ Testing data of icdar dataset - └─ train_icdar2015_label.txt Training annotation of icdar dataset - └─ test_icdar2015_label.txt Test annotation of icdar dataset -``` - -The provided annotation file format is as follow, separated by "\t": -``` -" Image file name Image annotation information encoded by json.dumps" -ch4_test_images/img_61.jpg [{"transcription": "MASA", "points": [[310, 104], [416, 141], [418, 216], [312, 179]]}, {...}] -``` -The image annotation after **json.dumps()** encoding is a list containing multiple dictionaries. - -The `points` in the dictionary represent the coordinates (x, y) of the four points of the text box, arranged clockwise from the point at the upper left corner. - -`transcription` represents the text of the current text box. **When its content is "###" it means that the text box is invalid and will be skipped during training.** - -If you want to train PaddleOCR on other datasets, please build the annotation file according to the above format. - +To prepare datasets, refer to [ocr_datasets](./dataset/ocr_datasets_en.md) . ### 1.2 Download Pre-trained Model diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index c3700070b9d01c89cf8189a7af5f13d877114fb2..2b53d8ef8fd71950e80049628570793dcd49c424 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -1,130 +1,29 @@ # Text Recognition -- [1. Data Preparation](#DATA_PREPARATION) - - [1.1 Costom Dataset](#Costom_Dataset) - - [1.2 Dataset Download](#Dataset_download) - - [1.3 Dictionary](#Dictionary) - - [1.4 Add Space Category](#Add_space_category) - -- [2. Training](#TRAINING) - - [2.1 Data Augmentation](#Data_Augmentation) - - [2.2 General Training](#Training) - - [2.3 Multi-language Training](#Multi_language) - - [2.4 Training with Knowledge Distillation](#kd) - -- [3. Evaluation](#EVALUATION) - -- [4. Prediction](#PREDICTION) -- [5. Convert to Inference Model](#Inference) +- [1. Data Preparation](#1-data-preparation) + - [1.1 DataSet Preparation](#11-dataset-preparation) + - [1.2 Dictionary](#12-dictionary) + - [1.4 Add Space Category](#14-add-space-category) +- [2.Training](#2training) + - [2.1 Data Augmentation](#21-data-augmentation) + - [2.2 General Training](#22-general-training) + - [2.3 Multi-language Training](#23-multi-language-training) + - [2.4 Training with Knowledge Distillation](#24-training-with-knowledge-distillation) +- [3. Evalution](#3-evalution) +- [4. Prediction](#4-prediction) +- [5. Convert to Inference Model](#5-convert-to-inference-model) ## 1. 
Data Preparation +### 1.1 DataSet Preparation -PaddleOCR supports two data formats: -- `LMDB` is used to train data sets stored in lmdb format(LMDBDataSet); -- `general data` is used to train data sets stored in text files(SimpleDataSet): - -Please organize the dataset as follows: - -The default storage path for training data is `PaddleOCR/train_data`, if you already have a dataset on your disk, just create a soft link to the dataset directory: - -``` -# linux and mac os -ln -sf /train_data/dataset -# windows -mklink /d /train_data/dataset -``` - - -### 1.1 Costom Dataset - -If you want to use your own data for training, please refer to the following to organize your data. - -- Training set - -It is recommended to put the training images in the same folder, and use a txt file (rec_gt_train.txt) to store the image path and label. The contents of the txt file are as follows: - -* Note: by default, the image path and image label are split with \t, if you use other methods to split, it will cause training error - -``` -" Image file name Image annotation " - -train_data/rec/train/word_001.jpg 简单可依赖 -train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 -... -``` - -The final training set should have the following file structure: - -``` -|-train_data - |-rec - |- rec_gt_train.txt - |- train - |- word_001.png - |- word_002.jpg - |- word_003.jpg - | ... -``` - -- Test set - -Similar to the training set, the test set also needs to be provided a folder containing all images (test) and a rec_gt_test.txt. The structure of the test set is as follows: - -``` -|-train_data - |-rec - |-ic15_data - |- rec_gt_test.txt - |- test - |- word_001.jpg - |- word_002.jpg - |- word_003.jpg - | ... -``` - - -### 1.2 Dataset Download - -- ICDAR2015 - -If you do not have a dataset locally, you can download it on the official website [icdar2015](http://rrc.cvc.uab.es/?ch=4&com=downloads). -Also refer to [DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) ,download the lmdb format dataset required for benchmark +To prepare datasets, refer to [ocr_datasets](./dataset/ocr_datasets.md) . If you want to reproduce the paper SAR, you need to download extra dataset [SynthAdd](https://pan.baidu.com/share/init?surl=uV0LtoNmcxbO-0YA7Ch4dg), extraction code: 627x. Besides, icdar2013, icdar2015, cocotext, IIIT5k datasets are also used to train. For specific details, please refer to the paper SAR. -PaddleOCR provides label files for training the icdar2015 dataset, which can be downloaded in the following ways: - -``` -# Training set label -wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_train.txt -# Test Set Label -wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_test.txt -``` - -PaddleOCR also provides a data format conversion script, which can convert ICDAR official website label to a data format -supported by PaddleOCR. The data conversion tool is in `ppocr/utils/gen_label.py`, here is the training set as an example: - -``` -# convert the official gt to rec_gt_label.txt -python gen_label.py --mode="rec" --input_path="{path/of/origin/label}" --output_label="rec_gt_label.txt" -``` - -The data format is as follows, (a) is the original picture, (b) is the Ground Truth text file corresponding to each picture: - -![](../datasets/icdar_rec.png) - - -- Multilingual dataset - -The multi-language model training method is the same as the Chinese model. The training data set is 100w synthetic data. 
A small amount of fonts and test data can be downloaded using the following two methods. -* [Baidu Netdisk](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA) ,Extraction code:frgi. -* [Google drive](https://drive.google.com/file/d/18cSWX7wXSy4G0tbKJ0d9PuIaiwRLHpjA/view) - - -### 1.3 Dictionary +### 1.2 Dictionary Finally, a dictionary ({word_dict_name}.txt) needs to be provided so that when the model is trained, all the characters that appear can be mapped to the dictionary index. diff --git a/doc/doc_en/training_en.md b/doc/doc_en/training_en.md index 89992ff905426faaf7d22707a76dd9daaa8bcbb7..86c4deb3868081552f5b27d67c627693c3f95a62 100644 --- a/doc/doc_en/training_en.md +++ b/doc/doc_en/training_en.md @@ -94,7 +94,7 @@ The current open source models, data sets and magnitudes are as follows: - Chinese data set, LSVT street view data set crops the image according to the truth value, and performs position calibration, a total of 30w images. In addition, based on the LSVT corpus, 500w of synthesized data. - Small language data set, using different corpora and fonts, respectively generated 100w synthetic data set, and using ICDAR-MLT as the verification set. -Among them, the public data sets are all open source, users can search and download by themselves, or refer to [Chinese data set](./datasets_en.md), synthetic data is not open source, users can use open source synthesis tools to synthesize by themselves. Synthesis tools include [text_renderer](https://github.com/Sanster/text_renderer), [SynthText](https://github.com/ankush-me/SynthText), [TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator) etc. +Among them, the public data sets are all open source, users can search and download by themselves, or refer to [Chinese data set](dataset/datasets_en.md), synthetic data is not open source, users can use open source synthesis tools to synthesize by themselves. Synthesis tools include [text_renderer](https://github.com/Sanster/text_renderer), [SynthText](https://github.com/ankush-me/SynthText), [TextRecognitionDataGenerator](https://github.com/Belval/TextRecognitionDataGenerator) etc. diff --git a/doc/doc_en/update_en.md b/doc/doc_en/update_en.md index 39fd936d1bd4e5f8d8535805f865792820ee1199..8ec74fe8b73d89cc97904e2ce156e14bbd596eb4 100644 --- a/doc/doc_en/update_en.md +++ b/doc/doc_en/update_en.md @@ -19,7 +19,7 @@ - 2020.7.15, Add several related datasets, data annotation and synthesis tools. - 2020.7.9 Add a new model to support recognize the character "space". - 2020.7.9 Add the data augument and learning rate decay strategies during training. 
-- 2020.6.8 Add [datasets](./datasets_en.md) and keep updating +- 2020.6.8 Add [datasets](dataset/datasets_en.md) and keep updating - 2020.6.5 Support exporting `attention` model to `inference_model` - 2020.6.5 Support separate prediction and recognition, output result score - 2020.5.30 Provide Lightweight Chinese OCR online experience diff --git a/doc/imgs_results/det_res_img623_fce.jpg b/doc/imgs_results/det_res_img623_fce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..938ae4cabf32cf5f89519f81b33259b188ed494a Binary files /dev/null and b/doc/imgs_results/det_res_img623_fce.jpg differ diff --git a/doc/imgs_results/det_res_img_10_fce.jpg b/doc/imgs_results/det_res_img_10_fce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb32950ffda29f3263ab8bddc445e7c71f7d2ee0 Binary files /dev/null and b/doc/imgs_results/det_res_img_10_fce.jpg differ diff --git a/doc/imgs_results/det_res_img_10_pse.jpg b/doc/imgs_results/det_res_img_10_pse.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cdb7625dd05e6865ff3d6d476a33466f42cb3aee Binary files /dev/null and b/doc/imgs_results/det_res_img_10_pse.jpg differ diff --git a/doc/imgs_results/det_res_img_10_pse_poly.jpg b/doc/imgs_results/det_res_img_10_pse_poly.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c06a17ccb6a6d99c82a79eca5cf5755af4d0ce5 Binary files /dev/null and b/doc/imgs_results/det_res_img_10_pse_poly.jpg differ diff --git a/ppocr/data/__init__.py b/ppocr/data/__init__.py index 60ab7bd0b4ceab846982c8744d5b277ee17185df..78c3279656e184a3a34bff3847d3936b5e8977b6 100644 --- a/ppocr/data/__init__.py +++ b/ppocr/data/__init__.py @@ -72,6 +72,7 @@ def build_dataloader(config, mode, device, logger, seed=None): use_shared_memory = loader_config['use_shared_memory'] else: use_shared_memory = True + if mode == "Train": # Distribute data to multiple cards batch_sampler = DistributedBatchSampler( diff --git a/ppocr/data/collate_fn.py b/ppocr/data/collate_fn.py index 89c6b4fd5ae151e1d703ea5c59abf0177dfc3a8b..0da6060f042a0e60cdf211d8bc13aede32d5930a 100644 --- a/ppocr/data/collate_fn.py +++ b/ppocr/data/collate_fn.py @@ -56,3 +56,17 @@ class ListCollator(object): for idx in to_tensor_idxs: data_dict[idx] = paddle.to_tensor(data_dict[idx]) return list(data_dict.values()) + + +class SSLRotateCollate(object): + """ + bach: [ + [(4*3xH*W), (4,)] + [(4*3xH*W), (4,)] + ... 
+ ] + """ + + def __call__(self, batch): + output = [np.concatenate(d, axis=0) for d in zip(*batch)] + return output diff --git a/ppocr/data/imaug/__init__.py b/ppocr/data/imaug/__init__.py index 7580e607afb356a1032c4d6b2d2267bff608a80d..20aaf48e119d68e6c37ce9246a87701fb149d5e7 100644 --- a/ppocr/data/imaug/__init__.py +++ b/ppocr/data/imaug/__init__.py @@ -24,6 +24,7 @@ from .make_pse_gt import MakePseGt from .rec_img_aug import RecAug, RecConAug, RecResizeImg, ClsResizeImg, \ SRNRecResizeImg, NRTRRecResizeImg, SARRecResizeImg, PRENResizeImg, SVTRRecResizeImg +from .ssl_img_aug import SSLRotateResize from .randaugment import RandAugment from .copy_paste import CopyPaste from .ColorJitter import ColorJitter diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 86366d7a4f2e908426e67015b4338e44350da492..c9bc2e7722e8027ce870e4969bfcdab720495c28 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -113,14 +113,14 @@ class BaseRecLabelEncode(object): dict_character = list(self.character_str) self.lower = True else: - self.character_str = "" + self.character_str = [] with open(character_dict_path, "rb") as fin: lines = fin.readlines() for line in lines: line = line.decode('utf-8').strip("\n").strip("\r\n") - self.character_str += line + self.character_str.append(line) if use_space_char: - self.character_str += " " + self.character_str.append(" ") dict_character = list(self.character_str) dict_character = self.add_special_char(dict_character) self.dict = {} diff --git a/ppocr/data/imaug/ssl_img_aug.py b/ppocr/data/imaug/ssl_img_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ed6ac3e230ae85754bf40189c392c7e6e29b63 --- /dev/null +++ b/ppocr/data/imaug/ssl_img_aug.py @@ -0,0 +1,60 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +import cv2 +import numpy as np +import random +from PIL import Image + +from .rec_img_aug import resize_norm_img + + +class SSLRotateResize(object): + def __init__(self, + image_shape, + padding=False, + select_all=True, + mode="train", + **kwargs): + self.image_shape = image_shape + self.padding = padding + self.select_all = select_all + self.mode = mode + + def __call__(self, data): + img = data["image"] + + data["image_r90"] = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE) + data["image_r180"] = cv2.rotate(data["image_r90"], + cv2.ROTATE_90_CLOCKWISE) + data["image_r270"] = cv2.rotate(data["image_r180"], + cv2.ROTATE_90_CLOCKWISE) + + images = [] + for key in ["image", "image_r90", "image_r180", "image_r270"]: + images.append( + resize_norm_img( + data.pop(key), + image_shape=self.image_shape, + padding=self.padding)[0]) + data["image"] = np.stack(images, axis=0) + data["label"] = np.array(list(range(4))) + if not self.select_all: + data["image"] = data["image"][0::2] # just choose 0 and 180 + data["label"] = data["label"][0:2] # label needs to be continuous + if self.mode == "test": + data["image"] = data["image"][0] + data["label"] = data["label"][0] + return data diff --git a/ppocr/modeling/heads/det_db_head.py b/ppocr/modeling/heads/det_db_head.py index f76cb34d37af7d81b5e628d06c1a4cfe126f8bb4..a686ae5ab0662ad31ddfd339bd1999c45c370cf0 100644 --- a/ppocr/modeling/heads/det_db_head.py +++ b/ppocr/modeling/heads/det_db_head.py @@ -31,13 +31,14 @@ def get_bias_attr(k): class Head(nn.Layer): - def __init__(self, in_channels, name_list): + def __init__(self, in_channels, name_list, kernel_list=[3, 2, 2], **kwargs): super(Head, self).__init__() + self.conv1 = nn.Conv2D( in_channels=in_channels, out_channels=in_channels // 4, - kernel_size=3, - padding=1, + kernel_size=kernel_list[0], + padding=int(kernel_list[0] // 2), weight_attr=ParamAttr(), bias_attr=False) self.conv_bn1 = nn.BatchNorm( @@ -50,7 +51,7 @@ class Head(nn.Layer): self.conv2 = nn.Conv2DTranspose( in_channels=in_channels // 4, out_channels=in_channels // 4, - kernel_size=2, + kernel_size=kernel_list[1], stride=2, weight_attr=ParamAttr( initializer=paddle.nn.initializer.KaimingUniform()), @@ -65,7 +66,7 @@ class Head(nn.Layer): self.conv3 = nn.Conv2DTranspose( in_channels=in_channels // 4, out_channels=1, - kernel_size=2, + kernel_size=kernel_list[2], stride=2, weight_attr=ParamAttr( initializer=paddle.nn.initializer.KaimingUniform()), @@ -100,8 +101,8 @@ class DBHead(nn.Layer): 'conv2d_57', 'batch_norm_49', 'conv2d_transpose_2', 'batch_norm_50', 'conv2d_transpose_3', 'thresh' ] - self.binarize = Head(in_channels, binarize_name_list) - self.thresh = Head(in_channels, thresh_name_list) + self.binarize = Head(in_channels, binarize_name_list, **kwargs) + self.thresh = Head(in_channels, thresh_name_list, **kwargs) def step_function(self, x, y): return paddle.reciprocal(1 + paddle.exp(-self.k * (x - y))) diff --git a/ppocr/modeling/necks/__init__.py b/ppocr/modeling/necks/__init__.py index 54837dc65be4b6243136559cf281dc62c441512b..e10b082d11be69b1865f0093b6fec442b255f03a 100644 --- a/ppocr/modeling/necks/__init__.py +++ b/ppocr/modeling/necks/__init__.py @@ -16,7 +16,7 @@ __all__ = ['build_neck'] def build_neck(config): - from .db_fpn import DBFPN + from .db_fpn import DBFPN, RSEFPN, LKPAN from .east_fpn import EASTFPN from .sast_fpn import SASTFPN from .rnn import SequenceEncoder @@ -26,8 +26,8 @@ def build_neck(config): from .fce_fpn import FCEFPN from .pren_fpn import PRENFPN support_dict = [ - 'FPN', 'FCEFPN', 
'DBFPN', 'EASTFPN', 'SASTFPN', 'SequenceEncoder', - 'PGFPN', 'TableFPN', 'PRENFPN' + 'FPN', 'FCEFPN', 'LKPAN', 'DBFPN', 'RSEFPN', 'EASTFPN', 'SASTFPN', + 'SequenceEncoder', 'PGFPN', 'TableFPN', 'PRENFPN' ] module_name = config.pop('name') diff --git a/ppocr/modeling/necks/db_fpn.py b/ppocr/modeling/necks/db_fpn.py index 1cf30cedd5b23e8a7ba243726a6d7eea7924750c..93ed2dbfd1fac9bf2d163c54d23a20e16b537981 100644 --- a/ppocr/modeling/necks/db_fpn.py +++ b/ppocr/modeling/necks/db_fpn.py @@ -20,6 +20,88 @@ import paddle from paddle import nn import paddle.nn.functional as F from paddle import ParamAttr +import os +import sys + +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.insert(0, os.path.abspath(os.path.join(__dir__, '../../..'))) + +from ppocr.modeling.backbones.det_mobilenet_v3 import SEModule + + +class DSConv(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + padding, + stride=1, + groups=None, + if_act=True, + act="relu", + **kwargs): + super(DSConv, self).__init__() + if groups == None: + groups = in_channels + self.if_act = if_act + self.act = act + self.conv1 = nn.Conv2D( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias_attr=False) + + self.bn1 = nn.BatchNorm(num_channels=in_channels, act=None) + + self.conv2 = nn.Conv2D( + in_channels=in_channels, + out_channels=int(in_channels * 4), + kernel_size=1, + stride=1, + bias_attr=False) + + self.bn2 = nn.BatchNorm(num_channels=int(in_channels * 4), act=None) + + self.conv3 = nn.Conv2D( + in_channels=int(in_channels * 4), + out_channels=out_channels, + kernel_size=1, + stride=1, + bias_attr=False) + self._c = [in_channels, out_channels] + if in_channels != out_channels: + self.conv_end = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + bias_attr=False) + + def forward(self, inputs): + + x = self.conv1(inputs) + x = self.bn1(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.if_act: + if self.act == "relu": + x = F.relu(x) + elif self.act == "hardswish": + x = F.hardswish(x) + else: + print("The activation function({}) is selected incorrectly.". 
+ format(self.act)) + exit() + + x = self.conv3(x) + if self._c[0] != self._c[1]: + x = x + self.conv_end(inputs) + return x class DBFPN(nn.Layer): @@ -106,3 +188,171 @@ class DBFPN(nn.Layer): fuse = paddle.concat([p5, p4, p3, p2], axis=1) return fuse + + +class RSELayer(nn.Layer): + def __init__(self, in_channels, out_channels, kernel_size, shortcut=True): + super(RSELayer, self).__init__() + weight_attr = paddle.nn.initializer.KaimingUniform() + self.out_channels = out_channels + self.in_conv = nn.Conv2D( + in_channels=in_channels, + out_channels=self.out_channels, + kernel_size=kernel_size, + padding=int(kernel_size // 2), + weight_attr=ParamAttr(initializer=weight_attr), + bias_attr=False) + self.se_block = SEModule(self.out_channels) + self.shortcut = shortcut + + def forward(self, ins): + x = self.in_conv(ins) + if self.shortcut: + out = x + self.se_block(x) + else: + out = self.se_block(x) + return out + + +class RSEFPN(nn.Layer): + def __init__(self, in_channels, out_channels, shortcut=True, **kwargs): + super(RSEFPN, self).__init__() + self.out_channels = out_channels + self.ins_conv = nn.LayerList() + self.inp_conv = nn.LayerList() + + for i in range(len(in_channels)): + self.ins_conv.append( + RSELayer( + in_channels[i], + out_channels, + kernel_size=1, + shortcut=shortcut)) + self.inp_conv.append( + RSELayer( + out_channels, + out_channels // 4, + kernel_size=3, + shortcut=shortcut)) + + def forward(self, x): + c2, c3, c4, c5 = x + + in5 = self.ins_conv[3](c5) + in4 = self.ins_conv[2](c4) + in3 = self.ins_conv[1](c3) + in2 = self.ins_conv[0](c2) + + out4 = in4 + F.upsample( + in5, scale_factor=2, mode="nearest", align_mode=1) # 1/16 + out3 = in3 + F.upsample( + out4, scale_factor=2, mode="nearest", align_mode=1) # 1/8 + out2 = in2 + F.upsample( + out3, scale_factor=2, mode="nearest", align_mode=1) # 1/4 + + p5 = self.inp_conv[3](in5) + p4 = self.inp_conv[2](out4) + p3 = self.inp_conv[1](out3) + p2 = self.inp_conv[0](out2) + + p5 = F.upsample(p5, scale_factor=8, mode="nearest", align_mode=1) + p4 = F.upsample(p4, scale_factor=4, mode="nearest", align_mode=1) + p3 = F.upsample(p3, scale_factor=2, mode="nearest", align_mode=1) + + fuse = paddle.concat([p5, p4, p3, p2], axis=1) + return fuse + + +class LKPAN(nn.Layer): + def __init__(self, in_channels, out_channels, mode='large', **kwargs): + super(LKPAN, self).__init__() + self.out_channels = out_channels + weight_attr = paddle.nn.initializer.KaimingUniform() + + self.ins_conv = nn.LayerList() + self.inp_conv = nn.LayerList() + # pan head + self.pan_head_conv = nn.LayerList() + self.pan_lat_conv = nn.LayerList() + + if mode.lower() == 'lite': + p_layer = DSConv + elif mode.lower() == 'large': + p_layer = nn.Conv2D + else: + raise ValueError( + "mode can only be one of ['lite', 'large'], but received {}". 
+ format(mode)) + + for i in range(len(in_channels)): + self.ins_conv.append( + nn.Conv2D( + in_channels=in_channels[i], + out_channels=self.out_channels, + kernel_size=1, + weight_attr=ParamAttr(initializer=weight_attr), + bias_attr=False)) + + self.inp_conv.append( + p_layer( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=9, + padding=4, + weight_attr=ParamAttr(initializer=weight_attr), + bias_attr=False)) + + if i > 0: + self.pan_head_conv.append( + nn.Conv2D( + in_channels=self.out_channels // 4, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + stride=2, + weight_attr=ParamAttr(initializer=weight_attr), + bias_attr=False)) + self.pan_lat_conv.append( + p_layer( + in_channels=self.out_channels // 4, + out_channels=self.out_channels // 4, + kernel_size=9, + padding=4, + weight_attr=ParamAttr(initializer=weight_attr), + bias_attr=False)) + + def forward(self, x): + c2, c3, c4, c5 = x + + in5 = self.ins_conv[3](c5) + in4 = self.ins_conv[2](c4) + in3 = self.ins_conv[1](c3) + in2 = self.ins_conv[0](c2) + + out4 = in4 + F.upsample( + in5, scale_factor=2, mode="nearest", align_mode=1) # 1/16 + out3 = in3 + F.upsample( + out4, scale_factor=2, mode="nearest", align_mode=1) # 1/8 + out2 = in2 + F.upsample( + out3, scale_factor=2, mode="nearest", align_mode=1) # 1/4 + + f5 = self.inp_conv[3](in5) + f4 = self.inp_conv[2](out4) + f3 = self.inp_conv[1](out3) + f2 = self.inp_conv[0](out2) + + pan3 = f3 + self.pan_head_conv[0](f2) + pan4 = f4 + self.pan_head_conv[1](pan3) + pan5 = f5 + self.pan_head_conv[2](pan4) + + p2 = self.pan_lat_conv[0](f2) + p3 = self.pan_lat_conv[1](pan3) + p4 = self.pan_lat_conv[2](pan4) + p5 = self.pan_lat_conv[3](pan5) + + p5 = F.upsample(p5, scale_factor=8, mode="nearest", align_mode=1) + p4 = F.upsample(p4, scale_factor=4, mode="nearest", align_mode=1) + p3 = F.upsample(p3, scale_factor=2, mode="nearest", align_mode=1) + + fuse = paddle.concat([p5, p4, p3, p2], axis=1) + return fuse diff --git a/ppocr/postprocess/cls_postprocess.py b/ppocr/postprocess/cls_postprocess.py index 77e7f46d6f774ffb81f8e9cbd6b100c780665dca..9a27ba0831358564d99a6ec698a5019eae1c25f7 100644 --- a/ppocr/postprocess/cls_postprocess.py +++ b/ppocr/postprocess/cls_postprocess.py @@ -17,17 +17,26 @@ import paddle class ClsPostProcess(object): """ Convert between text-label and text-index """ - def __init__(self, label_list, **kwargs): + def __init__(self, label_list=None, key=None, **kwargs): super(ClsPostProcess, self).__init__() self.label_list = label_list + self.key = key def __call__(self, preds, label=None, *args, **kwargs): + if self.key is not None: + preds = preds[self.key] + + label_list = self.label_list + if label_list is None: + label_list = {idx: idx for idx in range(preds.shape[-1])} + if isinstance(preds, paddle.Tensor): preds = preds.numpy() + pred_idxs = preds.argmax(axis=1) - decode_out = [(self.label_list[idx], preds[i, idx]) + decode_out = [(label_list[idx], preds[i, idx]) for i, idx in enumerate(pred_idxs)] if label is None: return decode_out - label = [(self.label_list[idx], 1.0) for idx in label] + label = [(label_list[idx], 1.0) for idx in label] return decode_out, label diff --git a/ppocr/utils/dict/ka_dict.txt b/ppocr/utils/dict/ka_dict.txt index 33d605c4de106c3c4b2504f5b3c42cdadd076dd8..d506b691bd1a6c55299ad89a72cf3a69a2c879a9 100644 --- a/ppocr/utils/dict/ka_dict.txt +++ b/ppocr/utils/dict/ka_dict.txt @@ -21,7 +21,7 @@ l 8 . 
j -p +p ಗ ು ಣ diff --git a/ppocr/utils/dict/ta_dict.txt b/ppocr/utils/dict/ta_dict.txt index d1bae501ad2556bb59b16a6c4b27a27091a6cbcf..19d81892c205627f296adbf8b20ea41aba2de5d0 100644 --- a/ppocr/utils/dict/ta_dict.txt +++ b/ppocr/utils/dict/ta_dict.txt @@ -22,7 +22,7 @@ l 8 . j -p +p ப ூ த diff --git a/tools/export_model.py b/tools/export_model.py index 003bc61f791b6c41a3b08d58ab87f12109744f9a..1f9f29e396fe4960914ae802769b65d20c103bd3 100755 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -31,7 +31,7 @@ from ppocr.utils.logging import get_logger from tools.program import load_config, merge_config, ArgsParser -def export_single_model(model, arch_config, save_path, logger): +def export_single_model(model, arch_config, save_path, logger, quanter=None): if arch_config["algorithm"] == "SRN": max_text_length = arch_config["Head"]["max_text_length"] other_shape = [ @@ -95,7 +95,10 @@ def export_single_model(model, arch_config, save_path, logger): shape=[None] + infer_shape, dtype="float32") ]) - paddle.jit.save(model, save_path) + if quanter is None: + paddle.jit.save(model, save_path) + else: + quanter.save_quantized_model(model, save_path) logger.info("inference model is saved to {}".format(save_path)) return @@ -125,7 +128,6 @@ def main(): char_num = char_num - 2 out_channels_list['CTCLabelDecode'] = char_num out_channels_list['SARLabelDecode'] = char_num + 2 - loss_list = config['Loss']['loss_config_list'] config['Architecture']['Models'][key]['Head'][ 'out_channels_list'] = out_channels_list else: diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 695587a9aa39f27fb5e37ba8d5447fb9f085e1e1..5f2675d667c2aab8186886a60d8d447f43419954 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -158,7 +158,7 @@ class TextDetector(object): rect[1] = pts[np.argmin(diff)] rect[3] = pts[np.argmax(diff)] return rect - + def clip_det_res(self, points, img_height, img_width): for pno in range(points.shape[0]): points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1)) @@ -284,7 +284,7 @@ if __name__ == "__main__": total_time += elapse count += 1 save_pred = os.path.basename(image_file) + "\t" + str( - json.dumps(np.array(dt_boxes).astype(np.int32).tolist())) + "\n" + json.dumps([x.tolist() for x in dt_boxes])) + "\n" save_results.append(save_pred) logger.info(save_pred) logger.info("The predict time of {}: {}".format(image_file, elapse)) diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 762db868f58d11aaa626a2e55591d47bfa9536a9..c92e8e152a9ee4d86d269aec7ff5645f23cad443 100644 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -301,8 +301,8 @@ def create_predictor(args, mode, logger): # enable memory optim config.enable_memory_optim() config.disable_glog_info() - config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") + config.delete_pass("matmul_transpose_reshape_fuse_pass") if mode == 'table': config.delete_pass("fc_fuse_pass") # not supported for table config.switch_use_feed_fetch_ops(False) diff --git a/tools/infer_cls.py b/tools/infer_cls.py index 4be30bbb3c2f8bbf6a59179220faa942e6cc27b8..7fd6b536fbe50fb1240d84ca3a5e87236940c0f5 100755 --- a/tools/infer_cls.py +++ b/tools/infer_cls.py @@ -57,6 +57,8 @@ def main(): continue elif op_name == 'KeepKeys': op[op_name]['keep_keys'] = ['image'] + elif op_name == "SSLRotateResize": + op[op_name]["mode"] = "test" transforms.append(op) global_config['infer_mode'] = True ops = create_operators(transforms, global_config)
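Taken together, the rotation self-supervision pieces added in this PR form a small pipeline: `SSLRotateResize` expands each training image into four rotated views labeled 0-3, `SSLRotateCollate` concatenates those stacks across the batch, and the `ClsHead` configured with `class_dim: 4` in `ch_PP-OCRv3_rotnet.yml` learns to predict the rotation. The sketch below mirrors only the collate step, with illustrative shapes.

```python
import numpy as np

# Two samples, each already expanded by SSLRotateResize into a stack of four
# rotated views (4 x 3 x H x W) plus labels [0, 1, 2, 3].
batch = [
    (np.zeros((4, 3, 48, 320), dtype="float32"), np.arange(4)),
    (np.ones((4, 3, 48, 320), dtype="float32"), np.arange(4)),
]

# SSLRotateCollate concatenates along axis 0, so the classification head sees
# every rotated view as an independent example.
images, labels = [np.concatenate(d, axis=0) for d in zip(*batch)]
print(images.shape)  # (8, 3, 48, 320)
print(labels)        # [0 1 2 3 0 1 2 3]
```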