diff --git a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml index e001c376420c4210c69df90fa6564b1d903cf116..87718cad68743880dbfcd6b4c574bb09b9f53204 100644 --- a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml +++ b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1200 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [3000, 2000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained checkpoints: diff --git a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml index 4229248df484d48a86d80c7362165f6a11acf32c..7b07ef99648956a70b5a71f1e61f09b592226f90 100644 --- a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml +++ b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1200 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [3000, 2000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet18_vd_pretrained checkpoints: diff --git a/configs/det/det_mv3_db.yml b/configs/det/det_mv3_db.yml index f8aab70543e15ca7e09d95cb3ea3da639692e170..b69ed58cd6f64b82b419d46f4f91458d7d167d84 100644 --- a/configs/det/det_mv3_db.yml +++ b/configs/det/det_mv3_db.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1200 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained checkpoints: diff --git a/configs/det/det_mv3_east.yml b/configs/det/det_mv3_east.yml index 187ac16054534da4f916c48dfb1b0e36441d331c..4ae32ab004684f040b5939be7f348d468b4b8024 100644 --- a/configs/det/det_mv3_east.yml +++ b/configs/det/det_mv3_east.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. 
- load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained checkpoints: diff --git a/configs/det/det_r50_vd_db.yml b/configs/det/det_r50_vd_db.yml index 3fa8948d17f1d84943c444035b8521e219e97209..42b3898ef463b8dba6338f37824ade0c93794212 100644 --- a/configs/det/det_r50_vd_db.yml +++ b/configs/det/det_r50_vd_db.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1200 # evaluation is run every 2000 iterations eval_batch_step: [0,2000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained checkpoints: diff --git a/configs/det/det_r50_vd_east.yml b/configs/det/det_r50_vd_east.yml index abef0b6116762fb2d5a1c8e7a797ed27832ffe55..0253c5bd9940fa6c0ec7da2c6639c1bc060842ca 100644 --- a/configs/det/det_r50_vd_east.yml +++ b/configs/det/det_r50_vd_east.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_pretrained/ checkpoints: diff --git a/configs/det/det_r50_vd_sast_icdar15.yml b/configs/det/det_r50_vd_sast_icdar15.yml index c90327b22b9c73111c997e84cfdd47d0721ee5b9..dbfcefca964e73d42298fbbbc1e654b3bd809c77 100755 --- a/configs/det/det_r50_vd_sast_icdar15.yml +++ b/configs/det/det_r50_vd_sast_icdar15.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ checkpoints: diff --git a/configs/det/det_r50_vd_sast_totaltext.yml b/configs/det/det_r50_vd_sast_totaltext.yml index e6f467c6ec78d453ce56fc0c9dffa35b71cb24c7..88dd31f3c21b184d956ad718dae808bb6054532e 100755 --- a/configs/det/det_r50_vd_sast_totaltext.yml +++ b/configs/det/det_r50_vd_sast_totaltext.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. 
- load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ checkpoints: diff --git a/configs/e2e/e2e_r50_vd_pg.yml b/configs/e2e/e2e_r50_vd_pg.yml index 8128bde15faa089933022e4ecc46de77da92627c..4a6e19f4461c7236f3a9a5253437eff97fa72f67 100644 --- a/configs/e2e/e2e_r50_vd_pg.yml +++ b/configs/e2e/e2e_r50_vd_pg.yml @@ -7,11 +7,6 @@ Global: save_epoch_step: 10 # evaluation is run every 0 iterationss after the 1000th iteration eval_batch_step: [ 0, 1000 ] - # 1. If pretrained_model is saved in static mode, such as classification pretrained model - # from static branch, load_static_weights must be set as True. - # 2. If you want to finetune the pretrained models we provide in the docs, - # you should set load_static_weights as False. - load_static_weights: False cal_metric_during_train: False pretrained_model: checkpoints: diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md index a8dee65a220e3c66d8502181dd2a542cb01a29b5..13e60920eb58b6be1ac3905758ef70fe61f74388 100644 --- a/doc/doc_ch/detection.md +++ b/doc/doc_ch/detection.md @@ -45,26 +45,17 @@ json.dumps编码前的图像标注信息是包含多个字典的list,字典中 ## 快速启动训练 首先下载模型backbone的pretrain model,PaddleOCR的检测模型目前支持两种backbone,分别是MobileNetV3、ResNet_vd系列, -您可以根据需求使用[PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/master/ppcls/modeling/architectures)中的模型更换backbone。 +您可以根据需求使用[PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/master/ppcls/modeling/architectures)中的模型更换backbone, +对应的backbone预训练模型可以从[PaddleClas repo 主页中找到下载链接](https://github.com/PaddlePaddle/PaddleClas#mobile-series)。 ```shell cd PaddleOCR/ +# 根据backbone的不同选择下载对应的预训练模型 # 下载MobileNetV3的预训练模型 -wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x0_5_pretrained.tar +wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams # 或,下载ResNet18_vd的预训练模型 -wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/ResNet18_vd_pretrained.tar +wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet18_vd_pretrained.pdparams # 或,下载ResNet50_vd的预训练模型 -wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_ssld_pretrained.tar - -# 解压预训练模型文件,以MobileNetV3为例 -tar -xf ./pretrain_models/MobileNetV3_large_x0_5_pretrained.tar ./pretrain_models/ - -# 注:正确解压backbone预训练权重文件后,文件夹下包含众多以网络层命名的权重文件,格式如下: -./pretrain_models/MobileNetV3_large_x0_5_pretrained/ - └─ conv_last_bn_mean - └─ conv_last_bn_offset - └─ conv_last_bn_scale - └─ conv_last_bn_variance - └─ ...... 
+wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams ``` @@ -120,16 +111,16 @@ python3 tools/eval.py -c configs/det/det_mv3_db.yml -o Global.checkpoints="{pat 测试单张图像的检测效果 ```shell -python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" Global.load_static_weights=false +python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" ``` 测试DB模型时,调整后处理阈值, ```shell -python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" Global.load_static_weights=false PostProcess.box_thresh=0.6 PostProcess.unclip_ratio=1.5 +python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" PostProcess.box_thresh=0.6 PostProcess.unclip_ratio=1.5 ``` 测试文件夹下所有图像的检测效果 ```shell -python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/" Global.pretrained_model="./output/det_db/best_accuracy" Global.load_static_weights=false +python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/" Global.pretrained_model="./output/det_db/best_accuracy" ``` diff --git a/doc/doc_ch/inference.md b/doc/doc_ch/inference.md index 547e29be521bef5cf20ef4d6356c5a5a7bf09f69..97e3b92bdf19874927d1cc87e5b14ed7197c3184 100755 --- a/doc/doc_ch/inference.md +++ b/doc/doc_ch/inference.md @@ -49,10 +49,9 @@ wget -P ./ch_lite/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobi # -c 后面设置训练算法的yml配置文件 # -o 配置可选参数 # Global.pretrained_model 参数设置待转换的训练模型地址,不用添加文件后缀 .pdmodel,.pdopt或.pdparams。 -# Global.load_static_weights 参数需要设置为 False。 # Global.save_inference_dir参数设置转换的模型将保存的地址。 -python3 tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_db/ +python3 tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_inference_dir=./inference/det_db/ ``` 转inference模型时,使用的配置文件和训练时使用的配置文件相同。另外,还需要设置配置文件中的`Global.pretrained_model`参数,其指向训练中保存的模型参数文件。 转换成功后,在模型保存目录下有三个文件: @@ -76,10 +75,9 @@ wget -P ./ch_lite/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobi # -c 后面设置训练算法的yml配置文件 # -o 配置可选参数 # Global.pretrained_model 参数设置待转换的训练模型地址,不用添加文件后缀 .pdmodel,.pdopt或.pdparams。 -# Global.load_static_weights 参数需要设置为 False。 # Global.save_inference_dir参数设置转换的模型将保存的地址。 -python3 tools/export_model.py -c configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_rec_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/rec_crnn/ +python3 tools/export_model.py -c configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_rec_train/best_accuracy Global.save_inference_dir=./inference/rec_crnn/ ``` **注意:**如果您是在自己的数据集上训练的模型,并且调整了中文字符的字典文件,请注意修改配置文件中的`character_dict_path`是否是所需要的字典文件。 @@ -105,10 +103,9 @@ wget -P ./ch_lite/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobi # -c 后面设置训练算法的yml配置文件 # 
-o 配置可选参数 # Global.pretrained_model 参数设置待转换的训练模型地址,不用添加文件后缀 .pdmodel,.pdopt或.pdparams。 -# Global.load_static_weights 参数需要设置为 False。 # Global.save_inference_dir参数设置转换的模型将保存的地址。 -python3 tools/export_model.py -c configs/cls/cls_mv3.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_cls_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/cls/ +python3 tools/export_model.py -c configs/cls/cls_mv3.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_cls_train/best_accuracy Global.save_inference_dir=./inference/cls/ ``` 转换成功后,在目录下有三个文件: @@ -164,7 +161,7 @@ python3 tools/infer/predict_det.py --image_dir="./doc/imgs/2.jpg" --det_model_di 首先将DB文本检测训练过程中保存的模型,转换成inference model。以基于Resnet50_vd骨干网络,在ICDAR2015英文数据集训练的模型为例( [模型下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar) ),可以使用如下命令进行转换: ``` -python3 tools/export_model.py -c configs/det/det_r50_vd_db.yml -o Global.pretrained_model=./det_r50_vd_db_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_db +python3 tools/export_model.py -c configs/det/det_r50_vd_db.yml -o Global.pretrained_model=./det_r50_vd_db_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_db ``` DB文本检测模型推理,可以执行如下命令: @@ -185,7 +182,7 @@ python3 tools/infer/predict_det.py --image_dir="./doc/imgs_en/img_10.jpg" --det_ 首先将EAST文本检测训练过程中保存的模型,转换成inference model。以基于Resnet50_vd骨干网络,在ICDAR2015英文数据集训练的模型为例( [模型下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar) ),可以使用如下命令进行转换: ``` -python3 tools/export_model.py -c configs/det/det_r50_vd_east.yml -o Global.pretrained_model=./det_r50_vd_east_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_east +python3 tools/export_model.py -c configs/det/det_r50_vd_east.yml -o Global.pretrained_model=./det_r50_vd_east_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_east ``` **EAST文本检测模型推理,需要设置参数`--det_algorithm="EAST"`**,可以执行如下命令: @@ -205,7 +202,7 @@ python3 tools/infer/predict_det.py --det_algorithm="EAST" --image_dir="./doc/img #### (1). 
四边形文本检测模型(ICDAR2015)
首先将SAST文本检测训练过程中保存的模型,转换成inference model。以基于Resnet50_vd骨干网络,在ICDAR2015英文数据集训练的模型为例([模型下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)),可以使用如下命令进行转换:
```
-python3 tools/export_model.py -c configs/det/det_r50_vd_sast_icdar15.yml -o Global.pretrained_model=./det_r50_vd_sast_icdar15_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_sast_ic15
+python3 tools/export_model.py -c configs/det/det_r50_vd_sast_icdar15.yml -o Global.pretrained_model=./det_r50_vd_sast_icdar15_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_sast_ic15
```
**SAST文本检测模型推理,需要设置参数`--det_algorithm="SAST"`**,可以执行如下命令:
@@ -220,7 +217,7 @@ python3 tools/infer/predict_det.py --det_algorithm="SAST" --image_dir="./doc/img
首先将SAST文本检测训练过程中保存的模型,转换成inference model。以基于Resnet50_vd骨干网络,在Total-Text英文数据集训练的模型为例([模型下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)),可以使用如下命令进行转换:
```
-python3 tools/export_model.py -c configs/det/det_r50_vd_sast_totaltext.yml -o Global.pretrained_model=./det_r50_vd_sast_totaltext_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_sast_tt
+python3 tools/export_model.py -c configs/det/det_r50_vd_sast_totaltext.yml -o Global.pretrained_model=./det_r50_vd_sast_totaltext_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_sast_tt
```
@@ -270,7 +267,7 @@ Predicts of ./doc/imgs_words/ch/word_4.jpg:('实力活力', 0.98458153)
的模型为例( [模型下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar) ),可以使用如下命令进行转换:
```
-python3 tools/export_model.py -c configs/rec/rec_r34_vd_none_bilstm_ctc.yml -o Global.pretrained_model=./rec_r34_vd_none_bilstm_ctc_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/rec_crnn
+python3 tools/export_model.py -c configs/rec/rec_r34_vd_none_bilstm_ctc.yml -o Global.pretrained_model=./rec_r34_vd_none_bilstm_ctc_v2.0_train/best_accuracy Global.save_inference_dir=./inference/rec_crnn
```
CRNN 文本识别模型推理,可以执行如下命令:
diff --git a/doc/doc_en/detection_en.md b/doc/doc_en/detection_en.md
index 3ee9092cc6a6f50b19f20df646c9cb1949d5d80f..27223775c7576c2b35683e342c289e6dea1cf2ca 100644
--- a/doc/doc_en/detection_en.md
+++ b/doc/doc_en/detection_en.md
@@ -39,27 +39,17 @@ If you want to train PaddleOCR on other datasets, please build the annotation fi
## TRAINING
First download the pretrained model. The detection model of PaddleOCR currently supports 3 backbones, namely MobileNetV3, ResNet18_vd and ResNet50_vd. You can use the model in [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/master/ppcls/modeling/architectures) to replace backbone according to your needs.
+The corresponding download links for the backbone pretrained weights can be found in the [PaddleClas repo](https://github.com/PaddlePaddle/PaddleClas#mobile-series).
```shell
cd PaddleOCR/
# Download the pre-trained model of MobileNetV3
-wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x0_5_pretrained.tar
+wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams
# or, download the pre-trained model of ResNet18_vd
-wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/ResNet18_vd_pretrained.tar
+wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet18_vd_pretrained.pdparams
# or, download the pre-trained model of ResNet50_vd
-wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_ssld_pretrained.tar
+wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams
-# decompressing the pre-training model file, take MobileNetV3 as an example
-tar -xf ./pretrain_models/MobileNetV3_large_x0_5_pretrained.tar ./pretrain_models/
-
-# Note: After decompressing the backbone pre-training weight file correctly, the file list in the folder is as follows:
-./pretrain_models/MobileNetV3_large_x0_5_pretrained/
- └─ conv_last_bn_mean
- └─ conv_last_bn_offset
- └─ conv_last_bn_scale
- └─ conv_last_bn_variance
- └─ ......
-
```
#### START TRAINING
*If CPU version installed, please set the parameter `use_gpu` to `false` in the configuration.*
@@ -113,16 +102,16 @@ python3 tools/eval.py -c configs/det/det_mv3_db.yml -o Global.checkpoints="{pat
Test the detection result on a single image:
```shell
-python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" Global.load_static_weights=false
+python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy"
```
When testing the DB model, adjust the post-processing threshold:
```shell
-python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" Global.load_static_weights=false PostProcess.box_thresh=0.6 PostProcess.unclip_ratio=1.5
+python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy" PostProcess.box_thresh=0.6 PostProcess.unclip_ratio=1.5
```
Test the detection result on all images in the folder:
```shell
-python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/" Global.pretrained_model="./output/det_db/best_accuracy" Global.load_static_weights=false
+python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/" Global.pretrained_model="./output/det_db/best_accuracy"
```
diff --git a/doc/doc_en/inference_en.md b/doc/doc_en/inference_en.md
index 2ada84a08dcb228d5749bd03e0b89283dad89a77..a78bb6b6fef98f8d025b8f21a9ecfe67bb2af007 100755
--- a/doc/doc_en/inference_en.md
+++ b/doc/doc_en/inference_en.md
@@ -52,10 +52,9 @@ The above model is a DB algorithm trained with MobileNetV3 as the backbone. To c
# -c Set the training algorithm yml configuration file
# -o Set optional parameters
# Global.pretrained_model parameter Set the training model address to be converted without adding the file suffix .pdmodel, .pdopt or .pdparams.
-# Global.load_static_weights needs to be set to False # Global.save_inference_dir Set the address where the converted model will be saved. -python3 tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_db/ +python3 tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_inference_dir=./inference/det_db/ ``` When converting to an inference model, the configuration file used is the same as the configuration file used during training. In addition, you also need to set the `Global.pretrained_model` parameter in the configuration file. @@ -80,10 +79,9 @@ The recognition model is converted to the inference model in the same way as the # -c Set the training algorithm yml configuration file # -o Set optional parameters # Global.pretrained_model parameter Set the training model address to be converted without adding the file suffix .pdmodel, .pdopt or .pdparams. -# Global.load_static_weights needs to be set to False # Global.save_inference_dir Set the address where the converted model will be saved. -python3 tools/export_model.py -c configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_rec_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/rec_crnn/ +python3 tools/export_model.py -c configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_rec_train/best_accuracy Global.save_inference_dir=./inference/rec_crnn/ ``` If you have a model trained on your own dataset with a different dictionary file, please make sure that you modify the `character_dict_path` in the configuration file to your dictionary file path. @@ -109,10 +107,9 @@ The angle classification model is converted to the inference model in the same w # -c Set the training algorithm yml configuration file # -o Set optional parameters # Global.pretrained_model parameter Set the training model address to be converted without adding the file suffix .pdmodel, .pdopt or .pdparams. -# Global.load_static_weights needs to be set to False # Global.save_inference_dir Set the address where the converted model will be saved. -python3 tools/export_model.py -c configs/cls/cls_mv3.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_cls_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/cls/ +python3 tools/export_model.py -c configs/cls/cls_mv3.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_cls_train/best_accuracy Global.save_inference_dir=./inference/cls/ ``` After the conversion is successful, there are two files in the directory: @@ -171,7 +168,7 @@ python3 tools/infer/predict_det.py --image_dir="./doc/imgs/22.jpg" --det_model_d First, convert the model saved in the DB text detection training process into an inference model. 
Taking the model based on the Resnet50_vd backbone network and trained on the ICDAR2015 English dataset as an example ([model download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar)), you can use the following command to convert: ``` -python3 tools/export_model.py -c configs/det/det_r50_vd_db.yml -o Global.pretrained_model=./det_r50_vd_db_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_db +python3 tools/export_model.py -c configs/det/det_r50_vd_db.yml -o Global.pretrained_model=./det_r50_vd_db_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_db ``` DB text detection model inference, you can execute the following command: @@ -192,7 +189,7 @@ The visualized text detection results are saved to the `./inference_results` fol First, convert the model saved in the EAST text detection training process into an inference model. Taking the model based on the Resnet50_vd backbone network and trained on the ICDAR2015 English dataset as an example ([model download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar)), you can use the following command to convert: ``` -python3 tools/export_model.py -c configs/det/det_r50_vd_east.yml -o Global.pretrained_model=./det_r50_vd_east_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_east +python3 tools/export_model.py -c configs/det/det_r50_vd_east.yml -o Global.pretrained_model=./det_r50_vd_east_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_east ``` **For EAST text detection model inference, you need to set the parameter ``--det_algorithm="EAST"``**, run the following command: @@ -213,7 +210,7 @@ The visualized text detection results are saved to the `./inference_results` fol First, convert the model saved in the SAST text detection training process into an inference model. Taking the model based on the Resnet50_vd backbone network and trained on the ICDAR2015 English dataset as an example ([model download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar)), you can use the following command to convert: ``` -python3 tools/export_model.py -c configs/det/det_r50_vd_sast_icdar15.yml -o Global.pretrained_model=./det_r50_vd_sast_icdar15_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_sast_ic15 +python3 tools/export_model.py -c configs/det/det_r50_vd_sast_icdar15.yml -o Global.pretrained_model=./det_r50_vd_sast_icdar15_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_sast_ic15 ``` **For SAST quadrangle text detection model inference, you need to set the parameter `--det_algorithm="SAST"`**, run the following command: @@ -230,7 +227,7 @@ The visualized text detection results are saved to the `./inference_results` fol First, convert the model saved in the SAST text detection training process into an inference model. 
Taking the model based on the Resnet50_vd backbone network and trained on the Total-Text English dataset as an example ([model download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar)), you can use the following command to convert:
```
-python3 tools/export_model.py -c configs/det/det_r50_vd_sast_totaltext.yml -o Global.pretrained_model=./det_r50_vd_sast_totaltext_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/det_sast_tt
+python3 tools/export_model.py -c configs/det/det_r50_vd_sast_totaltext.yml -o Global.pretrained_model=./det_r50_vd_sast_totaltext_v2.0_train/best_accuracy Global.save_inference_dir=./inference/det_sast_tt
```
**For SAST curved text detection model inference, you need to set the parameter `--det_algorithm="SAST"` and `--det_sast_polygon=True`**, run the following command:
@@ -279,7 +276,7 @@ Taking CRNN as an example, we introduce the recognition model inference based on
First, convert the model saved in the CRNN text recognition training process into an inference model. Taking the model based on Resnet34_vd backbone network, using MJSynth and SynthText (two English text recognition synthetic datasets) for training, as an example ([model download address](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar)). It can be converted as follow:
```
-python3 tools/export_model.py -c configs/det/rec_r34_vd_none_bilstm_ctc.yml -o Global.pretrained_model=./rec_r34_vd_none_bilstm_ctc_v2.0_train/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/rec_crnn
+python3 tools/export_model.py -c configs/rec/rec_r34_vd_none_bilstm_ctc.yml -o Global.pretrained_model=./rec_r34_vd_none_bilstm_ctc_v2.0_train/best_accuracy Global.save_inference_dir=./inference/rec_crnn
```
For CRNN text recognition model inference, execute the following commands:
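For quick reference, a minimal shell sketch of the workflow after this change, assembled only from commands that already appear in the updated docs above; the config file, checkpoint, and output paths are the documented examples and should be adjusted to your own setup:

```shell
# Backbone pretrained weights are now single dygraph .pdparams files -- no tar archive to unpack.
cd PaddleOCR/
wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams

# Test detection with a trained checkpoint; Global.load_static_weights is no longer passed.
python3 tools/infer_det.py -c configs/det/det_mv3_db.yml -o Global.infer_img="./doc/imgs_en/img_10.jpg" Global.pretrained_model="./output/det_db/best_accuracy"

# Export a trained model to an inference model; only pretrained_model and save_inference_dir are set.
python3 tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./ch_lite/ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_inference_dir=./inference/det_db/
```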