diff --git a/configs/cls/cls_mv3.yml b/configs/cls/cls_mv3.yml
index b165bc4830f01f0e63c43b1d6a9635e432dc7605..5e643dc3839b2e2edf3c811db813dd6a90797366 100644
--- a/configs/cls/cls_mv3.yml
+++ b/configs/cls/cls_mv3.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [0, 1000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
@@ -93,4 +92,4 @@ Eval:
     shuffle: False
     drop_last: False
    batch_size_per_card: 512
-    num_workers: 4
\ No newline at end of file
+    num_workers: 4
diff --git a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml
index fd88495928b18c300386c6a9fd0cf57d840db21e..e001c376420c4210c69df90fa6564b1d903cf116 100644
--- a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml
+++ b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1200
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [3000, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
diff --git a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml
index 2694601254935be7d003148681334263d734579a..4229248df484d48a86d80c7362165f6a11acf32c 100644
--- a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml
+++ b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1200
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [3000, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/ResNet18_vd_pretrained
diff --git a/configs/det/det_mv3_db.yml b/configs/det/det_mv3_db.yml
index 00a16b5a38f2a271825a470684be401944eafec0..f8aab70543e15ca7e09d95cb3ea3da639692e170 100644
--- a/configs/det/det_mv3_db.yml
+++ b/configs/det/det_mv3_db.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1200
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
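The same four-line comment is applied to every detection config below. As a quick illustration (not part of the patch), the two cases it describes can be exercised through the `-o` override of `tools/train.py`; the pretrained-model paths here are placeholders, not values prescribed by this change:

```shell
# Case 1: backbone weights exported from the static-graph branch, e.g. the
# ImageNet-pretrained MobileNetV3 weights referenced in the configs above.
python3 tools/train.py -c configs/det/det_mv3_db.yml \
    -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained \
       Global.load_static_weights=True

# Case 2: finetuning from a dynamic-graph model provided in the docs
# (the checkpoint path is illustrative).
python3 tools/train.py -c configs/det/det_mv3_db.yml \
    -o Global.pretrained_model=./pretrain_models/ch_ppocr_mobile_v2.0_det_train/best_accuracy \
       Global.load_static_weights=False
```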
diff --git a/configs/det/det_mv3_east.yml b/configs/det/det_mv3_east.yml
index 05581a761cc91abce6d1e83bcdd7aacef1349148..187ac16054534da4f916c48dfb1b0e36441d331c 100644
--- a/configs/det/det_mv3_east.yml
+++ b/configs/det/det_mv3_east.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1000
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [4000, 5000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
diff --git a/configs/det/det_r50_vd_db.yml b/configs/det/det_r50_vd_db.yml
index 19c059d6737f9e98f33e6fc3b074587b24361dfc..3fa8948d17f1d84943c444035b8521e219e97209 100644
--- a/configs/det/det_r50_vd_db.yml
+++ b/configs/det/det_r50_vd_db.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1200
   # evaluation is run every 2000 iterations
   eval_batch_step: [0,2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained
diff --git a/configs/det/det_r50_vd_east.yml b/configs/det/det_r50_vd_east.yml
index b8fe55d4ac5473f6c0392820f5b1f651448bdddd..abef0b6116762fb2d5a1c8e7a797ed27832ffe55 100644
--- a/configs/det/det_r50_vd_east.yml
+++ b/configs/det/det_r50_vd_east.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1000
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [4000, 5000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/ResNet50_vd_pretrained/
diff --git a/configs/det/det_r50_vd_sast_icdar15.yml b/configs/det/det_r50_vd_sast_icdar15.yml
index a989bc8fc754ca88e3bff2de2a6db1060301fdd5..c24cae90132c68d662e9edb7a7975e358fb40d9c 100755
--- a/configs/det/det_r50_vd_sast_icdar15.yml
+++ b/configs/det/det_r50_vd_sast_icdar15.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1000
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [4000, 5000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/
diff --git a/configs/det/det_r50_vd_sast_totaltext.yml b/configs/det/det_r50_vd_sast_totaltext.yml
index e040c4207e497a7bf237a84c9c8d1b7c33a2dde8..e6f467c6ec78d453ce56fc0c9dffa35b71cb24c7 100755
--- a/configs/det/det_r50_vd_sast_totaltext.yml
+++ b/configs/det/det_r50_vd_sast_totaltext.yml
@@ -7,7 +7,10 @@ Global:
   save_epoch_step: 1000
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [4000, 5000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
+  # 1. If pretrained_model is saved in static mode, such as classification pretrained model
+  # from static branch, load_static_weights must be set as True.
+  # 2. If you want to finetune the pretrained models we provide in the docs,
+  # you should set load_static_weights as False.
   load_static_weights: True
   cal_metric_during_train: False
   pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/
diff --git a/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml b/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml
index 1db3e1cb8633f03f91d1d44064a19f7661e57b12..6a524e22cf4dea4c573d6b67e752c8527e973185 100644
--- a/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml
+++ b/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml b/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml
index dc9d650f30f3d1086616a81c27aaf5db389a1fe7..c96621c5684f3861a7bc2f5aa8d9684e6512e228 100644
--- a/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml
+++ b/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_icdar15_train.yml b/configs/rec/rec_icdar15_train.yml
index 8a743b501618dc6dabf9b1d419431a148236c690..5ae47c67d8b062746d422daac44011fb5aca38e2 100644
--- a/configs/rec/rec_icdar15_train.yml
+++ b/configs/rec/rec_icdar15_train.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_mv3_none_bilstm_ctc.yml b/configs/rec/rec_mv3_none_bilstm_ctc.yml
index 00c1db885e000d80ed3c3f42c2afbaa11c452ab5..900e98b6b34de824d9afb91b1867a86fe2debc24 100644
--- a/configs/rec/rec_mv3_none_bilstm_ctc.yml
+++ b/configs/rec/rec_mv3_none_bilstm_ctc.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_mv3_none_none_ctc.yml b/configs/rec/rec_mv3_none_none_ctc.yml
index 6711b1d23f843551d72e1dffc003637734727754..6d86b90c007cab9708cab6db6d8e3045dd5187fb 100644
--- a/configs/rec/rec_mv3_none_none_ctc.yml
+++ b/configs/rec/rec_mv3_none_none_ctc.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_mv3_tps_bilstm_att.yml b/configs/rec/rec_mv3_tps_bilstm_att.yml
index 0ce067343c0fbcff9bb204ed6902a2566d2b769c..1b10410a73e768a3bbcd2ee7068e8a00f5d999cb 100644
--- a/configs/rec/rec_mv3_tps_bilstm_att.yml
+++ b/configs/rec/rec_mv3_tps_bilstm_att.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_mv3_tps_bilstm_ctc.yml b/configs/rec/rec_mv3_tps_bilstm_ctc.yml
index 4e86709942bcde410dc22df439fdd40e9a94fdef..026c6a9dfbd6b6b543c0b4260c43cbf98e192e7b 100644
--- a/configs/rec/rec_mv3_tps_bilstm_ctc.yml
+++ b/configs/rec/rec_mv3_tps_bilstm_ctc.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml
index e4d301a6a173ea772898c0528c4b3082670870ff..4052d426e51aa8c6e82ec216cfd65226922be602 100644
--- a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml
+++ b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_r34_vd_none_none_ctc.yml b/configs/rec/rec_r34_vd_none_none_ctc.yml
index 4a17a004228185db7e52dd71aadcff36d407d2cf..c3e1d9a3a91ab6a51e28d458623aea788b952ca0 100644
--- a/configs/rec/rec_r34_vd_none_none_ctc.yml
+++ b/configs/rec/rec_r34_vd_none_none_ctc.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml
index 02aeb8c522a62b9a3c6b90f818df5081428b652e..e25eca9545b8c688f121f3ea239592493333e1ae 100644
--- a/configs/rec/rec_r34_vd_tps_bilstm_att.yml
+++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
index 62edf84379ec1be9ef5f7155b240099f5fbb7b00..8b80b82dd6e16f4b4a921f7a36531755852bd70a 100644
--- a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
+++ b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 2000 iterations
   eval_batch_step: [0, 2000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/configs/rec/rec_r50_fpn_srn.yml b/configs/rec/rec_r50_fpn_srn.yml
index ec7f170560f5309818d537953a93c180b9de0bb7..6285c95dfab5146024f70c50635de4ba480d79a1 100644
--- a/configs/rec/rec_r50_fpn_srn.yml
+++ b/configs/rec/rec_r50_fpn_srn.yml
@@ -7,7 +7,6 @@ Global:
   save_epoch_step: 3
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [0, 5000]
-  # if pretrained_model is saved in static mode, load_static_weights must set to True
   cal_metric_during_train: True
   pretrained_model:
   checkpoints:
diff --git a/deploy/cpp_infer/docs/windows_vs2019_build.md b/deploy/cpp_infer/docs/windows_vs2019_build.md
index 21fbf4e0eb95ee82475164047d8051e90e9e224f..0f243bf8f54b5cd50e9fa2faab29b064e694e45c 100644
--- a/deploy/cpp_infer/docs/windows_vs2019_build.md
+++ b/deploy/cpp_infer/docs/windows_vs2019_build.md
@@ -14,7 +14,7 @@ PaddleOCR在Windows 平台下基于`Visual Studio 2019 Community` 进行了测
 ### Step1: 下载PaddlePaddle C++ 预测库 fluid_inference
 
-PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html)
+PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/windows_cpp_inference.html)
 
 解压后`D:\projects\fluid_inference`目录包含内容为:
 
 ```
diff --git a/deploy/cpp_infer/readme.md b/deploy/cpp_infer/readme.md
index b563ecf48c2aba03e25a03ae0328c244bb900356..41836915104c9d6a12b393ba6f0cd70de9809580 100644
--- a/deploy/cpp_infer/readme.md
+++ b/deploy/cpp_infer/readme.md
@@ -72,9 +72,21 @@ opencv3/
 
 * 有2种方式获取Paddle预测库,下面进行详细介绍。
 
-#### 1.2.1 预测库源码编译
+#### 1.2.1 直接下载安装
+
+* [Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本。
+
+* 下载之后使用下面的方法解压。
+
+```
+tar -xf paddle_inference.tgz
+```
+
+最终会在当前的文件夹中生成`paddle_inference/`的子文件夹。
+
+#### 1.2.2 预测库源码编译
 * 如果希望获取最新预测库特性,可以从Paddle github上克隆最新代码,源码编译预测库。
-* 可以参考[Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)的说明,从github上获取Paddle代码,然后进行编译,生成最新的预测库。使用git获取代码方法如下。
+* 可以参考[Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)的说明,从github上获取Paddle代码,然后进行编译,生成最新的预测库。使用git获取代码方法如下。
 
 ```shell
 git clone https://github.com/PaddlePaddle/Paddle.git
@@ -100,7 +112,7 @@ make -j
 make inference_lib_dist
 ```
 
-更多编译参数选项可以参考Paddle C++预测库官网:[https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)。
+更多编译参数选项可以参考Paddle C++预测库官网:[https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)。
 
 * 编译完成之后,可以在`build/paddle_inference_install_dir/`文件下看到生成了以下文件及文件夹。
 
@@ -115,17 +127,7 @@ build/paddle_inference_install_dir/
 
 其中`paddle`就是C++预测所需的Paddle库,`version.txt`中包含当前预测库的版本信息。
 
-#### 1.2.2 直接下载安装
-
-* [Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本。
-* 下载之后使用下面的方法解压。
-
-```
-tar -xf paddle_inference.tgz
-```
-
-最终会在当前的文件夹中生成`paddle_inference/`的子文件夹。
 
 
 ## 2 开始运行
 
@@ -223,7 +225,7 @@ char_list_file ../../ppocr/utils/ppocr_keys_v1.txt # 字典文件
 visualize 1 # 是否对结果进行可视化,为1时,会在当前文件夹下保存文件名为`ocr_vis.png`的预测结果。
 ```
 
-* PaddleOCR也支持多语言的预测,更多细节可以参考[识别文档](../../doc/doc_ch/recognition.md)中的多语言字典与模型部分。
+* PaddleOCR也支持多语言的预测,更多支持的语言和模型可以参考[识别文档](../../doc/doc_ch/recognition.md)中的多语言字典与模型部分,如果希望进行多语言预测,只需修改`tools/config.txt`中的`char_list_file`(字典文件路径)以及`rec_model_dir`(inference模型路径)字段即可。
 
 最终屏幕上会输出检测结果如下。
 
@@ -234,4 +236,4 @@ visualize 1 # 是否对结果进行可视化,为1时,会在当前文件夹
 
 ### 2.3 注意
 
-* 在使用Paddle预测库时,推荐使用2.0.0-beta0版本的预测库。
+* 在使用Paddle预测库时,推荐使用2.0.0版本的预测库。
diff --git a/deploy/cpp_infer/readme_en.md b/deploy/cpp_infer/readme_en.md
index 41c764bc18a69965da6ad2ea521f438840c286e6..6bc49e9479cbe9bae927b1f3772cb5082de97d30 100644
--- a/deploy/cpp_infer/readme_en.md
+++ b/deploy/cpp_infer/readme_en.md
@@ -74,10 +74,23 @@ opencv3/
 
 * There are 2 ways to obtain the Paddle inference library, described in detail below.
 
+#### 1.2.1 Direct download and installation
 
-#### 1.2.1 Compile from the source code
+* Different cuda versions of the Linux inference library (based on GCC 4.8.2) are provided on the
+[Paddle inference library official website](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html). You can view and select the appropriate version of the inference library on the official website.
+
+
+* After downloading, use the following method to uncompress.
+
+```
+tar -xf paddle_inference.tgz
+```
+
+Finally you can see the following files in the folder of `paddle_inference/`.
+
+#### 1.2.2 Compile from the source code
 * If you want to get the latest Paddle inference library features, you can download the latest code from Paddle github repository and compile the inference library from the source code.
-* You can refer to [Paddle inference library] (https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html) to get the Paddle source code from github, and then compile To generate the latest inference library. The method of using git to access the code is as follows.
+* You can refer to the [Paddle inference library](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html) to get the Paddle source code from github, and then compile it to generate the latest inference library. The method of using git to access the code is as follows.
 
 
 ```shell
@@ -104,7 +117,7 @@ make -j
 make inference_lib_dist
 ```
 
-For more compilation parameter options, please refer to the official website of the Paddle C++ inference library:[https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html](https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html).
+For more compilation parameter options, please refer to the official website of the Paddle C++ inference library: [https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html).
 
 * After the compilation process, you can see the following files in the folder of `build/paddle_inference_install_dir/`.
 
@@ -120,22 +133,6 @@ build/paddle_inference_install_dir/
 
 Among them, `paddle` is the Paddle library required for C++ prediction later, and `version.txt` contains the version information of the current inference library.
 
-
-#### 1.2.2 Direct download and installation
-
-* Different cuda versions of the Linux inference library (based on GCC 4.8.2) are provided on the
-[Paddle inference library official website](https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html). You can view and select the appropriate version of the inference library on the official website.
-
-
-* After downloading, use the following method to uncompress.
-
-```
-tar -xf paddle_inference.tgz
-```
-
-Finally you can see the following files in the folder of `paddle_inference/`.
-
-
 ## 2. Compile and run the demo
 
 ### 2.1 Export the inference model
@@ -233,7 +230,7 @@ char_list_file ../../ppocr/utils/ppocr_keys_v1.txt # dictionary file
 visualize 1 # Whether to visualize the results,when it is set as 1, The prediction result will be save in the image file `./ocr_vis.png`.
 ```
 
-* Multi-language inference is also supported in PaddleOCR, for more details, please refer to part of multi-language dictionaries and models in [recognition tutorial](../../doc/doc_en/recognition_en.md).
+* Multi-language inference is also supported in PaddleOCR; for more supported languages and models, please refer to the [recognition tutorial](../../doc/doc_en/recognition_en.md). Specifically, if you want to run inference with multi-language models, you just need to modify the values of `char_list_file` and `rec_model_dir` in the file `tools/config.txt`.
 
 The detection results will be shown on the screen, which is as follows.
 
@@ -245,4 +242,4 @@ The detection results will be shown on the screen, which is as follows.
 
 ### 2.3 Notes
 
-* Paddle2.0.0-beta0 inference model library is recommended for this toturial.
+* The Paddle 2.0.0 inference library is recommended for this tutorial.
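As a hypothetical companion to the multi-language note added above: switching the C++ demo to, say, a French recognition model could look like the sketch below, where the dictionary path and the inference-model directory are assumptions for illustration rather than files shipped by this patch.

```shell
# Hypothetical example: point the C++ demo config at a French dictionary and model.
# Replace both paths with the dictionary and exported inference model you actually use.
sed -i 's|^char_list_file .*|char_list_file ../../ppocr/utils/dict/french_dict.txt # dictionary file|' tools/config.txt
sed -i 's|^rec_model_dir .*|rec_model_dir ./inference/rec_french/ # recognition inference model dir|' tools/config.txt
```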
diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md
index fbfb3838b7f860678b10ef4507ebf6c0d4b815c9..efb75f86734a1200ef9ed0a53747e569c6414663 100644
--- a/doc/doc_ch/models_list.md
+++ b/doc/doc_ch/models_list.md
@@ -12,9 +12,14 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训
 
 |模型类型|模型格式|简介|
 |--- | --- | --- |
-|推理模型|inference.pdmodel、inference.pdiparams|用于python预测引擎推理,[详情](./inference.md)|
+|推理模型|inference.pdmodel、inference.pdiparams|用于预测引擎推理,[详情](./inference.md)|
 |训练模型、预训练模型|\*.pdparams、\*.pdopt、\*.states |训练过程中保存的模型的参数、优化器状态和训练中间信息,多用于模型指标评估和恢复训练|
-|slim模型|\*.nb|用于lite部署|
+|slim模型|\*.nb|经过飞桨模型压缩工具PaddleSlim压缩后的模型,适用于移动端/IoT端等端侧部署场景(需使用飞桨Paddle Lite部署)。|
+
+
+各个模型的关系如下面的示意图所示。
+
+![](../imgs/model_prod_flow_ch.png)
 
 
 
diff --git a/doc/doc_en/models_list_en.md b/doc/doc_en/models_list_en.md
index 33033f8348fa4fb08d6e8998ff53cd62349c214e..577f2aa5e334f347065a70e2647f1ed9f5634b0c 100644
--- a/doc/doc_en/models_list_en.md
+++ b/doc/doc_en/models_list_en.md
@@ -12,9 +12,13 @@ The downloadable models provided by PaddleOCR include `inference model`, `traine
 
 |model type|model format|description|
 |--- | --- | --- |
-|inference model|inference.pdmodel、inference.pdiparams|Used for reasoning based on Python prediction engine,[detail](./inference_en.md)|
+|inference model|inference.pdmodel、inference.pdiparams|Used for inference based on the Paddle inference engine, [detail](./inference_en.md)|
 |trained model, pre-trained model|\*.pdparams、\*.pdopt、\*.states |The checkpoints model saved in the training process, which stores the parameters of the model, mostly used for model evaluation and continuous training.|
-|slim model|\*.nb|Generally used for Lite deployment|
+|slim model|\*.nb| Model compressed by PaddleSlim (a model compression tool based on PaddlePaddle), which is suitable for mobile-side deployment scenarios (Paddle-Lite is needed for slim model deployment). |
+
+Relationship of the above models is as follows.
+
+![](../imgs_en/model_prod_flow_en.png)
 
 
 ### 1. Text Detection Model
@@ -80,7 +84,7 @@ If you want to train your own model, you can prepare the training set file, veri
 cd {your/path/}PaddleOCR/configs/rec/multi_language/
 # The -l or --language parameter is required
 # --train modify train_list path
-# --val modify eval_list path
+# --val modify eval_list path
 # --data_dir modify data dir
 # -o modify default parameters
 # --dict Change the dictionary path. The example uses the default dictionary path, so that this parameter can be empty.
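The `--train`, `--val`, `--data_dir`, `-o`, and `--dict` comments touched in the last hunk document the multi-language config generator. A sketch of a possible invocation follows; the script name, the language code, and the dataset paths are assumptions for illustration only, not part of this patch.

```shell
# Assumed example: generate a French recognition config from the multi_language templates.
# Adjust the language code and list/dir paths to your own dataset.
cd {your/path/}PaddleOCR/configs/rec/multi_language/
python3 generate_multi_language_configs.py -l french \
    --train ./train_data/french/train_list.txt \
    --val ./train_data/french/eval_list.txt \
    --data_dir ./train_data/french/ \
    -o Global.use_gpu=True
```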