diff --git a/doc/doc_ch/config.md b/doc/doc_ch/config.md
index fae0c677999b1586bff19b3508cfaeae884b95bd..ae16263e5272641f95d5e8842da08ac65d7a0b12 100644
--- a/doc/doc_ch/config.md
+++ b/doc/doc_ch/config.md
@@ -58,6 +58,6 @@
 | beta1 | 设置一阶矩估计的指数衰减率 | 0.9 | \ |
 | beta2 | 设置二阶矩估计的指数衰减率 | 0.999 | \ |
 | decay | 是否使用decay | \ | \ |
-| function(decay) | 设置decay方式 | cosine_decay | \ |
+| function(decay) | 设置decay方式 | cosine_decay | 目前只支持cosine_decay |
 | step_each_epoch | 每个epoch包含多少次迭代 | 20 | 计算方式:total_image_num / (batch_size_per_card * card_size) |
 | total_epoch | 总共迭代多少个epoch | 1000 | 与Global.epoch_num 一致 |
diff --git a/doc/doc_en/config_en.md b/doc/doc_en/config_en.md
index 80558e7aa3f243036cad3b674562b9c60651db54..41c2bb86c57146b57f451484b1d9397c4d83fbff 100644
--- a/doc/doc_en/config_en.md
+++ b/doc/doc_en/config_en.md
@@ -58,6 +58,6 @@ Take `rec_icdar15_train.yml` as an example:
 | beta1 | Set the exponential decay rate for the 1st moment estimates | 0.9 | \ |
 | beta2 | Set the exponential decay rate for the 2nd moment estimates | 0.999 | \ |
 | decay | Whether to use decay | \ | \ |
-| function(decay) | Set the decay function | cosine_decay | \ |
+| function(decay) | Set the decay function | cosine_decay | Currently only cosine_decay is supported |
 | step_each_epoch | The number of steps in an epoch. | 20 | Calculation :total_image_num / (batch_size_per_card * card_size) |
 | total_epoch | The number of epochs | 1000 | Consistent with Global.epoch_num |
diff --git a/ppocr/optimizer.py b/ppocr/optimizer.py
index 8598e48b4df09408ed3259bcf5ba55b6dbc698ff..c50b14c8d926d6e5b3798f5248ab1f80cbb57011 100755
--- a/ppocr/optimizer.py
+++ b/ppocr/optimizer.py
@@ -15,6 +15,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 import paddle.fluid as fluid
+from ppocr.utils.utility import initial_logger
+
+logger = initial_logger()
 
 
 def AdamDecay(params, parameter_list=None):
@@ -38,6 +41,8 @@ def AdamDecay(params, parameter_list=None):
                 learning_rate=base_lr,
                 step_each_epoch=step_each_epoch,
                 epochs=total_epoch)
+        else:
+            logger.info("Only support Cosine decay currently")
     optimizer = fluid.optimizer.Adam(
         learning_rate=base_lr,
         beta1=beta1,
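For reference, below is a minimal usage sketch of how a config's Optimizer parameters map onto the patched AdamDecay function. The dict key names follow the parameter table above and the lookups visible in the hunk; the nested 'decay' structure, the concrete numeric values, and the import path are illustrative assumptions rather than a shipped configuration.

# Hedged usage sketch: builds the params dict that AdamDecay(params, parameter_list=None)
# expects, mirroring the Optimizer options documented in config.md / config_en.md.
# Values are placeholders, not copied from rec_icdar15_train.yml.
from ppocr.optimizer import AdamDecay

optimizer_params = {
    'base_lr': 0.0005,   # base learning rate
    'beta1': 0.9,        # exponential decay rate for the 1st moment estimates
    'beta2': 0.999,      # exponential decay rate for the 2nd moment estimates
    'decay': {
        'function': 'cosine_decay',  # any other value now hits the new logger.info
                                     # branch and leaves base_lr constant
        'step_each_epoch': 20,       # total_image_num / (batch_size_per_card * card_size)
        'total_epoch': 1000,         # consistent with Global.epoch_num
    },
}

optimizer = AdamDecay(optimizer_params)  # returns a fluid.optimizer.Adam instance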