From 4b921e2f16382121a7ef27c0a03b2f5f6335adfb Mon Sep 17 00:00:00 2001
From: littletomatodonkey
Date: Wed, 28 Oct 2020 07:41:42 +0000
Subject: [PATCH] add opt doc

---
 ppocr/optimizer.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/ppocr/optimizer.py b/ppocr/optimizer.py
index fd315cd1..90d402a6 100644
--- a/ppocr/optimizer.py
+++ b/ppocr/optimizer.py
@@ -29,9 +29,17 @@ def cosine_decay_with_warmup(learning_rate,
                              step_each_epoch,
                              epochs=500,
                              warmup_minibatch=1000):
-    """Applies cosine decay to the learning rate.
+    """
+    Applies cosine decay to the learning rate.
     lr = 0.05 * (math.cos(epoch * (math.pi / 120)) + 1)
     decrease lr for every mini-batch and start with warmup.
+    args:
+        learning_rate(float): initial learning rate
+        step_each_epoch (int): number of steps per epoch in the training process
+        epochs(int): number of training epochs
+        warmup_minibatch(int): number of minibatches used for warmup
+    return:
+        lr(tensor): learning rate tensor
+    """
     global_step = _decay_step_counter()
     lr = fluid.layers.tensor.create_global_var(
@@ -65,6 +73,7 @@ def AdamDecay(params, parameter_list=None):
         params(dict): the super parameters
         parameter_list (list): list of Variable names to update to minimize loss
     return:
+        optimizer: an Adam optimizer instance
     """
     base_lr = params['base_lr']
     beta1 = params['beta1']
@@ -121,6 +130,7 @@ def RMSProp(params, parameter_list=None):
         params(dict): the super parameters
         parameter_list (list): list of Variable names to update to minimize loss
     return:
+        optimizer: an RMSProp optimizer instance
     """
     base_lr = params.get("base_lr", 0.001)
     l2_decay = params.get("l2_decay", 0.00005)
--
GitLab
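
Note (not part of the patch): the new docstring describes a warmup-then-cosine-decay schedule, but the fluid-based body of cosine_decay_with_warmup lies outside this hunk. Below is a minimal pure-Python sketch of what the docstring describes, assuming a linear warmup from 0 to the base learning rate over warmup_minibatch steps followed by cosine decay toward 0 over epochs * step_each_epoch steps; the function name cosine_decay_with_warmup_value and the exact decay span are illustrative assumptions, not code from ppocr/optimizer.py.

    import math

    def cosine_decay_with_warmup_value(base_lr, global_step, step_each_epoch,
                                       epochs=500, warmup_minibatch=1000):
        # Hypothetical standalone illustration, not the fluid implementation.
        # Linear warmup: ramp from 0 to base_lr over the first warmup_minibatch steps.
        if global_step < warmup_minibatch:
            return base_lr * global_step / warmup_minibatch
        # Cosine decay: anneal from base_lr toward 0 over the remaining steps.
        total_steps = epochs * step_each_epoch
        decay_step = global_step - warmup_minibatch
        return base_lr * (math.cos(decay_step * math.pi / total_steps) + 1) / 2

    # Example with base_lr=0.001, step_each_epoch=100, epochs=500:
    #   step 0      -> 0.0      (warmup starts)
    #   step 1000   -> 0.001    (warmup done, cosine decay begins)
    #   step 26000  -> 0.0005   (halfway through the cosine phase)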