# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import math

from paddle.optimizer.lr_scheduler import LinearLrWarmup
from paddle.optimizer.lr_scheduler import PiecewiseLR
from paddle.optimizer.lr_scheduler import CosineAnnealingLR
from paddle.optimizer.lr_scheduler import ExponentialLR

__all__ = ['LearningRateBuilder']


class Cosine(CosineAnnealingLR):
    """
    Cosine learning rate decay:
    decayed_lr = 0.5 * lr * (math.cos(epoch * (math.pi / epochs)) + 1)

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        epochs(int): total training epochs
    """

    def __init__(self, lr, step_each_epoch, epochs, **kwargs):
        super(Cosine, self).__init__(
            learning_rate=lr,
            T_max=step_each_epoch * epochs, )

        self.update_specified = False
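
# Hedged usage sketch for Cosine (illustrative numbers, assuming the Paddle
# 2.0-beta API imported above, where a scheduler is advanced by calling
# step() once per optimizer step):
#
#     scheduler = Cosine(lr=0.1, step_each_epoch=100, epochs=120)
#     # T_max is 100 * 120 = 12000 steps, so the rate anneals from 0.1 towards
#     # 0.0 following 0.5 * lr * (1 + cos(pi * step / T_max)).
#     scheduler.step()  # call after every optimizer step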


class Piecewise(PiecewiseLR):
    """
    Piecewise learning rate decay

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        decay_epochs(list): piecewise decay epochs
        gamma(float): decay factor
    """

    def __init__(self, lr, step_each_epoch, decay_epochs, gamma=0.1, **kwargs):
        boundaries = [step_each_epoch * e for e in decay_epochs]
        lr_values = [lr * (gamma**i) for i in range(len(boundaries) + 1)]
        super(Piecewise, self).__init__(
            boundaries=boundaries, values=lr_values)

        self.update_specified = False
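
# Worked example of the construction above (illustrative numbers): with
# lr=0.1, step_each_epoch=100, decay_epochs=[30, 60, 90] and gamma=0.1,
# the parent PiecewiseLR receives
#     boundaries = [3000, 6000, 9000]
#     values     = [0.1, 0.01, 0.001, 0.0001]
# i.e. the learning rate is divided by 10 at the start of epochs 30, 60 and 90.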


class CosineWarmup(LinearLrWarmup):
    """
    Cosine learning rate decay with warmup
    [0, warmup_epoch): linear warmup
    [warmup_epoch, epochs): cosine decay

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        epochs(int): total training epochs
        warmup_epoch(int): epoch num of warmup
    """

    def __init__(self, lr, step_each_epoch, epochs, warmup_epoch=5, **kwargs):
        assert epochs > warmup_epoch, "total epoch({}) should be larger than warmup_epoch({}) in CosineWarmup.".format(
            epochs, warmup_epoch)
        warmup_step = warmup_epoch * step_each_epoch
        start_lr = 0.0
        end_lr = lr
        lr_sch = Cosine(lr, step_each_epoch, epochs - warmup_epoch)

        super(CosineWarmup, self).__init__(
            learning_rate=lr_sch,
            warmup_steps=warmup_step,
            start_lr=start_lr,
            end_lr=end_lr)

        self.update_specified = False
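
# Hedged usage sketch for CosineWarmup (illustrative numbers): with lr=0.1,
# step_each_epoch=100, epochs=120 and warmup_epoch=5, the rate ramps linearly
# from 0.0 to 0.1 over the first 500 steps, then follows the Cosine schedule
# built for the remaining 115 epochs:
#
#     scheduler = CosineWarmup(lr=0.1, step_each_epoch=100, epochs=120,
#                              warmup_epoch=5)
#     scheduler.step()  # call after every optimizer step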


class ExponentialWarmup(LinearLrWarmup):
    """
    Exponential learning rate decay with warmup
    [0, warmup_epoch): linear warmup
    [warmup_epoch, epochs): Exponential decay

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        decay_epochs(float): interval, in epochs, between two successive lr decays
        decay_rate(float): decay rate
        warmup_epoch(int): epoch num of warmup
    """

    def __init__(self,
                 lr,
                 step_each_epoch,
                 decay_epochs=2.4,
                 decay_rate=0.97,
                 warmup_epoch=5,
                 **kwargs):
        warmup_step = warmup_epoch * step_each_epoch
        start_lr = 0.0
        end_lr = lr
        lr_sch = ExponentialLR(lr, decay_rate)

        super(ExponentialWarmup, self).__init__(
            learning_rate=lr_sch,
            warmup_steps=warmup_step,
            start_lr=start_lr,
            end_lr=end_lr)

        # NOTE: hack method to update the exponential lr scheduler
        self.update_specified = True
        self.update_start_step = warmup_step
        self.update_step_interval = int(decay_epochs * step_each_epoch)
        self.step_each_epoch = step_each_epoch
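
# Hedged sketch of how a training loop might consume the update_* attributes
# set above (variable names are illustrative; the actual PaddleClas trainer
# may differ): after warmup, step() is only called every update_step_interval
# global steps, so the exponential decay fires once per `decay_epochs` epochs
# instead of once per batch:
#
#     offset = global_step - scheduler.update_start_step
#     if scheduler.update_specified and offset >= 0 \
#             and offset % scheduler.update_step_interval == 0:
#         scheduler.step()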


class LearningRateBuilder():
    """
    Build learning rate variable
    https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn.html

    Args:
        function(str): class name of the learning rate schedule defined in this module
        params(dict): parameters used to init the class
    """

    def __init__(self,
                 function='Linear',
                 params={'lr': 0.1,
                         'steps': 100,
                         'end_lr': 0.0}):
        self.function = function
        self.params = params

    def __call__(self):
        mod = sys.modules[__name__]
        lr = getattr(mod, self.function)(**self.params)
        return lr
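

if __name__ == "__main__":
    # Hedged smoke test (illustrative values, assuming a Paddle version that
    # still provides paddle.optimizer.lr_scheduler): build a cosine schedule
    # from a config-style dict, the way a trainer config is typically consumed.
    builder = LearningRateBuilder(
        function='Cosine',
        params={'lr': 0.1,
                'step_each_epoch': 100,
                'epochs': 120})
    lr_scheduler = builder()
    print(type(lr_scheduler).__name__)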