learning_rate_scheduler.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
When training a model, it's often useful to decay the
learning rate during training process, this is called
learning_rate_decay. There are many strategies to do
this, this module will provide some classical method.
User can also implement their own learning_rate_decay
strategy according to this module.
"""

from __future__ import print_function

from . import control_flow
from . import nn
from . import ops
from . import tensor
from ..initializer import init_on_cpu
from ..framework import default_main_program, Parameter, unique_name, name_scope
from ..imperative import base as imperative_base
from ..imperative import learning_rate_scheduler as imperate_lr

__all__ = [
    'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
    'polynomial_decay', 'piecewise_decay', 'noam_decay', 'append_LARS'
]


def _decay_step_counter(begin=0):
    # the first global step is zero in learning rate decay
    global_step = nn.autoincreased_step_counter(
        counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1)
    global_step = tensor.cast(global_step, 'float32')
    return global_step


def noam_decay(d_model, warmup_steps):
    """
    Noam decay method. The numpy implementation of noam decay is as follows.

    >>> import numpy as np
    >>> lr_value = np.power(d_model, -0.5) * np.min([
    >>>                         np.power(current_steps, -0.5),
    >>>                         np.power(warmup_steps, -1.5) * current_steps])

    Please refer to `Attention Is All You Need
    <https://arxiv.org/pdf/1706.03762.pdf>`_.

    Args:
        d_model(Variable): The dimensionality of the model's input and output.

        warmup_steps(Variable): A hyperparameter that controls the number of warmup steps.

    Returns:
        The decayed learning rate.
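
    Examples:
        A minimal usage sketch; assumes ``avg_cost`` is the loss Variable to minimize.

        .. code-block:: python

          lr = fluid.layers.noam_decay(d_model=512, warmup_steps=4000)
          sgd_optimizer = fluid.optimizer.SGD(learning_rate=lr)
          sgd_optimizer.minimize(avg_cost)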
    """
    with default_main_program()._lr_schedule_guard():
        global_step = _decay_step_counter(1)

        a = global_step**-0.5
        b = (warmup_steps**-1.5) * global_step
        lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)

    return lr_value


def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
    Applies exponential decay to the learning rate.

    When training a model, it is often recommended to lower the learning rate as the
    training progresses. By using this function, the learning rate will be decayed by
    'decay_rate' every 'decay_steps' steps.

    >>> if staircase == True:
    >>>     decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps)
    >>> else:
    >>>     decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)

    Args:
        learning_rate(Variable|float): The initial learning rate.
        decay_steps(int): See the decay computation above.
        decay_rate(float): The decay rate. See the decay computation above.
        staircase(Boolean): If True, decay the learning rate at discrete intervals.
                            Default: False

    Returns:
        Variable: The decayed learning rate

    Examples:
        .. code-block:: python

          base_lr = 0.1
          sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.exponential_decay(
                    learning_rate=base_lr,
                    decay_steps=10000,
                    decay_rate=0.5,
                    staircase=True))
          sgd_optimizer.minimize(avg_cost)

    """
    with default_main_program()._lr_schedule_guard():
        global_step = _decay_step_counter()

        div_res = global_step / decay_steps
        if staircase:
            div_res = ops.floor(div_res)
        decayed_lr = learning_rate * (decay_rate**div_res)

        return decayed_lr


def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """Applies natural exponential decay to the initial learning rate.

    >>> if not staircase:
    >>>     decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
    >>> else:
    >>>     decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps))

    Args:
        learning_rate: A scalar float32 value or a Variable. This
          will be the initial learning rate during training.
        decay_steps: A Python `int32` number.
        decay_rate: A Python `float` number.
        staircase: Boolean. If True, decay the learning rate at discrete
          intervals of decay_steps.

    Returns:
        The decayed learning rate
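
    Examples:
        A minimal usage sketch; assumes ``avg_cost`` is the loss Variable to minimize.

        .. code-block:: python

          base_lr = 0.1
          sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.natural_exp_decay(
                    learning_rate=base_lr,
                    decay_steps=10000,
                    decay_rate=0.5,
                    staircase=True))
          sgd_optimizer.minimize(avg_cost)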
    """
    with default_main_program()._lr_schedule_guard():
        global_step = _decay_step_counter()

        div_res = global_step / decay_steps
        if staircase:
            div_res = ops.floor(div_res)
        decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)

        return decayed_lr


def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
    Applies inverse time decay to the initial learning rate.

    When training a model, it is often recommended to lower the learning rate as the
    training progresses. By using this function, an inverse decay function will be
    applied to the initial learning rate.

    >>> if staircase == True:
    >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_steps))
    >>> else:
    >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_steps)

    Args:
        learning_rate(Variable|float): The initial learning rate.
        decay_steps(int): See the decay computation above.
        decay_rate(float): The decay rate. See the decay computation above.
        staircase(Boolean): If True, decay the learning rate at discrete intervals.
                            Default: False

    Returns:
        Variable: The decayed learning rate

    Examples:
        .. code-block:: python

          base_lr = 0.1
          sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.inverse_time_decay(
                    learning_rate=base_lr,
                    decay_steps=10000,
                    decay_rate=0.5,
                    staircase=True))
          sgd_optimizer.minimize(avg_cost)
    """
    with default_main_program()._lr_schedule_guard():
        global_step = _decay_step_counter()

        div_res = global_step / decay_steps
        if staircase:
            div_res = ops.floor(div_res)

        decayed_lr = learning_rate / (1 + decay_rate * div_res)

        return decayed_lr


def polynomial_decay(learning_rate,
                     decay_steps,
                     end_learning_rate=0.0001,
                     power=1.0,
                     cycle=False):
    """
    Applies polynomial decay to the initial learning rate.

    .. code-block:: python

     if cycle:
       decay_steps = decay_steps * ceil(global_step / decay_steps)
     else:
       global_step = min(global_step, decay_steps)
     decayed_learning_rate = (learning_rate - end_learning_rate) *
          (1 - global_step / decay_steps) ^ power + end_learning_rate

    Args:
        learning_rate(Variable|float32): A scalar float32 value or a Variable. This
          will be the initial learning rate during training.
        decay_steps(int32): A Python `int32` number.
        end_learning_rate(float): A Python `float` number.
        power(float): A Python `float` number.
        cycle(bool): If True, keep cycling (restarting) the decay beyond
            decay_steps rather than holding at end_learning_rate. Default: False

    Returns:
        Variable: The decayed learning rate
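
    Examples:
        A minimal usage sketch; assumes ``avg_cost`` is the loss Variable to minimize.

        .. code-block:: python

          start_lr = 0.01
          total_steps = 5000
          lr = fluid.layers.polynomial_decay(
              learning_rate=start_lr,
              decay_steps=total_steps,
              end_learning_rate=0.0001,
              power=1.0,
              cycle=False)
          sgd_optimizer = fluid.optimizer.SGD(learning_rate=lr)
          sgd_optimizer.minimize(avg_cost)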
    """
    with default_main_program()._lr_schedule_guard():
        global_step = _decay_step_counter()

        if cycle:
            div_res = ops.ceil(global_step / decay_steps)
            zero_var = tensor.fill_constant(
                shape=[1], dtype='float32', value=0.0)
            one_var = tensor.fill_constant(
                shape=[1], dtype='float32', value=1.0)

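            # at global_step == 0, ceil(0 / decay_steps) is 0, which would make the
            # effective decay_steps zero; force div_res to one for that first step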
            with control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
                    tensor.assign(input=one_var, output=div_res)
            decay_steps = decay_steps * div_res
        else:
            decay_steps_var = tensor.fill_constant(
                shape=[1], dtype='float32', value=float(decay_steps))
            global_step = nn.elementwise_min(x=global_step, y=decay_steps_var)

        decayed_lr = (learning_rate - end_learning_rate) * \
            ((1 - global_step / decay_steps) ** power) + end_learning_rate
        return decayed_lr


def piecewise_decay(boundaries, values):
    """Applies piecewise decay to the initial learning rate.

      The algorithm can be described as the code below.

      .. code-block:: python

        boundaries = [10000, 20000]
        values = [1.0, 0.5, 0.1]
        if step < 10000:
            learning_rate = 1.0
        elif 10000 <= step < 20000:
            learning_rate = 0.5
        else:
            learning_rate = 0.1
    Args:
        boundaries: A list of step numbers at which the learning rate changes.
        values: A list of learning rate values that will be picked during
            different step boundaries.

    Returns:
        The decayed learning rate.
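
    Examples:
        A minimal usage sketch; assumes ``avg_cost`` is the loss Variable to minimize.

        .. code-block:: python

          boundaries = [10000, 20000]
          values = [1.0, 0.5, 0.1]
          sgd_optimizer = fluid.optimizer.SGD(
              learning_rate=fluid.layers.piecewise_decay(boundaries, values))
          sgd_optimizer.minimize(avg_cost)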


    """
    with default_main_program()._lr_schedule_guard():
        if len(values) - len(boundaries) != 1:
            raise ValueError("len(values) - len(boundaries) should be 1")

        if imperative_base.enabled():
            decay = imperate_lr.PiecewiseDecay(boundaries, values, 0)
            return decay
        else:
            global_step = _decay_step_counter()

            lr = tensor.create_global_var(
                shape=[1],
                value=0.0,
                dtype='float32',
                persistable=True,
                name="learning_rate")

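            # the first boundary the current step has not yet reached selects the
            # learning rate; past the last boundary, fall back to the final value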
            with control_flow.Switch() as switch:
                for i in range(len(boundaries)):
                    boundary_val = tensor.fill_constant(
                        shape=[1],
                        dtype='float32',
                        value=float(boundaries[i]),
                        force_cpu=True)
                    value_var = tensor.fill_constant(
                        shape=[1], dtype='float32', value=float(values[i]))
                    with switch.case(global_step < boundary_val):
                        tensor.assign(value_var, lr)
                last_value_var = tensor.fill_constant(
                    shape=[1],
                    dtype='float32',
                    value=float(values[len(values) - 1]))
                with switch.default():
                    tensor.assign(last_value_var, lr)

            return lr


def append_LARS(params_grads, learning_rate, weight_decay):
    """
    Applies LARS (Layer-wise Adaptive Rate Scaling) to the learning rate of
    each layer.

    Args:
        params_grads: A list of (parameter, gradient) Variable pairs.
        learning_rate: A learning rate Variable. This
          is the global learning rate for LARS.
        weight_decay: A Python `float` number.

    Returns:
        None. The scaled learning rate for each parameter is written back to
        param.optimize_attr['learning_rate'].
    Examples:
        .. code-block:: python

            learning_rate *= local_gw_ratio * sqrt(sumsq(param))
                / (sqrt(sumsq(gradient)) + weight_decay * sqrt(sumsq(param)))
    """

    def _balanced_weight(param_norm, grad_norm):
        if weight_decay == 1.0:
            return grad_norm + param_norm
        else:
            return grad_norm + weight_decay * param_norm

    for param, grad in params_grads:
        with param.block.program.optimized_guard(
            [param, grad]), name_scope("optimizer"):
            param_lr = param.optimize_attr['learning_rate']
            param_norm = ops.sqrt(nn.reduce_sum(input=ops.square(param)))
            grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad)))
            if type(param_lr) == float and param_lr == 1.0:
                decayed_lr = learning_rate * param_norm \
                    / _balanced_weight(param_norm, grad_norm)
            else:
                decayed_lr = learning_rate * param_lr * param_norm \
                    / _balanced_weight(param_norm, grad_norm)
            # set back param local learning rate
            param.optimize_attr['learning_rate'] = decayed_lr