# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import warnings
import numpy as np

import paddle
from .. import unique_name
from ..framework import Variable
from ..data_feeder import check_type

__all__ = [
    'NoamDecay',
    'PiecewiseDecay',
    'NaturalExpDecay',
    'ExponentialDecay',
    'InverseTimeDecay',
    'PolynomialDecay',
    'CosineDecay',
    'LinearLrWarmup',
    'ReduceLROnPlateau',
    'StepDecay',
    'MultiStepDecay',
    'LambdaDecay',
]


class LearningRateDecay:
    """
    Base class of learning rate decay

    Define the common interface of a LearningRateDecay.
    Users should not use this class directly,
    but should use one of its implementations.
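
    A minimal sketch of a custom subclass (illustrative only; the class name and
    the halving rule below are hypothetical, not part of the API):

    .. code-block:: python

        class HalvingDecay(LearningRateDecay):
            def __init__(self, base_lr, halve_every, begin=0, step=1, dtype='float32'):
                super().__init__(begin, step, dtype)
                self.base_lr = base_lr
                self.halve_every = halve_every

            def step(self):
                # halve the learning rate once every `halve_every` steps
                return self.base_lr * 0.5 ** (self.step_num // self.halve_every)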
    """

    def __init__(self, begin=0, step=1, dtype='float32'):
        self.step_num = begin
        self.step_size = step
        self.dtype = dtype

    def __call__(self):
        lr = self.step()
        if isinstance(lr, float):
            lr = self.create_lr_var(lr)
        self.step_num += self.step_size
        return lr

    def create_lr_var(self, lr):
        """
        Convert lr from a float to a Variable.

        Args:
            lr: learning rate
        Returns:
            learning rate variable
        """
        from .. import layers

        lr = paddle.static.create_global_var(
            name=unique_name.generate("learning_rate"),
            shape=[1],
            value=float(lr),
            dtype=self.dtype,
            persistable=False,
        )
        return lr

    # Note: If you want to change what optimizer.state_dict stores, just overwrite this function,
    # "self.step_num" will be stored by default.
    def state_dict(self):
        """
        Returns the state of the scheduler as a :class:`dict`.

        It is a subset of self.__dict__ .
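
        A minimal usage sketch (the scheduler and values below are illustrative):

        .. code-block:: python

            scheduler = PiecewiseDecay([10000, 20000], [1.0, 0.5, 0.1], begin=0)
            state = scheduler.state_dict()   # e.g. {'step_num': 0}
            scheduler.set_state_dict(state)  # restore the saved step counter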
        """
        self._state_keys()
        state_dict = {}
        for key in self.keys:
            if key not in self.__dict__:
                continue
            value = self.__dict__[key]
            if isinstance(value, Variable):
                assert (
                    value.size == 1
                ), "size of Variable in state_dict must be 1"
                value = float(value)
            state_dict[key] = value

        return state_dict

    def _state_keys(self):
        """
        set the keys in self.__dict__ that are needed to be saved.
        """
        self.keys = ['step_num']

    def set_state_dict(self, state_dict):
        """
        Loads the scheduler's state.
        """
        self._state_keys()
        for key in self.keys:
            if key in state_dict:
                self.__dict__[key] = state_dict[key]
            else:
                raise RuntimeError(
                    "Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict".format(
                        key
                    )
                )
        if len(state_dict) > len(self.keys):
            warnings.warn(
                "There are some unused values in state_dict. Maybe the optimizer has a different 'LearningRateDecay' when invoking state_dict and set_dict"
            )

    # [aliases] Compatible with old method names
    set_dict = set_state_dict

    def step(self):
        raise NotImplementedError()


class PiecewiseDecay(LearningRateDecay):
    """
    :api_attr: imperative

    Piecewise decay scheduler.

    The algorithm can be described as the code below.

    .. code-block:: text

        boundaries = [10000, 20000]
        values = [1.0, 0.5, 0.1]
        if global_step < 10000:
            learning_rate = 1.0
        elif 10000 <= global_step < 20000:
            learning_rate = 0.5
        else:
            learning_rate = 0.1

    Parameters:
        boundaries(list): A list of step numbers. The type of element in the list is python int.
        values(list): A list of learning rate values that will be picked during
            different step boundaries. The type of element in the list is python float.
        begin(int): The begin step to initialize the global_step in the description above.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        None.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          boundaries = [10000, 20000]
          values = [1.0, 0.5, 0.1]
          with fluid.dygraph.guard():
              emb = paddle.nn.Embedding(10, 10)
              optimizer = fluid.optimizer.SGD(
                 learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0),
                 parameter_list = emb.parameters() )
    """

    def __init__(self, boundaries, values, begin, step=1, dtype='float32'):
        super().__init__(begin, step, dtype)
        self.boundaries = boundaries
        self.values = values

        self.vars = []
        for value in values:
            self.vars.append(value)

    def step(self):
        for i in range(len(self.boundaries)):
            if self.step_num < self.boundaries[i]:
                return self.vars[i]
        return self.create_lr_var(self.vars[len(self.values) - 1])


class NaturalExpDecay(LearningRateDecay):
    r"""
    :api_attr: imperative

    Applies natural exponential decay to the initial learning rate.

    The algorithm can be described as follows.

    .. math::

        decayed\_learning\_rate = learning\_rate * e^{y}

    If staircase is set to False, then:

    .. math::

        y = - decay\_rate * \\frac{global\_step}{decay\_steps}

    If staircase is set to True, then:

    .. math::

        y = - decay\_rate * math.floor(\\frac{global\_step}{decay\_steps})
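
    For example, with ``learning_rate = 0.1``, ``decay_rate = 0.5``, ``decay_steps = 10000``
    and ``staircase = True``, the learning rate at ``global_step = 20000`` is
    ``0.1 * e^(-0.5 * 2)``, approximately ``0.0368`` (an illustrative calculation of the formula above).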

    Parameters:
        learning_rate(Variable|float): The initial learning rate. If the type
            is Variable, it's a tensor with shape [1], the data type can be
            float32 or float64. It also can be set to python int number.
        decay_steps(int): The decay step size. It determines the decay cycle.
        decay_rate(float): The decay rate.
        staircase(bool, optional): If set to True, decay the learning rate at discrete intervals. The
            default value is False.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            base_lr = 0.1
            with fluid.dygraph.guard():
                emb = paddle.nn.Embedding(10, 10)
                sgd_optimizer = fluid.optimizer.SGD(
                        learning_rate=fluid.dygraph.NaturalExpDecay(
                            learning_rate=base_lr,
                            decay_steps=10000,
                            decay_rate=0.5,
                            staircase=True),
                        parameter_list=emb.parameters())

    """

    def __init__(
        self,
        learning_rate,
        decay_steps,
        decay_rate,
        staircase=False,
        begin=0,
        step=1,
        dtype='float32',
    ):
        super().__init__(begin, step, dtype)
        self.learning_rate = learning_rate
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    def step(self):
        div_res = self.create_lr_var(self.step_num / self.decay_steps)
        if self.staircase:
            div_res = paddle.floor(div_res)
        decayed_lr = self.learning_rate * paddle.exp(
            -1 * self.decay_rate * div_res
        )

        return decayed_lr


class ExponentialDecay(LearningRateDecay):
    r"""
    :api_attr: imperative

    Applies exponential decay to the learning rate.

    The algorithm can be described as follows.

    .. math::

        decayed\_learning\_rate = learning\_rate * decay\_rate ^ y

    If staircase is set to False, then:

    .. math::

        y = \\frac{global\_step}{decay\_steps}

    If staircase is set to True, then:

    .. math::

        y = math.floor(\\frac{global\_step}{decay\_steps})


    Parameters:
        learning_rate(Variable|float): The initial learning rate. If the type
            is Variable, it's a tensor with shape [1], the data type can be
            float32 or float64. It also can be set to python int number.
        decay_steps(int): The decay step size. It determines the decay cycle.
        decay_rate(float): The decay rate.
        staircase(bool, optional): If set to True, decay the learning rate at discrete intervals. The
            default value is False.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        None.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          base_lr = 0.1
          with fluid.dygraph.guard():
              sgd_optimizer = fluid.optimizer.SGD(
                    learning_rate=fluid.dygraph.ExponentialDecay(
                        learning_rate=base_lr,
                        decay_steps=10000,
                        decay_rate=0.5,
                        staircase=True))

    """

    def __init__(
        self,
        learning_rate,
        decay_steps,
        decay_rate,
        staircase=False,
        begin=0,
        step=1,
        dtype='float32',
    ):
        super().__init__(begin, step, dtype)
        self.learning_rate = learning_rate
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    def step(self):
        div_res = self.create_lr_var(self.step_num / self.decay_steps)
        if self.staircase:
            div_res = paddle.floor(div_res)

        decayed_lr = self.learning_rate * (self.decay_rate**div_res)

        return decayed_lr


class InverseTimeDecay(LearningRateDecay):
    r"""
    :api_attr: imperative

371 372
    Applies inverse time decay to the initial learning rate.

    The algorithm can be described as follows.
    If staircase is set to False, then:

    .. math::

        decayed\_learning\_rate = \\frac{learning\_rate}{1 + decay\_rate * \\frac{global\_step}{decay\_step}}

    If staircase is set to True, then:

    .. math::

        decayed\_learning\_rate = \\frac{learning\_rate}{1 + decay\_rate * math.floor(\\frac{global\_step}{decay\_step})}

    Parameters:
        learning_rate(Variable|float): The initial learning rate. If the type
            is Variable, it's a tensor with shape [1], the data type can be
            float32 or float64. It also can be set to python int number.
        decay_steps(int): The decay step size. It determines the decay cycle.
        decay_rate(float): The decay rate.
        staircase(bool, optional): If set to True, decay the learning rate at discrete intervals. The
            default value is False.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        None.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          base_lr = 0.1
          with fluid.dygraph.guard():
              emb = paddle.nn.Embedding(10, 10)
              sgd_optimizer = fluid.optimizer.SGD(
                  learning_rate=fluid.dygraph.InverseTimeDecay(
                        learning_rate=base_lr,
                        decay_steps=10000,
                        decay_rate=0.5,
                        staircase=True),
                  parameter_list = emb.parameters())

    """

    def __init__(
        self,
        learning_rate,
        decay_steps,
        decay_rate,
        staircase=False,
        begin=0,
        step=1,
        dtype='float32',
    ):
        super().__init__(begin, step, dtype)
        self.learning_rate = learning_rate
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    def step(self):
        div_res = self.create_lr_var(self.step_num / self.decay_steps)
        if self.staircase:
            div_res = paddle.floor(div_res)

        decayed_lr = self.learning_rate / (1 + self.decay_rate * div_res)

        return decayed_lr


class PolynomialDecay(LearningRateDecay):
    r"""
    :api_attr: imperative

    Applies polynomial decay to the initial learning rate.

    The algorithm can be described as follows.

    If cycle is set to True, then:

    .. math::

        decay\_steps & = decay\_steps * math.ceil(\\frac{global\_step}{decay\_steps})

        decayed\_learning\_rate & = (learning\_rate-end\_learning\_rate)*(1-\\frac{global\_step}{decay\_steps})^{power}+end\_learning\_rate

    If cycle is set to False, then:

    .. math::

        global\_step & = min(global\_step, decay\_steps)

        decayed\_learning\_rate & = (learning\_rate-end\_learning\_rate)*(1-\\frac{global\_step}{decay\_steps})^{power}+end\_learning\_rate

    Parameters:
        learning_rate(Variable|float): The initial learning rate. If the type
            is Variable, it's a tensor with shape [1], the data type can be
            float32 or float64. It also can be set to python int number.
        decay_steps(int): The decay step size. It determines the decay cycle.
        end_learning_rate(float, optional): The minimum final learning rate. The default value is 0.0001.
        power(float, optional): Power of polynomial. The default value is 1.0.
        cycle(bool, optional): If set true, decay the learning rate every decay_steps. The default value is False.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        None.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          start_lr = 0.01
          total_step = 5000
          end_lr = 0
          with fluid.dygraph.guard():
              emb = paddle.nn.Embedding(10, 10)
              optimizer  = fluid.optimizer.SGD(
                  learning_rate = fluid.dygraph.PolynomialDecay(
                  start_lr, total_step, end_lr, power=1.0),
                  parameter_list = emb.parameters())

    """

    def __init__(
        self,
        learning_rate,
        decay_steps,
        end_learning_rate=0.0001,
        power=1.0,
        cycle=False,
        begin=0,
        step=1,
        dtype='float32',
    ):
        super().__init__(begin, step, dtype)
        self.learning_rate = learning_rate
        self.decay_steps = decay_steps
        self.end_learning_rate = end_learning_rate
        self.power = power
        self.cycle = cycle

    def step(self):
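        # When cycle is True, decay_steps is enlarged to the smallest multiple of the
        # original decay_steps that covers the current step; otherwise the step count
        # is clamped at decay_steps so the rate bottoms out at end_learning_rate.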
        tmp_step_num = self.step_num
        tmp_decay_steps = self.decay_steps
        if self.cycle:
            div_res = paddle.ceil(
                self.create_lr_var(tmp_step_num / float(self.decay_steps))
            )

            if tmp_step_num == 0:
                div_res = self.create_lr_var(1.0)
            tmp_decay_steps = self.decay_steps * div_res
        else:
            tmp_step_num = self.create_lr_var(
                tmp_step_num
                if tmp_step_num < self.decay_steps
                else self.decay_steps
            )

        decayed_lr = (self.learning_rate - self.end_learning_rate) * (
            (1 - tmp_step_num / tmp_decay_steps) ** self.power
        ) + self.end_learning_rate
        return decayed_lr


class CosineDecay(LearningRateDecay):
    r"""
    :api_attr: imperative

    Applies cosine decay to the learning rate.

    The algorithm can be described as follows.

    .. math::

        decayed\_learning\_rate = learning\_rate * 0.5 * (math.cos(global\_step * \\frac{math.pi}{step\_each\_epoch} ) + 1)

    Parameters:
        learning_rate(Variable|float): The initial learning rate. If the type
            is Variable, it's a tensor with shape [1], the data type can be
            float32 or float64. It also can be set to python int number.
        step_each_epoch(int): The number of steps in an epoch.
        epochs(int): The number of epochs.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            base_lr = 0.1
            with fluid.dygraph.guard():
                optimizer  = fluid.optimizer.SGD(
                    learning_rate = fluid.dygraph.CosineDecay(
                            base_lr, 10000, 120) )
    """

    def __init__(
        self,
        learning_rate,
        step_each_epoch,
        epochs,
        begin=0,
        step=1,
        dtype='float32',
    ):
        super().__init__(begin, step, dtype)
        self.learning_rate = learning_rate
        self.step_each_epoch = step_each_epoch
        self.epochs = epochs

    def step(self):
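        # cur_epoch counts completed epochs; the cosine term anneals the rate from
        # learning_rate towards 0 over `epochs` epochs.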
        cur_epoch = paddle.floor(
            self.create_lr_var(self.step_num / self.step_each_epoch)
        )
        decayed_lr = (
            self.learning_rate
            * 0.5
            * (paddle.cos(cur_epoch * math.pi / self.epochs) + 1)
        )
        return decayed_lr


class NoamDecay(LearningRateDecay):
    r"""
    :api_attr: imperative

    Applies Noam decay to the initial learning rate.

    The algorithm can be described as follows.

    .. math::

        decayed\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(global\_step^{-0.5}, global\_step * warmup\_steps^{-1.5})

    Please refer to `attention is all you need <https://arxiv.org/pdf/1706.03762.pdf>`_

    Parameters:
        d_model(Variable|int): The dimensionality of the input and output feature vectors of the model. If the type is Variable,
            it's a tensor with shape [1] and the data type can be int32 or int64. The type can also be python int.
        warmup_steps(Variable|int): The number of warmup steps. A hyperparameter. If the type is Variable,
            it's a tensor with shape [1] and the data type can be int32 or int64. The type can also be python int.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.
        learning_rate(Variable|float|int): The initial learning rate. If the type
            is Variable, it's a tensor with shape [1], the data type can be
            float32 or float64. It also can be set to python int number. Default 1.0

    Returns:
        None.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          warmup_steps = 100
          learning_rate = 0.01
          with fluid.dygraph.guard():
              emb = paddle.nn.Embedding(10, 10)
              optimizer  = fluid.optimizer.SGD(
                  learning_rate = fluid.dygraph.NoamDecay(
                         1/(warmup_steps *(learning_rate ** 2)),
                         warmup_steps),
                  parameter_list = emb.parameters())
    """

    def __init__(
        self,
        d_model,
        warmup_steps,
        begin=1,
        step=1,
        dtype='float32',
        learning_rate=1.0,
    ):
        super().__init__(begin, step, dtype)
        self.learning_rate = learning_rate
        self.d_model = d_model
        self.warmup_steps = warmup_steps

    def step(self):
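        # a is the inverse square-root decay term and b the linear warmup term; taking
        # the minimum makes the rate grow for the first warmup_steps steps and decay afterwards.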
        from .. import layers

        a = self.create_lr_var(self.step_num**-0.5)
        b = self.create_lr_var((self.warmup_steps**-1.5) * self.step_num)
        lr_value = (
            self.learning_rate * (self.d_model**-0.5) * paddle.minimum(a, b)
        )
        return lr_value


class LinearLrWarmup(LearningRateDecay):
    """
    :api_attr: imperative

    This operator uses the linear learning rate warm-up strategy to adjust the learning rate before the normal learning rate scheduling starts.
    For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_

    When global_step < warmup_steps, learning rate is updated as:

    .. code-block:: text

            linear_step = end_lr - start_lr
            lr = start_lr + linear_step * (global_step / warmup_steps)

    where start_lr is the initial learning rate, and end_lr is the final learning rate;

    When global_step >= warmup_steps, learning rate is updated as:

    .. code-block:: text

            lr = learning_rate

    where lr is the learning_rate after warm-up.

    Args:
        learning_rate (Variable|float): Learning_rate after warm-up, it could be 1D-Tensor or single value with the data type of float32.
        warmup_steps (int): Steps for warm up.
        start_lr (float): Initial learning rate of warm up.
        end_lr (float): Final learning rate of warm up.
        begin(int, optional): The begin step. The initial value of global_step described above. The default value is 0.
        step(int, optional): The step size used to calculate the new global_step in the description above.
            The default value is 1.
        dtype(str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. The default value is 'float32'.

    Returns:
        Variable: Warm-up learning rate with the same data type as learning_rate.


    Examples:

    .. code-block:: python

        import paddle.fluid as fluid

        learning_rate = 0.1
        warmup_steps = 50
        start_lr = 0
        end_lr = 0.1

        with fluid.dygraph.guard():
            lr_decay = fluid.dygraph.LinearLrWarmup( learning_rate, warmup_steps, start_lr, end_lr)


    """

    def __init__(
        self,
        learning_rate,
        warmup_steps,
        start_lr,
        end_lr,
        begin=1,
        step=1,
        dtype='float32',
    ):
        super().__init__(begin, step, dtype)
        type_check = (
            isinstance(learning_rate, float)
            or isinstance(learning_rate, int)
            or isinstance(learning_rate, LearningRateDecay)
        )
        if not type_check:
            raise TypeError(
                "the type of learning_rate should be [int, float or LearningRateDecay], the current type is {}".format(
                    learning_rate
                )
            )
        self.learning_rate = learning_rate
        self.warmup_steps = warmup_steps
        self.start_lr = start_lr
        assert (
            end_lr > start_lr
        ), "end_lr {} must be greater than start_lr {}".format(end_lr, start_lr)
        self.lr_ratio_before_warmup = (float(end_lr) - float(start_lr)) / float(
            warmup_steps
        )

    def step(self):
        base_lr = self.learning_rate
        if isinstance(self.learning_rate, LearningRateDecay):
            base_lr = base_lr()

        from .. import layers

        if self.step_num < self.warmup_steps:
            return self.lr_ratio_before_warmup * self.step_num + self.start_lr
        else:
            return base_lr


class ReduceLROnPlateau(LearningRateDecay):
    """
    :api_attr: imperative

    Reduce learning rate when ``loss`` has stopped descending. Models often benefit from reducing the learning rate
    by 2 to 10 times once model performance stops improving.

    The ``loss`` is the one which has been passed into ``step`` , it must be a 1-D Tensor with shape [1]. When ``loss``
    stops descending for a ``patience`` number of epochs, the learning rate will be reduced to ``learning_rate * decay_rate`` .
    (Specially, ``mode`` can also be set to ``'max'`` , in this case, when ``loss`` stops ascending for a ``patience`` number
    of epochs, the learning rate will be reduced.)

    In addition, after each reduction, it will wait a ``cooldown`` number of epochs before resuming normal operation.

    Args:
        learning_rate (Variable|float|int): The initial learning rate. It can be set to python float or int number.
            If the type is Variable, it should be 1-D Tensor with shape [1], the data type can be 'float32' or 'float64'.
        mode (str, optional): ``'min'`` or ``'max'`` can be selected. Normally, it is ``'min'`` , which means that the
            learning rate will reduce when ``loss`` stops descending. Specially, if it's set to ``'max'`` ,  the learning
            rate will reduce when ``loss`` stops ascending. Default: ``'min'`` .
        decay_rate (float, optional): The ratio by which the learning rate will be reduced. ``new_lr = origin_lr * decay_rate`` .
            It should be less than 1.0. Default: 0.1.
        patience (int, optional): When ``loss`` doesn't improve for this number of epochs, the learning rate will be reduced.
            Default: 10.
        verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False``.
        threshold (float, optional): ``threshold`` and ``threshold_mode`` will determine the minimum change of ``loss`` .
            This way, tiny changes of ``loss`` will be ignored. Default: 1e-4.
        threshold_mode (str, optional): ``'rel'`` or ``'abs'`` can be selected. In ``'rel'`` mode, the minimum change of ``loss``
            is ``last_loss * threshold`` , where ``last_loss`` is ``loss`` in last epoch. In ``'abs'`` mode, the minimum
            change of ``loss`` is ``threshold`` . Default: ``'rel'`` .
        cooldown (int, optional): The number of epochs to wait before resuming normal operation. Default: 0.
        min_lr (float, optional): The lower bound of the learning rate after reduction. Default: 0.
        eps (float, optional): Minimal decay applied to lr. If the difference between new and old lr is smaller than eps, the update is
            ignored. Default: 1e-8.
        dtype (str, optional): The data type used to create the learning rate variable. The data type can be set as
            'float32', 'float64'. Default: 'float32'.

    Returns:
        Reduced learning rate.

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import paddle
        import numpy as np

        with fluid.dygraph.guard():
            x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
            linear = paddle.nn.Linear(10, 10)
            input = fluid.dygraph.to_variable(x)

            reduce_lr = fluid.dygraph.ReduceLROnPlateau(
                                    learning_rate = 1.0,
                                    decay_rate = 0.5,
                                    patience = 5,
                                    verbose = True,
                                    cooldown = 3)
            adam = fluid.optimizer.Adam(
                learning_rate = reduce_lr,
                parameter_list = linear.parameters())

            for epoch in range(10):
                total_loss = 0
                for batch_id in range(5):
                    out = linear(input)
                    loss = paddle.mean(out)
                    total_loss += loss
                    adam.minimize(loss)

                avg_loss = total_loss/5

                # adjust learning rate according to avg_loss
                reduce_lr.step(avg_loss)
                lr = adam.current_step_lr()
                print("current avg_loss is %s, current lr is %s" % (float(avg_loss), lr))

    """

    def __init__(
        self,
        learning_rate,
        mode='min',
        decay_rate=0.1,
        patience=10,
        verbose=False,
        threshold=1e-4,
        threshold_mode='rel',
        cooldown=0,
        min_lr=0,
        eps=1e-8,
        dtype='float32',
    ):
        super().__init__(dtype=dtype)
        mode = mode.lower()
        if mode not in ['min', 'max']:
            raise ValueError('mode ' + mode + ' is unknown!')
        self.mode = mode

        if decay_rate >= 1.0:
            raise ValueError(
                'new_lr = origin_lr * decay_rate and decay_rate should be < 1.0.'
            )
        self.decay_rate = self.create_lr_var(decay_rate)

        threshold_mode = threshold_mode.lower()
        if threshold_mode not in ['rel', 'abs']:
            raise ValueError(
                'threshold mode ' + threshold_mode + ' is unknown!'
            )
        self.threshold_mode = threshold_mode
        check_type(
            learning_rate,
            'learning_rate',
            (float, int, Variable),
            'ReduceLROnPlateau',
        )
        if not isinstance(learning_rate, (float, int, Variable)):
            raise TypeError(
                "The type of 'learning_rate' in 'ReduceLROnPlateau' must be 'float, int, Variable', but received %s."
                % type(learning_rate)
            )

        self.learning_rate = learning_rate
        self.verbose = verbose
        self.patience = patience
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.cooldown = cooldown
        self.min_lr = self.create_lr_var(min_lr)
        self.eps = eps

        self.cooldown_counter = 0
        self.best_loss = None
        self.num_bad_epochs = 0
        self.epoch_num = 0

    # "cooldown_counter / best_loss / num_bad_epochs / epoch_num / learning_rate" will be stored.
    def _state_keys(self):
        self.keys = [
            'cooldown_counter',
            'best_loss',
            'num_bad_epochs',
            'epoch_num',
            'learning_rate',
        ]

    def __call__(self):
        if not isinstance(self.learning_rate, Variable):
            self.learning_rate = self.create_lr_var(self.learning_rate)
        return self.learning_rate

    def step(self, loss):
        """
        It should be invoked on each epoch. Update the learning rate in optimizer according to ``loss`` .
        The new learning rate will take effect on next call to ``optimizer.minimize`` .

        Args:
            loss (Variable): A ``Variable`` that will be monitored to determine whether the learning rate will reduce.
                If it stops descending for a ``patience`` number of epochs, the learning rate will reduce. It should
                be 1-D Tensor with shape [1].
                Specially, if ``mode`` has been set to ``'max'`` ,  the learning rate will reduce when it stops ascending.
        Returns:
            None

        Examples:
            Please refer to the example of current LearningRateDecay.
        """

        # loss must be 1-D Tensor with shape [1]
        check_type(loss, 'loss', Variable, 'ReduceLROnPlateau.step')
        assert np.prod(loss.shape) == 1, (
            "The number of elements of loss should be 1, but the current loss.shape is {}, whose number of elements is not 1. "
            "Maybe that you should call paddle.mean to process it first.".format(
                loss.shape
            )
        )

        self.epoch_num += 1
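        # While in the cooldown period, bad-epoch counting is paused.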
        if self.cooldown_counter > 0:
            self.cooldown_counter -= 1
        else:
            if self.best_loss is None or self._is_better(loss, self.best_loss):
                self.best_loss = loss
                self.num_bad_epochs = 0
            else:
                self.num_bad_epochs += 1

            if self.num_bad_epochs > self.patience:
                self.cooldown_counter = self.cooldown
                self.num_bad_epochs = 0
                new_lr = paddle.maximum(
                    self.learning_rate * self.decay_rate, self.min_lr
                )
                if self.learning_rate - new_lr > self.eps:
                    if self.verbose:
                        print(
                            'Epoch {}: reducing learning rate from {} to {}.'.format(
                                self.epoch_num,
                                float(self.learning_rate),
                                float(new_lr),
                            )
                        )
                    self.learning_rate = new_lr

    def _is_better(self, current, best):
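        # 'rel' measures improvement relative to the best value seen so far; 'abs' uses a fixed margin.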
        if self.mode == 'min' and self.threshold_mode == 'rel':
            return current < best - best * self.threshold

        elif self.mode == 'min' and self.threshold_mode == 'abs':
            return current < best - self.threshold

        elif self.mode == 'max' and self.threshold_mode == 'rel':
            return current > best + best * self.threshold

        else:
            return current > best + self.threshold


class _LearningRateEpochDecay(LearningRateDecay):
    """
    :api_attr: imperative

    Base class of learning rate decay, which is updated each epoch.

    Define the common interface of an _LearningRateEpochDecay.
    Users should not use this class directly,
    but should use one of its implementations, and invoke the method `epoch()` once per epoch.
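
    A minimal sketch of a subclass (illustrative only; the class name and decay rule
    below are hypothetical):

    .. code-block:: python

        class TenEpochHalvingDecay(_LearningRateEpochDecay):
            def __init__(self, learning_rate, decay_rate=0.5):
                self.decay_rate = decay_rate
                super().__init__(learning_rate)

            def get_lr(self):
                # halve the learning rate every 10 epochs
                return self.base_lr * self.decay_rate ** (self.epoch_num // 10)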
    """

    def __init__(self, learning_rate, dtype=None):
        if not isinstance(learning_rate, (float, int)):
            raise TypeError(
                "The type of 'learning_rate' must be 'float, int', but received %s."
                % type(learning_rate)
            )
        if learning_rate < 0:
            raise ValueError("Invalid learning rate: {}".format(learning_rate))

        self.base_lr = float(learning_rate)

        self.epoch_num = -1
        self.dtype = dtype
        if dtype is None:
            self.dtype = "float32"
        self.learning_rate = self.create_lr_var(self.base_lr)

        self.epoch()

    # For subclasses that overload _LearningRateEpochDecay, "self.epoch_num/learning_rate" will be stored by default.
    # You can change it for your subclass.
    def _state_keys(self):
        self.keys = ['epoch_num', 'learning_rate']

    def __call__(self):
        """
        Return last computed learning rate on current epoch.
        """
        if not isinstance(self.learning_rate, Variable):
            self.learning_rate = self.create_lr_var(self.learning_rate)
        return self.learning_rate

    def epoch(self, epoch=None):
        """
        Compute the learning rate and update it when invoked.
        """
        if epoch is None:
            self.epoch_num += 1
        else:
            self.epoch_num = epoch

        self.learning_rate = self.get_lr()

    def get_lr(self):
        raise NotImplementedError


class StepDecay(_LearningRateEpochDecay):
    """
    :api_attr: imperative

    Decays the learning rate of ``optimizer`` by ``decay_rate`` every ``step_size`` number of epochs.

    The algorithm can be described as the code below.

    .. code-block:: text

        learning_rate = 0.5
        step_size = 30
        decay_rate = 0.1

        learning_rate = 0.5     if epoch < 30
        learning_rate = 0.05    if 30 <= epoch < 60
        learning_rate = 0.005   if 60 <= epoch < 90
        ...

    Parameters:
        learning_rate (float|int): The initial learning rate. It can be set to python float or int number.
        step_size (int): Period of learning rate decay.
        decay_rate (float, optional): The ratio by which the learning rate will be reduced. ``new_lr = origin_lr * decay_rate`` .
            It should be less than 1.0. Default: 0.1.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            with fluid.dygraph.guard():
                x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
                linear = paddle.nn.Linear(10, 10)
                input = fluid.dygraph.to_variable(x)
                scheduler = fluid.dygraph.StepDecay(0.5, step_size=3)
                adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())

                for epoch in range(9):
                    for batch_id in range(5):
                        out = linear(input)
                        loss = paddle.mean(out)
                        adam.minimize(loss)
                    scheduler.epoch()

                    print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr()))
                    # epoch:0, current lr is 0.5
                    # epoch:1, current lr is 0.5
                    # epoch:2, current lr is 0.5
                    # epoch:3, current lr is 0.05
                    # epoch:4, current lr is 0.05
                    # epoch:5, current lr is 0.05
                    # epoch:6, current lr is 0.005
                    # epoch:7, current lr is 0.005
                    # epoch:8, current lr is 0.005

    """

    def __init__(self, learning_rate, step_size, decay_rate=0.1):
        if not isinstance(step_size, int):
            raise TypeError(
                "The type of 'step_size' must be 'int', but received %s."
                % type(step_size)
            )
        if decay_rate >= 1.0:
            raise ValueError('decay_rate should be < 1.0.')

        self.step_size = step_size
        self.decay_rate = decay_rate
        super().__init__(learning_rate)

    def get_lr(self):
        decay_rate = self.create_lr_var(self.decay_rate)
        i = self.epoch_num // self.step_size
        return self.base_lr * (decay_rate**i)


class MultiStepDecay(_LearningRateEpochDecay):
    """
    :api_attr: imperative

    Decays the learning rate of ``optimizer`` by ``decay_rate`` once ``epoch`` reaches one of the milestones.

    The algorithm can be described as the code below.

    .. code-block:: text

        learning_rate = 0.5
        milestones = [30, 50]
        decay_rate = 0.1
        if epoch < 30:
            learning_rate = 0.5
        elif epoch < 50:
            learning_rate = 0.05
        else:
            learning_rate = 0.005

    Parameters:
        learning_rate (float|int): The initial learning rate. It can be set to python float or int number.
        milestones (tuple|list): A list or tuple of epoch milestones. Must be increasing.
        decay_rate (float, optional): The ratio by which the learning rate will be reduced. ``new_lr = origin_lr * decay_rate`` .
            It should be less than 1.0. Default: 0.1.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            with fluid.dygraph.guard():
                x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
                linear = paddle.nn.Linear(10, 10)
                input = fluid.dygraph.to_variable(x)
                scheduler = fluid.dygraph.MultiStepDecay(0.5, milestones=[3, 5])
                adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())

                for epoch in range(6):
                    for batch_id in range(5):
                        out = linear(input)
                        loss = paddle.mean(out)
                        adam.minimize(loss)
                    scheduler.epoch()

                    print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr()))
                    # epoch:0, current lr is 0.5
                    # epoch:1, current lr is 0.5
                    # epoch:2, current lr is 0.5
                    # epoch:3, current lr is 0.05
                    # epoch:4, current lr is 0.05
                    # epoch:5, current lr is 0.005

    """

    def __init__(self, learning_rate, milestones, decay_rate=0.1):
        if not isinstance(milestones, (tuple, list)):
            raise TypeError(
                "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s."
                % type(milestones)
            )

        if not all(
            [
                milestones[i] < milestones[i + 1]
                for i in range(len(milestones) - 1)
            ]
        ):
            raise ValueError('The elements of milestones must be increasing')
        if decay_rate >= 1.0:
            raise ValueError('decay_rate should be < 1.0.')

        self.milestones = milestones
        self.decay_rate = decay_rate
        super().__init__(learning_rate)

    def get_lr(self):
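        # The base learning rate is multiplied by decay_rate once for every milestone already reached.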
        decay_rate = self.create_lr_var(self.decay_rate)
        for i in range(len(self.milestones)):
            if self.epoch_num < self.milestones[i]:
                return self.base_lr * (decay_rate**i)

        return self.base_lr * (decay_rate ** len(self.milestones))


class LambdaDecay(_LearningRateEpochDecay):
    """
    :api_attr: imperative

    Sets the learning rate of ``optimizer`` to the initial lr times a multiplicative factor, and this multiplicative
    factor is computed by the function ``lr_lambda`` . ``lr_lambda`` is a function which receives ``epoch`` .

    The algorithm can be described as the code below.

    .. code-block:: text

        learning_rate = 0.5        # init learning_rate
        lr_lambda = lambda epoch: 0.95 ** epoch

        learning_rate = 0.5        # epoch 0
        learning_rate = 0.475      # epoch 1
        learning_rate = 0.45125    # epoch 2

    Parameters:
        learning_rate (float|int): The initial learning rate. It can be set to python float or int number.
        lr_lambda (function): A function which computes a multiplicative factor given an integer parameter ``epoch`` , and
            then multiplies the initial learning rate by this multiplicative factor.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            with fluid.dygraph.guard():
                x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
                linear = paddle.nn.Linear(10, 10)
                input = fluid.dygraph.to_variable(x)
                scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x)
                adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())

                for epoch in range(6):
                    for batch_id in range(5):
                        out = linear(input)
                        loss = paddle.mean(out)
                        adam.minimize(loss)
                    scheduler.epoch()

                    print("epoch:{}, current lr is {}".format(epoch, adam.current_step_lr()))
                    # epoch:0, current lr is 0.5
                    # epoch:1, current lr is 0.475
                    # epoch:2, current lr is 0.45125

    """

    def __init__(self, learning_rate, lr_lambda):
        if not callable(lr_lambda):
            raise TypeError(
                "The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s."
                % type(lr_lambda)
            )

        self.lr_lambda = lr_lambda
        super().__init__(learning_rate)

    def get_lr(self):
        base_lr = self.create_lr_var(self.base_lr)

        return self.base_lr * self.lr_lambda(self.epoch_num)