# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import logging
from collections import defaultdict

import paddle
from paddle.fluid.framework import (
    Variable,
    default_main_program,
    device_guard,
    name_scope,
)

from ..fluid import framework
from ..fluid import layers
from ..fluid import unique_name
from ..fluid.backward import _get_no_grad_set_name, append_backward
from ..fluid.clip import (
    GradientClipBase,
    append_gradient_clip_ops,
    error_clip_callback,
)
from ..fluid.framework import program_guard, Parameter
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from ..fluid.dygraph import base as imperative_base
from paddle.fluid import core
from .lr import LRScheduler
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import (
    _in_legacy_dygraph,
    _in_eager_without_dygraph_check,
    _current_expected_place,
    in_dygraph_mode,
)

__all__ = []


@framework.static_only
def append_backward_new(
    loss_list,
    parameter_list=None,
    no_grad_set=None,
    callbacks=None,
    checkpoints=None,
    distop_context=None,
):
    from paddle.incubate.autograd.primx import orig2prim, Transform

    program = default_main_program()
    assert (
        program.num_blocks == 1
    ), "The append_backward_new interface is designed to process only one block."
    block = program.current_block()
    for el in loss_list:
        assert (
            el.block == block
        ), 'variable in loss_list should be in current block of main program'

    orig2prim(block)
    ad = Transform(block)
    if parameter_list is None:
        parameter_list = program.global_block().all_parameters()
    param_dot, loss_dot = ad.linearize(parameter_list, loss_list)
    loss_bar, param_bar = ad.transpose(loss_dot, param_dot)

    # remove param_dot and their constructor ops
    op_indexes = []
    for var in param_dot:
        if var is not None:
            op_index = block.ops.index(var.op)
            assert op_index >= 0
            op_indexes.append(op_index)

    ad.erase_ops(sorted(op_indexes))
    ad.erase_dots(param_dot)

    if len(parameter_list) == 1:
        params_and_grads = [(parameter_list[0], param_bar)]
    else:
        params_and_grads = []
        for i, param in enumerate(parameter_list):
            params_and_grads.append((param, param_bar[i]))
    return params_and_grads

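# Illustrative usage sketch (not part of the public API; assumes the prim
# transforms in paddle.incubate.autograd are available and enabled):
#
#     import paddle
#     from paddle.incubate.autograd import enable_prim
#
#     paddle.enable_static()
#     enable_prim()
#     # With prim enabled, Optimizer.backward() below routes through
#     # append_backward_new() instead of append_backward().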

class Optimizer:
    r"""Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly,
    but should use one of its implementations instead.

    Args:
        learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
            It can be a float value or any subclass of ``LRScheduler`` .
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` to update to minimize ``loss``. \
            This parameter is required in dygraph mode. You can also specify different options for \
            different parameter groups, such as the learning rate and weight decay; in that case, \
            ``parameters`` is a list of dicts. Note that the learning_rate in a parameter group \
            represents a scale factor on the base learning_rate. \
            The default value is None in static mode, at which time all parameters will be updated.
        weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
            It can be a float value as the coeff of L2 regularization or \
            :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
            If a parameter has already set a regularizer using :ref:`api_fluid_ParamAttr`, \
            the regularization setting here in the optimizer will be ignored for this parameter. \
            Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, an instance of \
            some derived class of ``GradientClipBase`` . There are three clipping strategies \
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for the user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Returns:
       Base class for optimizer.

    Examples:
        .. code-block:: python

            #Take the subclass adam as an example
            import paddle
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear(inp)
            loss = paddle.mean(out)
            adam = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters())
            loss.backward()
            adam.step()
            adam.clear_grad()

            #Take the subclass sgd as an example
            #optimize parameters in linear_1 and linear_2 with different options.
            #Note that the learning_rate of linear_2 is 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1
                }],
                weight_decay=0.01)
            loss.backward()
            sgd.step()
            sgd.clear_grad()

    """

    @imperative_base.no_grad
    def __init__(
        self,
        learning_rate,
        parameters=None,
        weight_decay=None,
        grad_clip=None,
        name=None,
    ):

        if parameters is not None:
            # paddle.Tensor is also iterable, so we cannot simply check whether
            # the input is iterable here; if the input were a paddle.Tensor,
            # list(paddle.Tensor) would produce an erroneous value
            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                raise TypeError(
                    "`parameters` argument given to the optimizer should be "
                    "an iterable of paddle Tensors, but got argument type is `{}`.".format(
                        type(parameters)
                    )
                )
            if isinstance(parameters, dict):
                raise TypeError(
                    "`parameters` argument should not get dict type, "
                    "if parameter groups are needed, please set `parameters`"
                    " as a list of dicts"
                )
            self._parameter_list = list(parameters)
        else:
            self._parameter_list = None

        self._name = name
        if framework._non_static_mode():
            if self._parameter_list is None:
                raise AttributeError(
                    "parameters argument given to the Optimizer should not be None in dygraph mode."
                )
            if weight_decay is not None:
                if not isinstance(self._parameter_list[0], dict):
                    for param in self._parameter_list:
                        if (
                            hasattr(param, 'regularizer')
                            and param.regularizer is not None
                        ):
                            logging.info(
                                "If the regularizer of a Parameter has already been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr', "
                                "the weight_decay[%s] in the Optimizer will not take effect on it; it will only be applied to other Parameters!"
                                % weight_decay.__str__()
                            )
                            break

        if not isinstance(learning_rate, (float, LRScheduler)):
            raise TypeError(
                "learning rate should be float or LRScheduler, got %s here"
                % type(learning_rate)
            )
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        if isinstance(weight_decay, float):
            from ..fluid.regularizer import L2Decay

            self.regularization = L2Decay(weight_decay)
        else:
            self.regularization = weight_decay
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate

        self._dtype = None
        # Infer the dtype from the parameters
        if self._parameter_list:
            if isinstance(self._parameter_list[0], dict):
                for param_group in self._parameter_list:
                    assert (
                        'params' in param_group
                    ), 'params should be set in parameters if parameter groups are optimized in different options'
                self._dtype = self._parameter_list[0]['params'][0].dtype
            else:
                self._dtype = self._parameter_list[0].dtype

        # each program should have an independent learning rate
        # program -> tensor(learning_rate)
        self._learning_rate_map = dict()
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra tensors associated with the parameters
        # to train. These tensors are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        self.clear_gradients = self.clear_grad
        self._default_dict = {
            'weight_decay': self.regularization,
            'grad_clip': self._grad_clip,
        }

        self._param_groups = []
        if self._parameter_list and isinstance(self._parameter_list[0], dict):
            for param_group in self._parameter_list:
                self._add_param_group(param_group.copy())
        else:
            self._param_groups = self._parameter_list

        # NOTE: Multi Tensor: Pass in all parameters and gradients to the op kernel of the Optimizer at one time for updating in dygraph mode.
        # Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
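        # Illustrative usage (hypothetical values; subclasses expose this via the
        # ``use_multi_tensor`` constructor argument):
        #     adam = paddle.optimizer.Adam(0.01, parameters=linear.parameters(),
        #                                  use_multi_tensor=True)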
        self._use_multi_tensor = None

        self._param_dict = self._create_multi_tensor_dict()
        self._auxiliary_vars = {}

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _create_multi_tensor_dict(self):
        n = len(self._param_groups) if self._param_groups is not None else 1
        return {
            'FP32_LODTensor': [[] for _ in range(n)],
            'FP16_LODTensor': [[] for _ in range(n)],
        }

    def _get_auxiliary_var(self, key):
        return self._auxiliary_vars.get(key, None)

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get the state dict of the optimizer. It contains all the Tensors used by the optimizer. For the Adam optimizer, this includes beta1, beta2, momentum, etc. If an LRScheduler has been used, the global_step will be included in the state dict.
        If the optimizer has never been called (e.g. via the minimize function), the state_dict is empty.

        Args:
            None

        Returns:
            state_dict(dict) : dict containing all the Tensors used by the optimizer

        Examples:
            .. code-block:: python

                import paddle
                emb = paddle.nn.Embedding(10, 10)

                adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
                state_dict = adam.state_dict()

        '''
        state_dict = {}
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        # if the optimizer holds master weights (e.g. for AMP), save them as well
        if hasattr(self, "_master_weights"):
            if len(self._master_weights) != 0:
                state_dict["master_weights"] = self._master_weights
        # save the scheduler state (including the global step) if lr decay is used
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
        return state_dict

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load the optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, the global_step will be changed accordingly.

        Args:
            state_dict(dict) : Dict containing all the Tensors needed by the optimizer
        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                emb = paddle.nn.Embedding(10, 10)

                layer_state_dict = emb.state_dict()
                paddle.save(layer_state_dict, "emb.pdparams")

                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                opt_state_dict = adam.state_dict()
                paddle.save(opt_state_dict, "adam.pdopt")

                opti_state_dict = paddle.load("adam.pdopt")
                adam.set_state_dict(opti_state_dict)

        '''
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])

        # NOTE: exclude learning rate scheduler's state from
        # _accumulators_holder.
        state_dict = state_dict.copy()
        if "LR_Scheduler" in state_dict:
            state_dict.pop("LR_Scheduler")
        if "master_weights" in state_dict:
            if hasattr(self, "_master_weights"):
                self._master_weights = state_dict["master_weights"]
            state_dict.pop("master_weights")
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert (
                    var_tmp.name in state_dict
                ), "optimizer Tensor {} not found".format(var_tmp.name)
                var = var_tmp.value()
                tensor = var.get_tensor()
                model_np = np.array(tensor)

                load_para = state_dict[var_tmp.name]

                if isinstance(load_para, Variable):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, core.VarBase):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, np.ndarray):
                    load_para_np = load_para
                else:
                    raise RuntimeError(
                        "State dict type {} not supported".format(
                            str(type(load_para))
                        )
                    )

                assert (
                    model_np.shape == load_para_np.shape
                ), "Parameter shape does not match: dygraph Parameter [ {} ] needs a tensor with shape {} but the loaded tensor has shape {}".format(
                    var_tmp.name, model_np.shape, load_para_np.shape
                )

                assert (
                    model_np.dtype == load_para_np.dtype
                ), "Parameter dtype does not match: dygraph Parameter [ {} ] needs a tensor with dtype {} but the loaded tensor has dtype {}".format(
                    var_tmp.name, model_np.dtype, load_para_np.dtype
                )

                tensor.set(load_para_np, framework._current_expected_place())

    def get_opti_var_name_list(self):
        return self._opti_name_list

    def _create_global_learning_rate(self):
        # The lr var can't be float16 or bfloat16; for pure fp16 or bf16 training, the lr dtype needs extra handling
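        # E.g. (illustrative): with the default dtype "float32" and parameters cast
        # to float16 for AMP training, the lr var is still created as float32 below.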
        _lr_dtype = (
            paddle.get_default_dtype() if self._dtype is None else self._dtype
        )
        _lr_dtype = (
            paddle.float32
            if (
                (
                    paddle.get_default_dtype() != "float16"
                    and _lr_dtype == paddle.float16
                )
                or (
                    paddle.get_default_dtype() != "bfloat16"
                    and _lr_dtype == paddle.bfloat16
                )
            )
            else _lr_dtype
        )
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[1],
                    persistable=True,
                    stop_gradient=True,
                    dtype=_lr_dtype,
                )
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var

                self._learning_rate_map[
                    framework.default_main_program()
                ] = lr_var

            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value)
            )
        elif isinstance(self._learning_rate, float):
            # only create global lr_var once
            lr = self._global_learning_rate()
            if isinstance(lr, framework.Variable):
                return
            else:
                self._learning_rate_map[
                    framework.default_main_program()
                ] = layers.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(self._learning_rate),
                    dtype=_lr_dtype,
                    persistable=True,
                )

    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer uses an LRScheduler,
        this API cannot be invoked, because it would lead to a conflict.

        Args:
            value (float): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)

                adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                # set learning rate manually by python float value
                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6

        """
        if not isinstance(value, (int, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be int or float, but received %s."
                % (type(value))
            )
        if isinstance(self._learning_rate, LRScheduler):
            raise RuntimeError(
                "optimizer's learning rate can't be an LRScheduler when invoking this API, because that would lead to a conflict."
            )
        self._learning_rate = float(value)
        current_lr = self._global_learning_rate()
        if current_lr is not None:
            if in_dygraph_mode():
                place = _current_expected_place()
                _C_ops.full_(
                    current_lr,
                    list(current_lr.shape),
                    float(value),
                    current_lr.dtype,
                    place,
                )

            elif _in_legacy_dygraph():
                _legacy_C_ops.fill_constant(
                    current_lr,
                    'value',
                    float(value),
                    'dtype',
                    current_lr.dtype,
                    'shape',
                    list(current_lr.shape),
                )
            else:
                global_block = framework.default_main_program().global_block()
                global_block.append_op(
                    type='fill_constant',
                    outputs={'Out': [current_lr]},
                    attrs={
                        'dtype': current_lr.dtype,
                        'shape': list(current_lr.shape),
                        'value': float(value),
                    },
                    stop_gradient=True,
                )

    def get_lr(self):
        """
        Get the current learning rate of the optimizer.
        If an 'LRScheduler' is not used, the return value is the same every time.
        If an 'LRScheduler' is used, the return value is the current scheduled learning rate.

        Returns:
            float: The current learning rate of the optimizer.

        Examples:
            .. code-block:: python

                # train on default dynamic graph mode
                import paddle
                import numpy as np
                emb = paddle.nn.Embedding(10, 3)

                ## example1: LRScheduler is not used; the returned value is always the same
                adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
                    adam.step()

                ## example2: StepDecay is used; returns the scheduled learning rate
                scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
                    adam.step()
                    scheduler.step()

                # train on static graph mode
                paddle.enable_static()
                main_prog = paddle.static.Program()
                start_prog = paddle.static.Program()
                with paddle.static.program_guard(main_prog, start_prog):
                    x = paddle.static.data(name='x', shape=[None, 10])
                    z = paddle.static.nn.fc(x, 100)
                    loss = paddle.mean(z)
                    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                    adam = paddle.optimizer.Adam(learning_rate=scheduler)
                    adam.minimize(loss)

                exe = paddle.static.Executor()
                exe.run(start_prog)
                for batch in range(10):
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr()))     # 0.5->0.05->0.005...
                    out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
                    scheduler.step()

        """
        if isinstance(self._learning_rate, float):
            return self._learning_rate
        else:
            return self._learning_rate()

    def _global_learning_rate(self, program=None):
        """
        get the global decayed learning rate
        :return: the learning rate Tensor of the given (or default) program, or None
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """append optimize operator to block and return all the added optimize_op"""
        raise NotImplementedError(
            "Class \"Optimizer\" cannot be used directly as an optimizer, please use its subclasses such as \"Adam\""
        )

    def _create_param_lr(self, param_and_grad):
        # create learning rate tensor for every parameter
        param = param_and_grad[0]
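        # A parameter's optimize_attr['learning_rate'] acts as a scale factor on the
        # global learning rate; e.g. (illustrative) a parameter created with
        # paddle.ParamAttr(learning_rate=2.0) is updated with 2.0 * global_lr.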
        if hasattr(param, 'optimize_attr'):
            param_lr = param.optimize_attr['learning_rate']
            if type(param_lr) == Variable:
                return param_lr
            else:
                if param_lr == 1.0:
                    return self._global_learning_rate()
                else:
                    with default_main_program()._lr_schedule_guard(
                        is_with_opt=True
                    ), framework.name_scope('scale_with_param_lr'):
                        return self._global_learning_rate() * param_lr
        else:
            return self._global_learning_rate()

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    def _finish_update(self, block, parameters_and_grads):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer

        Returns:
            None
        """
        pass

    def _add_accumulator(
        self,
        name,
        param,
        dtype=None,
        fill_value=0.0,
        shape=None,
        type=None,
        device=None,
    ):
        """Utility function to add an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be added
            dtype: data type of the accumulator tensor
            fill_value: value to initialize the accumulator tensor
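
        Example (illustrative; how subclasses typically call it)::

            # inside a subclass's _create_accumulators(block, parameters):
            #     for p in parameters:
            #         self._add_accumulator('moment', p, fill_value=0.0)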
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name in self._accumulators
            and param.name in self._accumulators[name]
        ):
            if framework._non_static_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name
                )
            )
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if framework._in_eager_without_dygraph_check()
            else (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True,
        )
        if device is None:
            device = self._get_device_for_param(param.name)
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value))
            )

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert (
                    var_name in self._accumulators_holder
                ), "Optimizer set error, {} should in state dict".format(
                    var_name
                )
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be fetched

        Returns:
            accumulator tensor for the parameter
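
        Example (illustrative)::

            # inside a subclass's _append_optimize_op(block, param_and_grad):
            #     moment = self._get_accumulator('moment', param_and_grad[0])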
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name not in self._accumulators
            or param.name not in self._accumulators[name]
        ):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, param.name
                )
            )
        return self._accumulators[name][param.name]

    def _update_param_device_map(self, parameters_and_grads, target_block):
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].stop_gradient is False:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = (
                    core.op_proto_and_checker_maker.kOpDeviceAttrName()
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name
                        )
                        break

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device

    def _create_optimization_pass(
        self, parameters_and_grads, param_group_idx=0
    ):
        """Add optimization operators to update gradients to tensors.

        Args:
          parameters_and_grads(list(tuple(Tensor, Tensor))):
            a list of (tensor, gradient) pair to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        # _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.
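        # Illustrative subclass sketch (hypothetical, for orientation only):
        #
        #     class MySGD(Optimizer):
        #         def _append_optimize_op(self, block, param_and_grad):
        #             return block.append_op(
        #                 type='sgd',
        #                 inputs={'Param': param_and_grad[0],
        #                         'Grad': param_and_grad[1],
        #                         'LearningRate': self._create_param_lr(param_and_grad)},
        #                 outputs={'ParamOut': param_and_grad[0]},
        #                 stop_gradient=True)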

        # Always called under program_guard, using the global block as the loss block.
        # But if the current block is in control flow, append the optimize ops in the
        # grad block of the current block.

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert (
                current_block.backward_block_idx != -1
            ), "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx
            ]

        start = len(target_block.ops)
        self.helper = LayerHelper(self.__class__.__name__)

        self._create_global_learning_rate()

        # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
        if self._use_multi_tensor and self.__class__.__name__ in [
            'Momentum',
            'Adam',
        ]:
            if (
                len(self._param_dict['FP32_LODTensor'][param_group_idx]) == 0
                and len(self._param_dict['FP16_LODTensor'][param_group_idx])
                == 0
            ):
                if isinstance(parameters_and_grads, list):
                    assert param_group_idx == 0
                    self._multi_tensor_init(
                        target_block,
                        [
                            p[0]
                            for p in parameters_and_grads
                            if not p[0].stop_gradient
                        ],
                        param_group_idx,
                    )
                else:
                    self._update_param_group(parameters_and_grads)
                    self._multi_tensor_init(
                        target_block,
                        [
                            p[0]
                            for p in parameters_and_grads['params']
                            if not p[0].stop_gradient
                        ],
                        param_group_idx,
                    )
            if framework._non_static_mode():
                self._append_optimize_multi_tensor_op(
                    target_block,
                    parameters_and_grads,
                    param_group_idx=param_group_idx,
                )
            else:
                self._update_param_device_map(
                    parameters_and_grads, target_block
                )
                # NOTE: Multi Tensor requires all parameters to be in the same device and program.
                # param_grad_list = [p_0,g_0,p_1,g_1,....]
                param_grad_list = []
                for param_and_grad in parameters_and_grads:
                    if (
                        not param_and_grad[0].stop_gradient
                        and param_and_grad[1] is not None
                    ):
                        param_grad_list.append(param_and_grad[0])
                        param_grad_list.append(param_and_grad[1])
                with param_grad_list[0].block.program._optimized_guard(
                    param_grad_list
                ), name_scope("optimizer"):
                    device = self._get_device_for_param(param_grad_list[0].name)
                    with device_guard(device):
                        self._append_optimize_multi_tensor_op(
                            target_block,
                            parameters_and_grads,
                            param_group_idx=param_group_idx,
                        )
        else:
            if not framework._non_static_mode():
                params_grads_device_map = (
                    parameters_and_grads['params']
                    if isinstance(parameters_and_grads, dict)
                    else parameters_and_grads
                )
                self._update_param_device_map(
                    params_grads_device_map, target_block
                )

            if isinstance(parameters_and_grads, list):
                self._create_accumulators(
                    target_block,
                    [
                        p[0]
                        for p in parameters_and_grads
                        if not p[0].stop_gradient
                    ],
                )
            else:
                params_acc_dict = parameters_and_grads.copy()
                params_acc_dict['params'] = [
                    p[0]
                    for p in params_acc_dict['params']
                    if not p[0].stop_gradient
                ]
                self._create_accumulators(target_block, params_acc_dict)

            if framework._non_static_mode():
                if isinstance(parameters_and_grads, list):
                    for param_and_grad in parameters_and_grads:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            self._append_optimize_op(
                                target_block, param_and_grad
                            )
                else:
                    for param_and_grad in parameters_and_grads['params']:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            param_grad_dict = dict()
                            param_grad_dict['params'] = param_and_grad
                            param_grad_dict.update(
                                {
                                    k: v
                                    for k, v in parameters_and_grads.items()
                                    if k != 'params'
                                }
                            )
                            self._append_optimize_op(
                                target_block, param_grad_dict
                            )
            else:
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    with param_and_grad[0].block.program._optimized_guard(
                        param_and_grad
                    ), name_scope("optimizer"):
                        if param_and_grad[0].stop_gradient is False:
                            device = self._get_device_for_param(
                                param_and_grad[0].name
                            )
                            with device_guard(device):
                                optimize_op = self._append_optimize_op(
                                    target_block, param_and_grad
                                )

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)

    def _append_dgc_ops(self, param_and_grad):
        pass

    def backward(
        self,
        loss,
        startup_program=None,
        parameters=None,
        no_grad_set=None,
        callbacks=None,
    ):
        """
        The first part of ``minimize``; performs auto-diff to append backward operations for
        the current program.

        Args:
            loss (Tensor): ``loss`` tensor to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.

        Returns:
            list: list of (param, grad) tensor pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.arange(26, dtype="float32").reshape([2, 13])

                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(x)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        act_no_grad_set = None
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if framework._non_static_mode():
            parameter_list = parameters if parameters else self._parameter_list

            if framework.in_dygraph_mode():
                # It is very time-consuming to call c++ functions in a loop on the python side.
                # We put this part of the code on the c++ side to improve the speed in eager mode.
                params_grads = []
                grads = core.eager.get_all_grads(parameter_list)
                for index, grad in enumerate(grads):
                    if grad is not None:
                        params_grads.append((parameter_list[index], grad))
            else:
                # Keep the original code to support legacy mode.
                # Delete the else branch when the legacy mode exits.
                params_grads = []
                for param in parameter_list:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        # create gradient tensor
                        grad_var = param._grad_ivar()
                        params_grads.append((param, grad_var))
        else:
            if callbacks is None:
                callbacks = [error_clip_callback]
            else:
                assert isinstance(callbacks, list)
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, (
                "The loss.shape should be [1], but the current loss.shape is {}. "
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape
                )
            )
            parameter_list = parameters if parameters else self._parameter_list
            with program_guard(program, startup_program):
                from paddle.incubate.autograd.utils import prim_enabled

                if prim_enabled():
                    params_grads = append_backward_new(
                        [loss], parameter_list, act_no_grad_set, callbacks
                    )
                else:
                    params_grads = append_backward(
                        loss, parameter_list, act_no_grad_set, callbacks
                    )
                # Note: since we can't use all_reduce_op now,
                #  dgc_op should be the last op of one grad.
                self._append_dgc_ops(params_grads)
        return params_grads

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle

                inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
                linear = paddle.nn.Linear(10, 10)
                out = linear(inp)
                loss = paddle.mean(out)
                optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters())
                params_grads = optimizer.backward(loss)
                optimizer.apply_gradients(params_grads)

        """

        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:
            params_grads = append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(
            params_grads, self.regularization
        )

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

    def _apply_optimize(
        self, loss, startup_program, params_grads, param_group_idx=0
    ):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Tensor): loss tensor to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameters`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
            with program_guard(
                framework.default_main_program(),
                framework.default_startup_program(),
            ):
                if isinstance(params_grads, list):
                    if self._grad_clip is not None:
                        params_grads = self._grad_clip(params_grads)
                    params_grads = self.append_regularization_ops(
                        params_grads, self.regularization
                    )
                else:
                    grad_clip = params_grads['grad_clip']
                    if grad_clip is not None:
                        params_grads['params'] = grad_clip(
                            params_grads['params']
                        )

                    params_grads['params'] = self.append_regularization_ops(
                        params_grads['params'], self.regularization
                    )
                optimize_ops = self._create_optimization_pass(
                    params_grads, param_group_idx=param_group_idx
                )
        else:
            assert param_group_idx == 0
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """Create and add backward regularization Operators

        Helper function of append_regularization_ops.
        """
        # If no gradient or no regularization is specified, there is nothing to do
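        # E.g. with L2Decay(coeff) the regularization term is coeff * param, so the
        # regularized gradient computed below becomes grad + coeff * param.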
        if grad is None or (
            (
                not hasattr(param, 'regularizer')
                or (hasattr(param, 'regularizer') and param.regularizer is None)
            )
            and regularization is None
        ):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

        if framework.in_dygraph_mode():
            return _C_ops.add_n([grad, regularization_term])
        elif framework._in_legacy_dygraph():
            return _legacy_C_ops.sum([grad, regularization_term])

        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR,
            )

        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)

        return new_grad

    def append_regularization_ops(
        self, parameters_and_grads, regularization=None
    ):
        r"""Create and add backward regularization Operators

        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.

        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. It is applied to a parameter
                            only if the parameter has not set its own regularizer.

        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient

        Raises:
            Exception: Unknown regularization type
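
        Example (illustrative)::

            # params_grads as produced by ``Optimizer.backward``:
            # params_grads = optimizer.append_regularization_ops(
            #     params_grads, optimizer.regularization)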
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization
                )
                params_and_grads.append((param, new_grad))
        else:
            repeated_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if (
                        not repeated_regularizer
                        and param.regularizer is not None
                        and regularization is not None
                    ):
                        repeated_regularizer = True
                        logging.info(
                            "If the regularizer of a Parameter has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in the Optimizer will not take effect on it; it will only be applied to other Parameters!"
                            % regularization.__str__()
                        )
1267 1268
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
1269 1270
                            param, grad, regularization
                        )
1271 1272 1273
                        params_and_grads.append((param, new_grad))
        return params_and_grads

    def _get_no_grad_set(self, loss, no_grad_set=None):
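        # Merge the user-provided no_grad_set with the names of all
        # non-trainable parameters so backward skips their gradients.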
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = {
            param.name for param in parameters if param.stop_gradient
        }
        # If the parameter is not trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

    @framework.dygraph_only
    def clear_grad(self, set_to_zero=True):
        """
        Clear the gradients of all optimized parameters for the model.

        If gradients are not cleared, new gradients accumulate on top of the
        previous ones.

        There are two ways to clear gradients: set them to zero or delete them.

        Args:
            set_to_zero (bool, optional): Whether to set the gradients to zero
                rather than deleting them. Default is True.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                a = paddle.arange(26, dtype="float32").reshape([2, 13])
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate=0.01,
                                             parameters=linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()

        """
        param_list = []
        if self._parameter_list is None or not isinstance(
            self._parameter_list[0], dict
        ):
            for p in self._parameter_list:
                if not p.stop_gradient:
                    param_list.append(p)
        else:
            for param_group in self._param_groups:
                for p in param_group['params']:
                    if not p.stop_gradient:
                        param_list.append(p)

        if _in_eager_without_dygraph_check():
            for p in param_list:
                p.clear_gradient(set_to_zero)
        else:
            core.clear_gradients(param_list, set_to_zero)

    @imperative_base.no_grad
    def minimize(
        self, loss, startup_program=None, parameters=None, no_grad_set=None
    ):
        """
        Add operations to minimize ``loss`` by updating ``parameters``.

        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) tensor pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
1357 1358
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
M
MRXLT 已提交
1359 1360 1361 1362
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            .. code-block:: python
1363

M
MRXLT 已提交
1364
                import paddle
M
MRXLT 已提交
1365
                linear = paddle.nn.Linear(10, 10)
1366 1367
                input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
                out = linear(input)
M
MRXLT 已提交
1368 1369 1370 1371 1372 1373 1374 1375
                loss = paddle.mean(out)

                beta1 = paddle.to_tensor([0.9], dtype="float32")
                beta2 = paddle.to_tensor([0.99], dtype="float32")

                adam = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters(),
                        weight_decay=0.01)
R
Roc 已提交
1376
                loss.backward()
M
MRXLT 已提交
1377 1378 1379
                adam.minimize(loss)
                adam.clear_grad()

M
MRXLT 已提交
1380 1381 1382
        """
        assert isinstance(loss, Variable), "The loss should be an Tensor."

        parameter_list = parameters if parameters else self._parameter_list

        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameters=parameter_list,
            no_grad_set=no_grad_set,
        )

        optimize_ops = self._apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads
        )

        return optimize_ops, params_grads

    @imperative_base.no_grad
    @framework.dygraph_only
    def step(self):
        """
        Execute the optimizer and update parameters once.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                a = paddle.arange(26, dtype="float32").reshape([2, 13])
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate=0.01,
                                             parameters=linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        if not isinstance(self._param_groups[0], dict):
            params_grads = []
            for param in self._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))

            self._apply_optimize(
                loss=None,
                startup_program=None,
                params_grads=params_grads,
                param_group_idx=0,
            )

        else:
            # optimize parameters in groups
            for idx, param_group in enumerate(self._param_groups):
                params_grads = defaultdict(list)
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                params_grads.update(
                    {k: v for k, v in param_group.items() if k != 'params'}
                )
                self._apply_optimize(
                    loss=None,
                    startup_program=None,
                    params_grads=params_grads,
                    param_group_idx=idx,
                )

    def _add_param_group(self, param_group):
        """
        Add a param group to parameter_list.

        Args:
            param_group (dict): The group of Tensors to be optimized with
            different optimization options.
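
        Example of a group dict (illustrative; keys other than 'params'
        override the optimizer's defaults for this group)::

            {'params': linear.parameters(), 'learning_rate': 0.1}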
        """
        params = param_group['params']
        if isinstance(params, Parameter):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters should be in ordered collections, "
                "but received set, please use list instead."
            )
        else:
            param_group['params'] = list(params)

        # Update optimization options for each group
        for k, v in self._default_dict.items():
            param_group.setdefault(k, v)

        param_set = set()
        for group in self._param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group"
            )
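
        # Normalize each group's weight_decay into a regularizer object and
        # attach it, along with the group's learning rate, to every parameter.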
        for param in param_group['params']:
            weight_decay = param_group['weight_decay']
            if isinstance(weight_decay, float):
                from ..fluid.regularizer import L2Decay

                regularization = L2Decay(weight_decay)
            else:
                regularization = weight_decay
            param.regularizer = regularization
            param.optimize_attr['learning_rate'] = param_group.get(
                'learning_rate', 1.0
            )

        self._param_groups.append(param_group)

    def _update_param_group(self, parameters):
        """
        Update the param group with new entries.

        Args:
            parameters (dict): The extra group of Tensors to be optimized with
            different optimization options. Only used in child class.
        """
        pass

    @framework.dygraph_only
    def _multi_tensor_init(self, target_block, parameters, param_group_idx):
        """
        All parameters used for optimizer calculations (such as parameters,
        master_weight, and velocity_acc for momentum) are grouped into a
        python list by data type (float16, float32).
        This function will be overridden in the corresponding optimizer file.

        Args:
            target_block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
            param_group_idx: the index of the parameter group being processed
        """
        pass

    @framework.dygraph_only
    def _append_optimize_multi_tensor_op(
        self, target_block, parameters_and_grads, param_group_idx
    ):
        """
        For the multi-tensor path, append the merged optimize operator to
        the target block.
        """
        pass

    def _is_dtype_fp16_or_bf16(self, dtype):
        """
        Check whether the dtype is float16 (fp16) or bfloat16 (bf16).
        :param dtype: instance of core.VarDesc.VarType
        :return: True if dtype is one of fp16 or bf16, False otherwise
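
        Example (illustrative)::

            self._is_dtype_fp16_or_bf16(core.VarDesc.VarType.FP16)  # True
            self._is_dtype_fp16_or_bf16(core.VarDesc.VarType.FP32)  # False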
        """
        assert isinstance(
            dtype, core.VarDesc.VarType
        ), "The dtype should be an instance of core.VarDesc.VarType."
        return (
            dtype == core.VarDesc.VarType.FP16
            or dtype == core.VarDesc.VarType.BF16
        )