# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import defaultdict

import numpy as np

import paddle
import paddle.autograd as imperative_base
from paddle import _C_ops
from paddle.fluid import core
from paddle.fluid.framework import (
    Variable,
    _current_expected_place,
    _in_eager_without_dygraph_check,
    default_main_program,
    device_guard,
    in_dygraph_mode,
    name_scope,
)

from ..fluid import framework, unique_name
from ..fluid.backward import _get_no_grad_set_name, append_backward
from ..fluid.framework import Parameter, program_guard
from ..fluid.layer_helper import LayerHelper
from .lr import LRScheduler

__all__ = []


@framework.static_only
def append_backward_new(
    loss_list,
    parameter_list=None,
    no_grad_set=None,
    callbacks=None,
    checkpoints=None,
    distop_context=None,
):
    from paddle.incubate.autograd.primx import Transform, orig2prim

    program = default_main_program()
    assert (
        program.num_blocks == 1
    ), "The append_backward_new interface is designed to process only one block."
    block = program.current_block()
    for el in loss_list:
        assert (
            el.block == block
        ), 'variable in loss_list should be in current block of main program'

    orig2prim(block)
    ad = Transform(block)
    if parameter_list is None:
        parameter_list = program.global_block().all_parameters()
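    # Forward-mode linearization followed by a transpose (reverse-mode) pass over
    # the primitive ops yields the gradients of the losses w.r.t. the parameters.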
    param_dot, loss_dot = ad.linearize(parameter_list, loss_list)
    loss_bar, param_bar = ad.transpose(loss_dot, param_dot)

    # remove param_dot and their constructor ops
    op_indexes = []
    for var in param_dot:
        if var is not None:
            op_index = block.ops.index(var.op)
            assert op_index >= 0
            op_indexes.append(op_index)

    ad.erase_ops(sorted(op_indexes))
    ad.erase_dots(param_dot)

    if len(parameter_list) == 1:
        params_and_grads = [(parameter_list, param_bar)]
    else:
        params_and_grads = []
        for i, param in enumerate(parameter_list):
            params_and_grads.append((param, param_bar[i]))
    return params_and_grads


class Optimizer:
    r"""Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly,
    but should use one of its implementations instead.

    Args:
        learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
            It can be a float value or any subclass of ``LRScheduler`` .
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. You can also specify different options for \
            different parameter groups, such as the learning rate, weight decay, etc., \
            in which case the parameters are a list of dicts. Note that the learning_rate in parameter groups \
            represents the scale of the base learning_rate. \
            The default value is None in static graph mode, at this time all parameters will be updated.
        weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
            It can be a float value as the coefficient of L2 regularization or \
            :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
            If a parameter has already set a regularizer using :ref:`api_fluid_ParamAttr`, \
            the regularization setting here in the optimizer will be ignored for this parameter. \
            Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \
            some derived class of ``GradientClipBase`` . There are three clipping strategies \
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Returns:
       Base class for optimizer.

    Examples:
        .. code-block:: python

            #Take the subclass adam as an example
            import paddle
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear(inp)
            loss = paddle.mean(out)
            adam = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters())
            loss.backward()
            adam.step()
            adam.clear_grad()

            #Take the subclass sgd as an example
            #optimize parameters in linear_1 and linear_2 with different options.
            #Note that the learning_rate of linear_2 is 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1
                }],
                weight_decay=0.01)
            loss.backward()
            sgd.step()
            sgd.clear_grad()

    """

    @imperative_base.no_grad()
    def __init__(
        self,
        learning_rate,
        parameters=None,
        weight_decay=None,
        grad_clip=None,
        name=None,
    ):

        if parameters is not None:
            # paddle.Tensor is also iterable, so here we don't check whether
            # the input is iterable; if the input is a paddle.Tensor,
            # list(paddle.Tensor) would produce a wrong value
            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                raise TypeError(
                    "`parameters` argument given to the optimizer should be "
                    "an iterable of paddle Tensors, but got argument type is `{}`.".format(
                        type(parameters)
                    )
                )
            if isinstance(parameters, dict):
                raise TypeError(
                    "`parameters` argument should not get dict type, "
                    "if parameter groups is needed, please set `parameters`"
190 191
                    " as list of dict"
                )
192 193 194 195
            self._parameter_list = list(parameters)
        else:
            self._parameter_list = None

        self._name = name
        if framework._non_static_mode():
            if self._parameter_list is None:
                raise AttributeError(
                    "parameters argument given to the Optimizer should not be None in dygraph mode."
                )
            if weight_decay is not None:
                if not isinstance(self._parameter_list[0], dict):
                    for param in self._parameter_list:
                        if (
                            hasattr(param, 'regularizer')
                            and param.regularizer is not None
                        ):
                            logging.info(
                                "If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. "
                                "The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                                % weight_decay.__str__()
                            )
                            break

        if not isinstance(learning_rate, (float, LRScheduler)):
            raise TypeError(
                "learning rate should be float or LRScheduler, got %s here"
                % type(learning_rate)
            )
        if grad_clip is not None:
            if not isinstance(grad_clip, paddle.nn.clip.GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        if isinstance(weight_decay, float):
            from ..fluid.regularizer import L2Decay

            self.regularization = L2Decay(weight_decay)
        else:
            self.regularization = weight_decay
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate

        self._dtype = None
        # Infer the dtype from the parameters
        if self._parameter_list:
            if isinstance(self._parameter_list[0], dict):
                for param_group in self._parameter_list:
                    assert (
                        'params' in param_group
                    ), 'params should be set in parameters if parameter groups are optimized in different options'
                self._dtype = self._parameter_list[0]['params'][0].dtype
            else:
                self._dtype = self._parameter_list[0].dtype

        # each program should have an independent learning rate
        # program -> tensor(learning_rate)
        self._learning_rate_map = dict()
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra tensors associated with the parameters
        # to train. These tensors are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
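        # keep the old name `clear_gradients` as an alias of `clear_grad`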
        self.clear_gradients = self.clear_grad
        self._default_dict = {
            'weight_decay': self.regularization,
            'grad_clip': self._grad_clip,
        }

        self._param_groups = []
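        # If `parameters` was given as a list of dicts, register each dict as a
        # separate parameter group; otherwise use the flat parameter list directly.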
        if self._parameter_list and isinstance(self._parameter_list[0], dict):
            for param_group in self._parameter_list:
                self._add_param_group(param_group.copy())
        else:
            self._param_groups = self._parameter_list

        # NOTE: Multi Tensor: Pass in all parameters and gradients to the op kernel of the Optimizer at one time for updating for dygraph mode.
        # Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
        self._use_multi_tensor = None

        self._param_dict = self._create_multi_tensor_dict()
        self._auxiliary_vars = {}

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _create_multi_tensor_dict(self):
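        # One FP32 bucket and one FP16 bucket per parameter group, used by the
        # fused multi-tensor optimizer path.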
        n = len(self._param_groups) if self._param_groups is not None else 1
        return {
            'FP32_LODTensor': [[] for _ in range(n)],
            'FP16_LODTensor': [[] for _ in range(n)],
        }

    def _get_auxiliary_var(self, key):
        return self._auxiliary_vars.get(key, None)

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get state dict information from the optimizer. It contains all the tensors used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be included in the state dict.
        If the optimizer has never been called (via the minimize function), the state_dict is empty.

        Args:
            None

        Returns:
            state_dict(dict) : dict contains all the Tensor used by optimizer

        Examples:
            .. code-block:: python

                import paddle
                emb = paddle.nn.Embedding(10, 10)

                adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
                state_dict = adam.state_dict()

        '''
        state_dict = {}
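        # collect every accumulator tensor (e.g. Adam moments), keyed by its variable name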
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        # if master weights exist, save them as well
        if hasattr(self, "_master_weights"):
            if len(self._master_weights) != 0:
                state_dict["master_weights"] = self._master_weights
        # global step if use lr decay
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
        return state_dict

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load the optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be changed.

        Args:
            state_dict(dict) : Dict containing all the Tensors needed by the optimizer
        Return:
            None

        Examples:
            .. code-block:: python

                import paddle

                emb = paddle.nn.Embedding(10, 10)

                layer_state_dict = emb.state_dict()
                paddle.save(layer_state_dict, "emb.pdparams")

                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                opt_state_dict = adam.state_dict()
                paddle.save(opt_state_dict, "adam.pdopt")

                opti_state_dict = paddle.load("adam.pdopt")
                adam.set_state_dict(opti_state_dict)

        '''
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])

        # NOTE: exclude learning rate scheduler's state from
        # _accumulators_holder.
        state_dict = state_dict.copy()
        if "LR_Scheduler" in state_dict:
            state_dict.pop("LR_Scheduler")
        if "master_weights" in state_dict:
            if hasattr(self, "_master_weights"):
                self._master_weights = state_dict["master_weights"]
            state_dict.pop("master_weights")
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert (
                    var_tmp.name in state_dict
                ), "optimizer Tensor {} not found".format(var_tmp.name)
                var = var_tmp.value()
                tensor = var.get_tensor()
                model_np = np.array(tensor)

                load_para = state_dict[var_tmp.name]

                if isinstance(load_para, Variable):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, core.VarBase):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, np.ndarray):
                    load_para_np = load_para
                else:
                    raise RuntimeError(
                        "State dict type {} not supprt".format(
                            str(type(load_para))
                        )
                    )

                assert (
                    model_np.shape == load_para_np.shape
                ), "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
                    var_tmp.name, model_np.shape, load_para_np.shape
                )

                assert (
                    model_np.dtype == load_para_np.dtype
                ), "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
                    var_tmp.name, model_np.dtype, load_para_np.dtype
                )

                tensor.set(load_para_np, framework._current_expected_place())

    def get_opti_var_name_list(self):
        return self._opti_name_list

    def _create_global_learning_rate(self):
        # the lr var can't be float16 or bfloat16; for pure fp16 or bf16 training, the lr dtype needs extra handling
        _lr_dtype = (
            paddle.get_default_dtype() if self._dtype is None else self._dtype
        )
        _lr_dtype = (
            paddle.float32
            if (
                (
                    paddle.get_default_dtype() != "float16"
                    and _lr_dtype == paddle.float16
                )
                or (
                    paddle.get_default_dtype() != "bfloat16"
                    and _lr_dtype == paddle.bfloat16
                )
            )
            else _lr_dtype
        )
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[],
                    persistable=True,
                    stop_gradient=True,
                    dtype=_lr_dtype,
                )
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var

                self._learning_rate_map[
                    framework.default_main_program()
                ] = lr_var

            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var,
                initializer=paddle.nn.initializer.Constant(value=lr_value),
            )
        elif isinstance(self._learning_rate, float):
            # only create global lr_var once
            lr = self._global_learning_rate()
            if isinstance(lr, framework.Variable):
                return
            else:
                self._learning_rate_map[
                    framework.default_main_program()
                ] = paddle.static.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[],
                    value=float(self._learning_rate),
                    dtype=_lr_dtype,
                    persistable=True,
                )

    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer uses LRScheduler,
        this API cannot be invoked, because it will lead to conflict.

        Args:
            value (float): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)

                adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                # set learning rate manually by python float value
                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6

        """
        if not isinstance(value, (int, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be float, but received %s."
                % (type(value))
            )
        if isinstance(self._learning_rate, LRScheduler):
            raise RuntimeError(
                "optimizer's learning rate can't be LRScheduler when invoke this API, because this will lead to conflict."
            )
        self._learning_rate = float(value)
        current_lr = self._global_learning_rate()
        if current_lr is not None:
            if in_dygraph_mode():
                place = _current_expected_place()
                _C_ops.full_(
                    current_lr,
                    list(current_lr.shape),
                    float(value),
                    current_lr.dtype,
                    place,
                )
            else:
                global_block = framework.default_main_program().global_block()
                global_block.append_op(
                    type='fill_constant',
                    outputs={'Out': [current_lr]},
                    attrs={
                        'dtype': current_lr.dtype,
                        'shape': list(current_lr.shape),
                        'value': float(value),
                    },
                    stop_gradient=True,
                )

    def get_lr(self):
        """
        Get current learning rate of optimizer.
        If 'LRScheduler' is not used, the returned value is always the same.
        If 'LRScheduler' is used, the returned value is the current scheduled learning rate.

        Returns:
            float: The current learning rate of optimizer.

        Examples:
            .. code-block:: python

                # train on default dynamic graph mode
                import paddle
                import numpy as np
                emb = paddle.nn.Embedding(10, 3)

                ## example1: LRScheduler is not used, the returned value is always the same
                adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
                    adam.step()

                ## example2: StepDecay is used, return the scheduled learning rate
                scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
                    adam.step()
                    scheduler.step()

                # train on static graph mode
                paddle.enable_static()
                main_prog = paddle.static.Program()
                start_prog = paddle.static.Program()
                with paddle.static.program_guard(main_prog, start_prog):
                    x = paddle.static.data(name='x', shape=[None, 10])
                    z = paddle.static.nn.fc(x, 100)
                    loss = paddle.mean(z)
                    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                    adam = paddle.optimizer.Adam(learning_rate=scheduler)
                    adam.minimize(loss)

                exe = paddle.static.Executor()
                exe.run(start_prog)
                for batch in range(10):
                    print("Learning rate of step{}: {}", adam.get_lr())     # 0.5->0.05->0.005...
                    out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
                    scheduler.step()

        """
        if isinstance(self._learning_rate, float):
            return self._learning_rate
        else:
            return self._learning_rate()

    def _global_learning_rate(self, program=None):
        """
        get global decayed learning rate
        :return:
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """append optimize operator to block and return all the added optimize_op"""
        raise NotImplementedError(
            "Class \"Optimizer\" connot be used directly as an optimizer, please use its subclasses such as \"Adam\""
        )

    def _create_param_lr(self, param_and_grad):
        # create learning rate tensor for every parameter
        param = param_and_grad[0]
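        # `optimize_attr['learning_rate']` acts as a per-parameter multiplier on the
        # global learning rate (1.0 means the global rate is used as is).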
        if hasattr(param, 'optimize_attr'):
            param_lr = param.optimize_attr['learning_rate']
            if type(param_lr) == Variable:
                return param_lr
            else:
                if param_lr == 1.0:
                    return self._global_learning_rate()
                else:
                    with default_main_program()._lr_schedule_guard(
                        is_with_opt=True
                    ), framework.name_scope('scale_with_param_lr'):
                        return self._global_learning_rate() * param_lr
        else:
            return self._global_learning_rate()

    def _create_master_weight(self, param):
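        # Create (or reuse) an FP32 master copy of a low-precision parameter,
        # initialized by casting the parameter in the startup program.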
        if param.name in self._master_weights:
            var = self._master_weights[param.name]
        else:
            assert isinstance(self.helper, LayerHelper)

            var_name = param.name + "_fp32_master"
            var_name = unique_name.generate(var_name)
            var = paddle.static.create_global_var(
                name=var_name,
                shape=param.shape,
                value=0,
                dtype='float32',
                persistable=True,
            )
            block = self.helper.startup_program.global_block()
            block.append_op(
                type="cast",
                inputs={"X": [param]},
                outputs={"Out": [var]},
                attrs={
                    "in_dtype": param.dtype,
                    "out_dtype": core.VarDesc.VarType.FP32,
                },
            )
            self._master_weights[param.name] = var
        return var

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    def _finish_update(self, block, parameters_and_grads):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer

        Returns:
            None
        """
        pass

    def _add_accumulator(
        self,
        name,
        param,
        dtype=None,
        fill_value=0.0,
        shape=None,
        type=None,
        device=None,
    ):
        """Utility function to add an accumulator for a parameter

        Args:
            block: the block in which the loss tensor is present
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be added
            dtype: data type of the accumulator tensor
            fill_value: value to initialize the accumulator tensor
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name in self._accumulators
            and param.name in self._accumulators[name]
        ):
            if framework._non_static_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name
                )
            )
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if framework._in_eager_without_dygraph_check()
            else (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True,
        )
        if device is None:
            device = self._get_device_for_param(param.name)

        if (
            in_dygraph_mode()
            and (device == 'cpu' or isinstance(device, core.CPUPlace))
            and (not core.is_compiled_with_xpu())
        ):
            _C_ops.full_(
                var,
                var.shape,
                str(float(fill_value)),
                var.dtype,
                core.CPUPlace(),
            )
        else:
            with device_guard(device):
                self.helper.set_variable_initializer(
                    var,
                    initializer=paddle.nn.initializer.Constant(
                        value=float(fill_value)
                    ),
                )

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert (
                    var_name in self._accumulators_holder
                ), "Optimizer set error, {} should in state dict".format(
                    var_name
                )
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be fetched

        Returns:
            accumulator tensor for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name not in self._accumulators
            or param.name not in self._accumulators[name]
        ):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, param.name
                )
            )
        return self._accumulators[name][param.name]

    def _get_accumulator_master(self, name, param):
        """Utility function to fetch an accumulator for a parameter
        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched
        Returns:
            accumulator variable for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        find_master = self._multi_precision and self._is_dtype_fp16_or_bf16(
            param.dtype
        )
        target_param = (
            self._master_weights[param.name] if find_master else param
        )
        target_name = target_param.name
        if (
            name not in self._accumulators
            or target_name not in self._accumulators[name]
        ):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, target_name
                )
            )
        return self._accumulators[name][target_name]

    def _update_param_device_map(self, parameters_and_grads, target_block):
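        # Record, for every trainable parameter, the device attribute of the first op
        # in `target_block` that consumes it, so optimizer ops can be placed on the
        # same device later.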
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].stop_gradient is False:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = (
                    core.op_proto_and_checker_maker.kOpDeviceAttrName()
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name
                        )
                        break

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device

    def _create_optimization_pass(
        self, parameters_and_grads, param_group_idx=0
    ):
        """Add optimization operators to update gradients to tensors.

        Args:
          parameters_and_grads(list(tuple(Tensor, Tensor))):
            a list of (tensor, gradient) pair to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.

        # Always called under program_guard; use the global block as the loss block.
        # But if the current block is in control flow, append the optimize ops in
        # the grad block of the current block.

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert (
                current_block.backward_block_idx != -1
            ), "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx
            ]

        start = len(target_block.ops)
        self.helper = LayerHelper(self.__class__.__name__)

        self._create_global_learning_rate()

        # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
        if self._use_multi_tensor and self.__class__.__name__ in [
            'Momentum',
            'Adam',
        ]:
            if (
                len(self._param_dict['FP32_LODTensor'][param_group_idx]) == 0
                and len(self._param_dict['FP16_LODTensor'][param_group_idx])
                == 0
            ):
                if isinstance(parameters_and_grads, list):
                    assert param_group_idx == 0
                    self._multi_tensor_init(
                        target_block,
                        [
                            p[0]
                            for p in parameters_and_grads
                            if not p[0].stop_gradient
                        ],
                        param_group_idx,
                    )
                else:
                    self._update_param_group(parameters_and_grads)
                    self._multi_tensor_init(
                        target_block,
                        [
                            p[0]
                            for p in parameters_and_grads['params']
                            if not p[0].stop_gradient
                        ],
                        param_group_idx,
                    )
            if framework._non_static_mode():
                self._append_optimize_multi_tensor_op(
                    target_block,
                    parameters_and_grads,
                    param_group_idx=param_group_idx,
                )
            else:
                self._update_param_device_map(
                    parameters_and_grads, target_block
                )
                # NOTE: Multi Tensor requires all parameters to be in the same device and program.
                # param_grad_list = [p_0,g_0,p_1,g_1,....]
                param_grad_list = []
                for param_and_grad in parameters_and_grads:
                    if (
                        not param_and_grad[0].stop_gradient
                        and param_and_grad[1] is not None
                    ):
                        param_grad_list.append(param_and_grad[0])
                        param_grad_list.append(param_and_grad[1])
                with param_grad_list[0].block.program._optimized_guard(
                    param_grad_list
                ), name_scope("optimizer"):
                    device = self._get_device_for_param(param_grad_list[0].name)
                    with device_guard(device):
                        self._append_optimize_multi_tensor_op(
                            target_block,
                            parameters_and_grads,
                            param_group_idx=param_group_idx,
                        )
        else:
            if not framework._non_static_mode():
                params_grads_device_map = (
                    parameters_and_grads['params']
                    if isinstance(parameters_and_grads, dict)
                    else parameters_and_grads
                )
                self._update_param_device_map(
                    params_grads_device_map, target_block
                )

            if isinstance(parameters_and_grads, list):
                self._create_accumulators(
                    target_block,
                    [
                        p[0]
                        for p in parameters_and_grads
                        if not p[0].stop_gradient
                    ],
                )
            else:
                params_acc_dict = parameters_and_grads.copy()
                params_acc_dict['params'] = [
                    p[0]
                    for p in params_acc_dict['params']
                    if not p[0].stop_gradient
                ]
                self._create_accumulators(target_block, params_acc_dict)

            if framework._non_static_mode():
                if isinstance(parameters_and_grads, list):
                    for param_and_grad in parameters_and_grads:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            self._append_optimize_op(
                                target_block, param_and_grad
                            )
                else:
                    for param_and_grad in parameters_and_grads['params']:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            param_grad_dict = dict()
                            param_grad_dict['params'] = param_and_grad
                            param_grad_dict.update(
                                {
                                    k: v
                                    for k, v in parameters_and_grads.items()
                                    if k != 'params'
                                }
                            )
                            self._append_optimize_op(
                                target_block, param_grad_dict
                            )
            else:
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    with param_and_grad[0].block.program._optimized_guard(
                        param_and_grad
                    ), name_scope("optimizer"):
                        if param_and_grad[0].stop_gradient is False:
                            device = self._get_device_for_param(
                                param_and_grad[0].name
                            )
                            with device_guard(device):
                                optimize_op = self._append_optimize_op(
                                    target_block, param_and_grad
                                )

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)

    def _append_dgc_ops(self, param_and_grad):
        pass

    def backward(
        self,
        loss,
        startup_program=None,
        parameters=None,
        no_grad_set=None,
        callbacks=None,
    ):
        """
        The first part of ``minimize``, do auto-diff to append backward operations for
        the current program.

        Args:
            loss (Tensor): ``loss`` tensor to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.

        Return:
            list: list of (param, grad) tensor pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.arange(26, dtype="float32").reshape([2, 13])

                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(x)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        act_no_grad_set = None
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if framework.in_dygraph_mode():
            parameter_list = parameters if parameters else self._parameter_list

            # It is very time-consuming to call c++ functions in a loop on the python side.
            # We put this part of the code on the c++ side to improve the speed in eager mode.
            params_grads = []
            grads = core.eager.get_all_grads(parameter_list)
            for index, grad in enumerate(grads):
                if grad is not None:
                    params_grads.append((parameter_list[index], grad))
        else:
            if callbacks is None:
                callbacks = [paddle.nn.clip.error_clip_callback]
            else:
                assert isinstance(callbacks, list)
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, (
                "The loss.shape should be (1L,), but the current loss.shape is {}. "
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape
                )
            )
            parameter_list = parameters if parameters else self._parameter_list
            with program_guard(program, startup_program):
                from paddle.incubate.autograd.utils import prim_enabled

                if prim_enabled():
                    params_grads = append_backward_new(
                        [loss], parameter_list, act_no_grad_set, callbacks
                    )
                else:
                    params_grads = append_backward(
                        loss, parameter_list, act_no_grad_set, callbacks
                    )
                # Note: since we can't use all_reduce_op now,
                #  dgc_op should be the last op of one grad.
                self._append_dgc_ops(params_grads)
        return params_grads

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle

                inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
                linear = paddle.nn.Linear(10, 10)
                out = linear(inp)
                loss = paddle.mean(out)
                optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters())
                params_grads = optimizer.backward(loss)
                optimizer.apply_gradients(params_grads)

        """

        params_grads = sorted(params_grads, key=lambda x: x[0].name)
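        # sorting by parameter name keeps the order of the appended optimizer ops
        # deterministic across runs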

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:

            params_grads = paddle.nn.clip.append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(
            params_grads, self.regularization
        )

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

    def _apply_optimize(
        self, loss, startup_program, params_grads, param_group_idx=0
    ):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Tensor): loss tensor to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameters`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
            with program_guard(
                framework.default_main_program(),
                framework.default_startup_program(),
            ):
                if isinstance(params_grads, list):
                    if self._grad_clip is not None:
                        params_grads = self._grad_clip(params_grads)
                    params_grads = self.append_regularization_ops(
                        params_grads, self.regularization
                    )
                else:
                    grad_clip = params_grads['grad_clip']
                    if grad_clip is not None:
                        params_grads['params'] = grad_clip(
                            params_grads['params']
                        )

                    params_grads['params'] = self.append_regularization_ops(
                        params_grads['params'], self.regularization
                    )
                optimize_ops = self._create_optimization_pass(
                    params_grads, param_group_idx=param_group_idx
                )
        else:
            assert param_group_idx == 0
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """Create and add backward regularization Operators

        Function helper of append_regularization_ops.
        """
        # If no gradient or no regularization is specified,  then we don't need to do anything
        if grad is None or (
            (
                not hasattr(param, 'regularizer')
                or (hasattr(param, 'regularizer') and param.regularizer is None)
            )
            and regularization is None
        ):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

        if framework.in_dygraph_mode():
            return _C_ops.add_n([grad, regularization_term])
        else:
            new_grad = grad
            if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
                # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
                # the grad's type and name will be changed. But the gradient's name
                # is used in ParallelExecutor Reduce mode, so I add a flag for
                # the new_grad here.
                new_grad = grad.block.create_var(
                    name=grad.name + core.kNewGradSuffix(),
                    dtype=param.dtype,
                    shape=param.shape,
                    lod_level=param.lod_level,
                    type=core.VarDesc.VarType.LOD_TENSOR,
                )

            inputs = {"X": [grad, regularization_term]}
            outputs = {"Out": [new_grad]}
            grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
1258

1259
            return new_grad
1260

    def append_regularization_ops(
        self, parameters_and_grads, regularization=None
    ):
        r"""Create and add backward regularization Operators

        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.

        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. It is applied to a parameter
                            only if that parameter has no regularizer of its own.

        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient

        Raises:
            Exception: Unknown regularization type
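
        Examples:
            A minimal, illustrative sketch; in practice this method is called
            internally by the optimizer when ``weight_decay`` is set:

            .. code-block:: python

                import paddle

                linear = paddle.nn.Linear(4, 4)
                out = linear(paddle.rand([2, 4]))
                paddle.mean(out).backward()

                sgd = paddle.optimizer.SGD(
                    learning_rate=0.1,
                    parameters=linear.parameters(),
                    weight_decay=paddle.regularizer.L2Decay(1e-4),
                )
                params_grads = [
                    (p, p.grad) for p in linear.parameters() if p.grad is not None
                ]
                params_grads = sgd.append_regularization_ops(
                    params_grads, sgd.regularization
                )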
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization
                )
                params_and_grads.append((param, new_grad))
        else:
            repeat_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if (
                        not repeat_regularizer
                        and param.regularizer is not None
                        and regularization is not None
                    ):
                        repeat_regularizer = True
                        logging.info(
                            "If the regularizer of a Parameter has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in the Optimizer will not take effect on it and will only be applied to other Parameters!"
                            % regularization.__str__()
                        )
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization
                        )
                        params_and_grads.append((param, new_grad))
        return params_and_grads

    def _get_no_grad_set(self, loss, no_grad_set=None):
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = set(
            [param.name for param in parameters if param.stop_gradient is True]
        )
        # If the parameter is not trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

    @framework.dygraph_only
    def clear_grad(self, set_to_zero=True):
        """
        Clear the gradients of all optimized parameters of the model.

        If the gradients are not cleared, new gradients will accumulate on top of the previous ones.

        There are two ways to clear gradients: set them to zero or delete them.

        Args:
            set_to_zero (bool, optional): Whether to set the gradients to zero (True) or delete them (False). Default is True.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                a = paddle.arange(26, dtype="float32").reshape([2, 13])
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate=0.01,
                                             parameters=linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()

        """
        param_list = []
        if self._parameter_list is None or not isinstance(
            self._parameter_list[0], dict
        ):
            for p in self._parameter_list:
                if not p.stop_gradient:
                    param_list.append(p)
        else:
            for param_group in self._param_groups:
                for p in param_group['params']:
                    if not p.stop_gradient:
                        param_list.append(p)

        if _in_eager_without_dygraph_check():
            for p in param_list:
                p.clear_gradient(set_to_zero)
        else:
            core.clear_gradients(param_list, set_to_zero)

    @imperative_base.no_grad()
    def minimize(
        self, loss, startup_program=None, parameters=None, no_grad_set=None
    ):
        """
        Add operations to minimize ``loss`` by updating ``parameters``.

        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) tensor pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run; see details in ``Executor``.

        Examples:
            .. code-block:: python

                import paddle

                linear = paddle.nn.Linear(10, 10)
                input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
                out = linear(input)
                loss = paddle.mean(out)

                beta1 = paddle.to_tensor([0.9], dtype="float32")
                beta2 = paddle.to_tensor([0.99], dtype="float32")

                adam = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters(),
                        beta1=beta1,
                        beta2=beta2,
                        weight_decay=0.01)
                loss.backward()
                adam.minimize(loss)
                adam.clear_grad()

        """
        assert isinstance(loss, Variable), "The loss should be a Tensor."

        parameter_list = parameters if parameters else self._parameter_list

        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameters=parameter_list,
            no_grad_set=no_grad_set,
        )

        optimize_ops = self._apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads
        )

        return optimize_ops, params_grads

    @imperative_base.no_grad()
    @framework.dygraph_only
    def step(self):
        """
        Execute the optimizer and update parameters once.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                a = paddle.arange(26, dtype="float32").reshape([2, 13])
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate=0.01,
                                             parameters=linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """

        if not isinstance(self._param_groups[0], dict):
            # A flat parameter list: collect every trainable parameter that
            # received a gradient in this backward pass.
            params_grads = []
            for param in self._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))

            self._apply_optimize(
                loss=None,
                startup_program=None,
                params_grads=params_grads,
                param_group_idx=0,
            )

        else:
            # Optimize parameters group by group; each group carries its own
            # options (e.g. learning_rate, weight_decay) alongside 'params'.
            for idx, param_group in enumerate(self._param_groups):
                params_grads = defaultdict(list)
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                params_grads.update(
                    {k: v for k, v in param_group.items() if k != 'params'}
                )
                self._apply_optimize(
                    loss=None,
                    startup_program=None,
                    params_grads=params_grads,
                    param_group_idx=idx,
                )

    def _add_param_group(self, param_group):
        """
        Add a param group to parameter_list.

        Args:
            param_group (dict): The group of Tensors to be optimized with
            different optimization options.
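
        Examples:
            A minimal, illustrative sketch of the expected structure; keys other
            than ``'params'`` override the optimizer-wide defaults:

            .. code-block:: python

                import paddle

                linear = paddle.nn.Linear(10, 10)
                group = {
                    'params': linear.parameters(),
                    'learning_rate': 0.001,
                    'weight_decay': 0.0001,
                }
                sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=[group])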
        """
        params = param_group['params']
        if isinstance(params, Parameter):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters should be in ordered collections,"
1512 1513
                "but received set, please use list instead."
            )
        else:
            param_group['params'] = list(params)

        # Update optimization options for each group
        for k, v in self._default_dict.items():
            param_group.setdefault(k, v)

        param_set = set()
        for group in self._param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group"
            )

        for param in param_group['params']:
            weight_decay = param_group['weight_decay']
            if isinstance(weight_decay, float):
                from ..fluid.regularizer import L2Decay

                regularization = L2Decay(weight_decay)
            else:
                regularization = weight_decay
            param.regularizer = regularization
            param.optimize_attr['learning_rate'] = param_group.get(
                'learning_rate', 1.0
            )

        self._param_groups.append(param_group)

    def _update_param_group(self, parameters):
        """
        Update the param group with new entry
        Args:
            parameters (dict): The extra group of Tensors to be optimzed with
            different optimization options. Only used in child class.
        """
        pass

    @framework.dygraph_only
    def _multi_tensor_init(self, target_block, parameters, param_group_idx):
        """
        All parameters used for optimizer (such as: parameters, master_weight, velocity_acc for momentum) calculations are grouped into a python list by data type (float16, float32).
        This function will be overridden in the corresponding optimizer file.

        Args:
            target_block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    @framework.dygraph_only
    def _append_optimize_multi_tensor_op(
        self, target_block, parameters_and_grads, param_group_idx
    ):
        """
        For multi-tensor optimizers, append the merged optimize operator to the block.
        """
        pass

    def _is_dtype_fp16_or_bf16(self, dtype):
        """
        Check whether the dtype is fp16 or bf16.
        :param dtype: instance of core.VarDesc.VarType
        :return: True if dtype is one of fp16 or bf16, False otherwise
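
        Example (illustrative)::

            self._is_dtype_fp16_or_bf16(core.VarDesc.VarType.FP16)  # True
            self._is_dtype_fp16_or_bf16(core.VarDesc.VarType.FP32)  # False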
        """
        assert isinstance(
            dtype, core.VarDesc.VarType
        ), "The dtype should be an instance of core.VarDesc.VarType."
        return (
            dtype == core.VarDesc.VarType.FP16
            or dtype == core.VarDesc.VarType.BF16
        )