# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import logging
from collections import defaultdict

import paddle
from paddle.fluid.framework import Variable, default_main_program, device_guard, name_scope

from ..fluid import framework
from ..fluid import layers
from ..fluid import unique_name
from ..fluid.backward import _get_no_grad_set_name, append_backward
from ..fluid.clip import GradientClipBase, append_gradient_clip_ops, error_clip_callback
from ..fluid.framework import program_guard, Parameter
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from ..fluid.dygraph import base as imperative_base
from paddle.fluid import core
from .lr import LRScheduler
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check, _current_expected_place, in_dygraph_mode
__all__ = []

@framework.static_only
def append_backward_new(loss_list,
                        parameter_list=None,
                        no_grad_set=None,
                        callbacks=None,
                        checkpoints=None,
                        distop_context=None):
    from paddle.incubate.autograd.primx import orig2prim, Transform
    program = default_main_program()
    assert program.num_blocks == 1, "The append_backward_new interface is designed to process only one block."
    block = program.current_block()
    for el in loss_list:
        assert el.block == block, 'variable in loss_list should be in current block of main program'

    orig2prim(block)
    ad = Transform(block)
    if parameter_list is None:
        parameter_list = program.global_block().all_parameters()
    param_dot, loss_dot = ad.linearize(parameter_list, loss_list)
    loss_bar, param_bar = ad.transpose(loss_dot, param_dot)

    # remove param_dot and their constructor ops
    op_indexes = []
    for var in param_dot:
        if var is not None:
            op_index = block.ops.index(var.op)
            assert op_index >= 0
            op_indexes.append(op_index)

    ad.erase_ops(sorted(op_indexes))
    ad.erase_dots(param_dot)

    if len(parameter_list) == 1:
        params_and_grads = [(parameter_list, param_bar)]
    else:
        params_and_grads = []
        for i, param in enumerate(parameter_list):
            params_and_grads.append((param, param_bar[i]))
    return params_and_grads
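
# A hedged usage sketch for append_backward_new (static graph mode with the
# autograd primitive operators enabled). The exact import location of
# `enable_prim` is an assumption based on paddle.incubate.autograd.
#
#     import paddle
#     from paddle.incubate.autograd import enable_prim
#
#     paddle.enable_static()
#     enable_prim()
#     main = paddle.static.Program()
#     startup = paddle.static.Program()
#     with paddle.static.program_guard(main, startup):
#         x = paddle.static.data(name='x', shape=[None, 10], dtype='float32')
#         loss = paddle.mean(paddle.static.nn.fc(x, 1))
#         params_grads = append_backward_new([loss])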


class Optimizer(object):
    r"""Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly,
    but should use one of its implementations instead.

    Args:
        learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
            It can be a float value or any subclass of ``LRScheduler`` .
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` to update to minimize ``loss``. \
            This parameter is required in dygraph mode. And you can specify different options for \
            different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are a list of dict. Note that the learning_rate in parameter groups \
            represents the scale of the base learning_rate. \
            The default value is None in static mode, in which case all parameters will be updated.
        weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
            It can be a float value as the coeff of L2 regularization or \
            :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
            If a parameter has already set a regularizer using :ref:`api_fluid_ParamAttr`, \
            the regularization setting here in the optimizer will be ignored for this parameter. \
            Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \
            some derived class of ``GradientClipBase`` . There are three clipping strategies \
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Returns:
       Base class for optimizer.

    Examples:
        .. code-block:: python

            #Take the subclass adam as an example
            import paddle
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear(inp)
            loss = paddle.mean(out)
            adam = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters())
            loss.backward()
            adam.step()
            adam.clear_grad()

            #Take the subclass sgd as an example
            #optimize parameters in linear_1 and linear_2 with different options.
            #Note that the learning_rate of linear_2 is 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1
                }],
                weight_decay=0.01)
            loss.backward()
            sgd.step()
            sgd.clear_grad()

    """

    @imperative_base.no_grad
    def __init__(self,
                 learning_rate,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 name=None):
        if parameters is not None:
            # paddle.Tensor is also iterable, so here we don't check whether
            # the input is iterable; if the input were a paddle.Tensor,
            # list(paddle.Tensor) would produce an incorrect value.
            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                raise TypeError(
                    "`parameters` argument given to the optimizer should be "
                    "an iterable of paddle Tensors, but the given argument type is `{}`."
                    .format(type(parameters)))
            if isinstance(parameters, dict):
                raise TypeError(
                    "`parameters` argument should not be a dict type; "
                    "if parameter groups are needed, please set `parameters`"
                    " as a list of dict")
            self._parameter_list = list(parameters)
        else:
            self._parameter_list = None

        self._name = name
        if framework._non_static_mode():
            if self._parameter_list is None:
                raise AttributeError(
                    "parameters argument given to the Optimizer should not be None in dygraph mode."
                )
            if weight_decay is not None:
                if not isinstance(self._parameter_list[0], dict):
                    for param in self._parameter_list:
                        if hasattr(param, 'regularizer'
                                   ) and param.regularizer is not None:
                            logging.info(
                                "If the regularizer of a Parameter has already been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr', "
                                "the weight_decay[%s] in the Optimizer will not take effect for it; it will only be applied to the other Parameters!"
                                % weight_decay.__str__())
                            break

        if not isinstance(learning_rate, (float, LRScheduler)):
            raise TypeError(
                "learning rate should be float or LRScheduler, got %s here" %
                type(learning_rate))
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        if isinstance(weight_decay, float):
            from ..fluid.regularizer import L2Decay
            self.regularization = L2Decay(weight_decay)
        else:
            self.regularization = weight_decay
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate

        self._dtype = None
        # Infer the dtype form parameter
        if self._parameter_list:
            if isinstance(self._parameter_list[0], dict):
                for param_group in self._parameter_list:
                    assert 'params' in param_group, \
                        'params should be set in parameters if parameter groups are optimized in different options'
                self._dtype = self._parameter_list[0]['params'][0].dtype
            else:
                self._dtype = self._parameter_list[0].dtype

        # each program should have an independent learning rate
        # program -> tensor(learning_rate)
        self._learning_rate_map = dict()
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra tensors associated with the parameters
        # to train. These tensors are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        self.clear_gradients = self.clear_grad
        self._default_dict = {
            'weight_decay': self.regularization,
            'grad_clip': self._grad_clip
        }

        self._param_groups = []
        if self._parameter_list and isinstance(self._parameter_list[0], dict):
            for param_group in self._parameter_list:
                self._add_param_group(param_group.copy())
        else:
            self._param_groups = self._parameter_list
        # NOTE: Multi Tensor: pass all parameters and gradients to the optimizer op kernel at one time for updating in dygraph mode.
        # Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
        self._use_multi_tensor = None
        self._param_dict = {'FP32_LODTensor': [], 'FP16_LODTensor': []}

        self._auxiliary_vars = {}

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _get_auxiliary_var(self, key):
        return self._auxiliary_vars.get(key, None)
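
    # Note: the auxiliary-variable helpers above act as a small side channel.
    # Outer components (for example AMP loss scaling) can stash extra tensors
    # here for optimizer kernels to pick up later; the AMP example is
    # illustrative, not an exhaustive list of users.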

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get state dict information from the optimizer. It contains all the Tensors used by the optimizer. For the Adam optimizer, this includes beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be included in the state dict.
        If the optimizer has never been called (via the minimize function), the state_dict is empty.

        Args:
            None

        Returns:
            state_dict(dict) : dict containing all the Tensors used by the optimizer

        Examples:
            .. code-block:: python

                import paddle
                emb = paddle.nn.Embedding(10, 10)

                adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
                state_dict = adam.state_dict()

        '''
        state_dict = {}
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        # if has master weight and then save master weight
        if hasattr(self, "_master_weights"):
            if len(self._master_weights) != 0:
                state_dict["master_weights"] = self._master_weights
        # global step if use lr decay
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
        return state_dict

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load the optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be changed accordingly.

        Args:
            state_dict(dict) : Dict containing all the Tensors needed by the optimizer
        Return:
            None

        Examples:
            .. code-block:: python

                import paddle

                emb = paddle.nn.Embedding(10, 10)
                layer_state_dict = emb.state_dict()
                paddle.save(layer_state_dict, "emb.pdparams")
                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                opt_state_dict = adam.state_dict()
                paddle.save(opt_state_dict, "adam.pdopt")
                opti_state_dict = paddle.load("adam.pdopt")
                adam.set_state_dict(opti_state_dict)

        '''
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])
        # NOTE: exclude learning rate scheduler's state from
        # _accumulators_holder.
        state_dict = state_dict.copy()
        if "LR_Scheduler" in state_dict:
            state_dict.pop("LR_Scheduler")
        if "master_weights" in state_dict:
            if hasattr(self, "_master_weights"):
                self._master_weights = state_dict["master_weights"]
            state_dict.pop("master_weights")
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert var_tmp.name in state_dict, \
                        "optimizer Tensor {} not found".format( var_tmp.name )
                var = var_tmp.value()
                tensor = var.get_tensor()
                model_np = np.array(tensor)

                load_para = state_dict[var_tmp.name]

                if isinstance(load_para, Variable):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, core.VarBase):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, np.ndarray):
                    load_para_np = load_para
                else:
                    raise RuntimeError("State dict type {} not supported".format(
                        str(type(load_para))))

                assert model_np.shape == load_para_np.shape,  \
                                          "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
                                                 var_tmp.name, model_np.shape, load_para_np.shape)

                assert model_np.dtype == load_para_np.dtype, \
                                          "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
                                                var_tmp.name, model_np.dtype, load_para_np.dtype)

                tensor.set(load_para_np, framework._current_expected_place())

    def get_opti_var_name_list(self):
        return self._opti_name_list

    def _create_global_learning_rate(self):
        # The lr var can't be float16; for pure fp16 training, the dtype of lr needs extra handling.
        _lr_dtype = paddle.get_default_dtype(
        ) if self._dtype is None else self._dtype
        _lr_dtype = paddle.float32 if (
            paddle.get_default_dtype() != "float16"
            and _lr_dtype == paddle.float16) else _lr_dtype
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(name=lr_name,
                                                            shape=[1],
                                                            persistable=True,
                                                            stop_gradient=True,
                                                            dtype=_lr_dtype)
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var
                self._learning_rate_map[
                    framework.default_main_program()] = lr_var
            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value))
        elif isinstance(self._learning_rate, float):
            # only create global lr_var once
            lr = self._global_learning_rate()
            if isinstance(lr, framework.Variable):
                return
            else:
                self._learning_rate_map[framework.default_main_program(
                )] = layers.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(self._learning_rate),
                    dtype=_lr_dtype,
                    persistable=True)

    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative
        Set the value of the learning rate manually in the optimizer. If the optimizer uses an LRScheduler,
        this API cannot be invoked, because it would lead to a conflict.

        Args:
            value (float): the value of the learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)

                adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                # set learning rate manually by python float value
                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6

        """
        if not isinstance(value, (int, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be float, but received %s."
                % (type(value)))
        if isinstance(self._learning_rate, LRScheduler):
            raise RuntimeError(
                "optimizer's learning rate can't be an LRScheduler when invoking this API, because this would lead to a conflict."
            )
        self._learning_rate = float(value)
        current_lr = self._global_learning_rate()
        if current_lr is not None:
            if in_dygraph_mode():
                place = _current_expected_place()
                _C_ops.full_(current_lr, list(current_lr.shape), float(value),
                             current_lr.dtype, place)

            elif _in_legacy_dygraph():
                _legacy_C_ops.fill_constant(current_lr, 'value', float(value),
                                            'dtype', current_lr.dtype, 'shape',
                                            list(current_lr.shape))
            else:
                global_block = framework.default_main_program().global_block()
                global_block.append_op(type='fill_constant',
                                       outputs={'Out': [current_lr]},
                                       attrs={
                                           'dtype': current_lr.dtype,
                                           'shape': list(current_lr.shape),
                                           'value': float(value)
                                       },
                                       stop_gradient=True)

    def get_lr(self):
        """
        Get the current learning rate of the optimizer.
        If an 'LRScheduler' is not used, the returned value is always the same.
        If an 'LRScheduler' is used, the returned value is the current scheduled learning rate.

        Returns:
            float: The current learning rate of the optimizer.

        Examples:
            .. code-block:: python

                # train on default dynamic graph mode
                import paddle
                import numpy as np
                emb = paddle.nn.Embedding(10, 3)

                ## example1: LRScheduler is not used, the returned value is always the same
                adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
                    adam.step()
                ## example2: StepDecay is used, the scheduled learning rate is returned
                scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
                    adam.step()
                    scheduler.step()

                # train on static graph mode
                paddle.enable_static()
                main_prog = paddle.static.Program()
                start_prog = paddle.static.Program()
                with paddle.static.program_guard(main_prog, start_prog):
                    x = paddle.static.data(name='x', shape=[None, 10])
                    z = paddle.static.nn.fc(x, 100)
                    loss = paddle.mean(z)
                    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                    adam = paddle.optimizer.Adam(learning_rate=scheduler)
                    adam.minimize(loss)

                exe = paddle.static.Executor()
                exe.run(start_prog)
                for batch in range(10):
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr()))     # 0.5->0.05->0.005...
                    out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
                    scheduler.step()

        """
        if isinstance(self._learning_rate, float):
            return self._learning_rate
        else:
            return self._learning_rate()

    def _global_learning_rate(self, program=None):
        """
        Get the global learning rate variable for the given program
        (the default main program if not specified). Return None if it
        has not been created yet.
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """ append optimize operator to block and return all the added optimize_op
        """
        raise NotImplementedError(
            "Class \"Optimizer\" cannot be used directly as an optimizer, please use its subclasses such as \"Adam\""
        )

    def _create_param_lr(self, param_and_grad):
        # create learning rate tensor for every parameter
        param = param_and_grad[0]
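        # For example, a parameter declared with ParamAttr(learning_rate=0.5)
        # ends up being updated with 0.5 * the optimizer's global learning
        # rate; the 0.5 here is only an illustrative value.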
        if hasattr(param, 'optimize_attr'):
            param_lr = param.optimize_attr['learning_rate']
            if type(param_lr) == Variable:
                return param_lr
            else:
                if param_lr == 1.0:
                    return self._global_learning_rate()
                else:
                    with default_main_program()._lr_schedule_guard(
                            is_with_opt=True), framework.name_scope(
                                'scale_with_param_lr'):
                        return self._global_learning_rate() * param_lr
        else:
            return self._global_learning_rate()

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    def _finish_update(self, block, parameters_and_grads):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer

        Returns:
            None
        """
        pass

    def _add_accumulator(self,
                         name,
                         param,
                         dtype=None,
                         fill_value=0.0,
                         shape=None,
                         type=None,
                         device=None):
        """Utility function to add an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which the accumulator is to be added
            dtype: data type of the accumulator tensor
            fill_value: value used to initialize the accumulator tensor
            shape: shape of the accumulator tensor; defaults to the shape of ``param``
            type: variable type of the accumulator tensor
            device: device on which the accumulator tensor is placed
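
        A minimal subclass usage sketch (the accumulator name 'moment' below
        is illustrative only)::

            def _create_accumulators(self, block, parameters):
                for p in parameters:
                    self._add_accumulator('moment', p, fill_value=0.0)

        The created tensor is persistable and can later be fetched with
        self._get_accumulator('moment', param).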
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name in self._accumulators
                and param.name in self._accumulators[name]):
            if framework._non_static_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name))
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if framework._in_eager_without_dygraph_check() else
            (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True)
        if device is None:
            device = self._get_device_for_param(param.name)
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value)))

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert var_name in self._accumulators_holder, \
                        "Optimizer set error, {} should be in state dict".format(var_name)
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be fetched

        Returns:
            accumulator tensor for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name not in self._accumulators
                or param.name not in self._accumulators[name]):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, param.name))
        return self._accumulators[name][param.name]

    def _update_param_device_map(self, parameters_and_grads, target_block):
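        # Record, for each trainable parameter, the device attribute of the
        # first op that takes it as input; _get_device_for_param later reads
        # this map so the optimizer op for a parameter is placed on the same
        # device (useful e.g. for pipeline-style placement; that use case is
        # an assumption, not stated in this file).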
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].stop_gradient is False:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name)
                        break

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device

    def _create_optimization_pass(self, parameters_and_grads):
        """Add optimization operators to update gradients to tensors.

        Args:
          parameters_and_grads(list(tuple(Tensor, Tensor))):
            a list of (tensor, gradient) pair to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.

        # Always called under program_guard with the global block as the loss block.
        # But if the current block is in control flow, append the optimize ops in the
        # grad block of the current block.

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert current_block.backward_block_idx != -1, \
                "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx]

        start = len(target_block.ops)
        self.helper = LayerHelper(self.__class__.__name__)
        self._create_global_learning_rate()

        # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
        if self._use_multi_tensor and self.__class__.__name__ in [
                'Momentum', 'Adam'
        ]:
            if len(self._param_dict['FP32_LODTensor']) == 0 and len(
                    self._param_dict['FP16_LODTensor']) == 0:
                if isinstance(parameters_and_grads, list):
                    self._multi_tensor_init(target_block, [
                        p[0]
                        for p in parameters_and_grads if not p[0].stop_gradient
                    ])
                else:
                    self._update_param_group(parameters_and_grads)
                    self._multi_tensor_init(target_block, [
                        p[0] for p in parameters_and_grads['params']
                        if not p[0].stop_gradient
                    ])
            if framework._non_static_mode():
                self._append_optimize_multi_tensor_op(target_block,
                                                      parameters_and_grads)
            else:
                self._update_param_device_map(parameters_and_grads,
                                              target_block)
                # NOTE: Multi Tensor requires all parameters to be in the same device and program.
                # param_grad_list = [p_0,g_0,p_1,g_1,....]
                param_grad_list = []
                for param_and_grad in parameters_and_grads:
                    if not param_and_grad[0].stop_gradient and param_and_grad[
                            1] is not None:
                        param_grad_list.append(param_and_grad[0])
                        param_grad_list.append(param_and_grad[1])
                with param_grad_list[0].block.program._optimized_guard(
                        param_grad_list), name_scope("optimizer"):
                    device = self._get_device_for_param(param_grad_list[0].name)
                    with device_guard(device):
                        self._append_optimize_multi_tensor_op(
                            target_block, parameters_and_grads)
        else:
            if not framework._non_static_mode():
                params_grads_device_map = parameters_and_grads[
                    'params'] if isinstance(parameters_and_grads,
                                            dict) else parameters_and_grads
                self._update_param_device_map(params_grads_device_map,
                                              target_block)

            if isinstance(parameters_and_grads, list):
                self._create_accumulators(target_block, [
                    p[0] for p in parameters_and_grads if not p[0].stop_gradient
                ])
            else:
                params_acc_dict = parameters_and_grads.copy()
                params_acc_dict['params'] = [
                    p[0] for p in params_acc_dict['params']
                    if not p[0].stop_gradient
                ]
                self._create_accumulators(target_block, params_acc_dict)

            if framework._non_static_mode():
                if isinstance(parameters_and_grads, list):
                    for param_and_grad in parameters_and_grads:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            self._append_optimize_op(target_block,
                                                     param_and_grad)
                else:
                    for param_and_grad in parameters_and_grads['params']:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            param_grad_dict = dict()
                            param_grad_dict['params'] = param_and_grad
                            param_grad_dict.update({
                                k: v
                                for k, v in parameters_and_grads.items()
                                if k != 'params'
                            })
                            self._append_optimize_op(target_block,
                                                     param_grad_dict)
            else:
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    with param_and_grad[0].block.program._optimized_guard(
                            param_and_grad), name_scope("optimizer"):
                        if param_and_grad[0].stop_gradient is False:
                            device = self._get_device_for_param(
                                param_and_grad[0].name)
                            with device_guard(device):
                                optimize_op = self._append_optimize_op(
                                    target_block, param_and_grad)

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)

    def _append_dgc_ops(self, param_and_grad):
        pass

    def backward(self,
                 loss,
                 startup_program=None,
                 parameters=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        The first part of ``minimize``, do auto-diff to append backward operations for
        the current program.

        Args:
            loss (Tensor): ``loss`` tensor to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.

        Return:
            list: list of (param, grad) tensor pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np
                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        act_no_grad_set = None
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if framework._non_static_mode():
            parameter_list = parameters if parameters \
                else self._parameter_list

            params_grads = []
            for param in parameter_list:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    # create gradient tensor
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
        else:
            if callbacks is None:
                callbacks = [error_clip_callback]
            else:
                assert (isinstance(callbacks, list))
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, \
                "The loss.shape should be (1L,), but the current loss.shape is {}. " \
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape)
            parameter_list = parameters if parameters \
                else self._parameter_list
            with program_guard(program, startup_program):
                from paddle.incubate.autograd.utils import prim_enabled
                if prim_enabled():
                    params_grads = append_backward_new([loss], parameter_list,
                                                       act_no_grad_set,
                                                       callbacks)
                else:
                    params_grads = append_backward(loss, parameter_list,
                                                   act_no_grad_set, callbacks)
                # Note: since we can't use all_reduce_op now,
                #  dgc_op should be the last op of one grad.
                self._append_dgc_ops(params_grads)
        return params_grads

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np

                inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
                linear = paddle.nn.Linear(10, 10)
                inp = paddle.to_tensor(inp)
                out = linear(inp)
                loss = paddle.mean(out)
                optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters())
                params_grads = optimizer.backward(loss)
                optimizer.apply_gradients(params_grads)

        """

        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:

            params_grads = append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(params_grads,
                                                      self.regularization)

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

    def _apply_optimize(self, loss, startup_program, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Tensor): loss tensor to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameters`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
M
                               framework.default_startup_program()):
                if isinstance(params_grads, list):
                    if self._grad_clip is not None:
                        params_grads = self._grad_clip(params_grads)
                    params_grads = self.append_regularization_ops(
                        params_grads, self.regularization)
                else:
                    grad_clip = params_grads['grad_clip']
                    if grad_clip is not None:
                        params_grads['params'] = grad_clip(
                            params_grads['params'])
                    params_grads['params'] = self.append_regularization_ops(
                        params_grads['params'], self.regularization)
                optimize_ops = self._create_optimization_pass(params_grads)
        else:
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """ Create and add backward regularization Operators
        Function helper of append_regularization_ops.
        """
        # If no gradient or no regularization is specified,  then we don't need to do anything
        if grad is None or (
            (not hasattr(param, 'regularizer') or
             (hasattr(param, 'regularizer') and param.regularizer is None))
                and regularization is None):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

        if framework.in_dygraph_mode():
            return _C_ops.add_n([grad, regularization_term])
        elif framework._in_legacy_dygraph():
            return _legacy_C_ops.sum([grad, regularization_term])
        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR)

        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)

        return new_grad

    def append_regularization_ops(self,
                                  parameters_and_grads,
                                  regularization=None):
        r"""Create and add backward regularization Operators
        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.
        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. If a parameter's own
                            regularizer is not set, this global regularizer
                            will be applied to it.
        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient
        Raises:
            Exception: Unknown regularization type
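
        Examples:
            .. code-block:: python

                # A hedged static-graph sketch; the toy network and the
                # L2Decay coefficient below are illustrative only.
                import paddle
                from paddle.regularizer import L2Decay

                paddle.enable_static()
                main_prog = paddle.static.Program()
                start_prog = paddle.static.Program()
                with paddle.static.program_guard(main_prog, start_prog):
                    x = paddle.static.data(name='x', shape=[None, 10], dtype='float32')
                    loss = paddle.mean(paddle.static.nn.fc(x, 10))
                    sgd = paddle.optimizer.SGD(learning_rate=0.01)
                    params_grads = sgd.backward(loss)
                    params_grads = sgd.append_regularization_ops(
                        params_grads, L2Decay(coeff=0.01))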
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization)
                params_and_grads.append((param, new_grad))
        else:
            repeate_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if not repeate_regularizer and param.regularizer is not None and regularization is not None:
                        repeate_regularizer = True
                        logging.info(
                            "If the regularizer of a Parameter has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in the Optimizer will not take effect for it; it will only be applied to the other Parameters!"
                            % regularization.__str__())
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization)
                        params_and_grads.append((param, new_grad))
        return params_and_grads

    def _get_no_grad_set(self, loss, no_grad_set=None):
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = set(
            [param.name for param in parameters if param.stop_gradient is True])
        # If the parameter is not trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

    @framework.dygraph_only
    def clear_grad(self, set_to_zero=True):
        """
        Clear the gradients of all optimized parameters for the model.

        If not cleared, new gradients will accumulate on top of the previous gradients.

        There are two methods to clear gradients: setting them to zero or deleting them.

        Args:
            set_to_zero (bool, optional): Whether to set the gradients to zero (True) or delete them (False). Default is True.

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()

        """
        param_list = []
        if self._parameter_list is None or not isinstance(
                self._parameter_list[0], dict):
            for p in self._parameter_list:
                if not p.stop_gradient:
                    param_list.append(p)
        else:
            for param_group in self._param_groups:
                for p in param_group['params']:
                    if not p.stop_gradient:
                        param_list.append(p)
        if _in_eager_without_dygraph_check():
1156
            for p in param_list:
1157
                p.clear_gradient(set_to_zero)
1158 1159
        else:
            core.clear_gradients(param_list, set_to_zero)

    @imperative_base.no_grad
    def minimize(self,
                 loss,
                 startup_program=None,
                 parameters=None,
                 no_grad_set=None):
        """
        Add operations to minimize ``loss`` by updating ``parameters``.

        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), a list of operators appended
            by minimize and a list of (param, grad) tensor pairs, where param is a
            ``Parameter`` and grad is the gradient value corresponding to the parameter.
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)
                input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
                out = linear(input)
                loss = paddle.mean(out)

                beta1 = paddle.to_tensor([0.9], dtype="float32")
                beta2 = paddle.to_tensor([0.99], dtype="float32")

                adam = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters(),
                        weight_decay=0.01,
                        beta1=beta1,
                        beta2=beta2)
                loss.backward()
                adam.minimize(loss)
                adam.clear_grad()

        """
        assert isinstance(loss, Variable), "The loss should be a Tensor."

        parameter_list = parameters if parameters \
            else self._parameter_list

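        # Build the (param, grad) pairs with backward(), then append the optimize
        # ops that consume those gradients.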
        params_grads = self.backward(loss,
                                     startup_program=startup_program,
                                     parameters=parameter_list,
                                     no_grad_set=no_grad_set)

        optimize_ops = self._apply_optimize(loss,
                                            startup_program=startup_program,
                                            params_grads=params_grads)

        return optimize_ops, params_grads

    @imperative_base.no_grad
    @framework.dygraph_only
    def step(self):
        """
        Execute the optimizer and update parameters once.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """

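        # Parameters may have been passed either as a flat list or as parameter
        # groups (a list of dicts); handle the flat-list case first.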
        if not isinstance(self._param_groups[0], dict):
            params_grads = []
            for param in self._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))

            self._apply_optimize(loss=None,
                                 startup_program=None,
                                 params_grads=params_grads)

        else:
            # optimize parameters in groups
            for param_group in self._param_groups:
                params_grads = defaultdict(lambda: list())
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
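                # Carry the group-level options (e.g. learning_rate, weight_decay)
                # alongside the (param, grad) pairs so they reach _apply_optimize.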
                params_grads.update(
                    {k: v
                     for k, v in param_group.items() if k != 'params'})
                self._apply_optimize(loss=None,
                                     startup_program=None,
                                     params_grads=params_grads)

    def _add_param_group(self, param_group):
        """
        Add a param group to parameter_list.

        Args:
            param_group (dict): The group of Tensors to be optimized with
            different optimization options.
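
        The example below is an illustrative sketch only: parameter groups are
        normally supplied as a list of dicts through the optimizer constructor,
        which forwards each dict to this method.

        Examples:
            .. code-block:: python

                import paddle

                linear_1 = paddle.nn.Linear(10, 10)
                linear_2 = paddle.nn.Linear(10, 10)
                # The second group overrides learning_rate and weight_decay.
                momentum = paddle.optimizer.Momentum(
                    learning_rate=0.1,
                    parameters=[{
                        'params': linear_1.parameters()
                    }, {
                        'params': linear_2.parameters(),
                        'learning_rate': 0.01,
                        'weight_decay': 0.001
                    }],
                    weight_decay=0.01,
                    momentum=0.9)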
        """
        params = param_group['params']
        if isinstance(params, Parameter):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters should be in ordered collections,"
                "but received set, please use list instead.")
        else:
            param_group['params'] = list(params)

        # Update optimization options for each groups
        for k, v in self._default_dict.items():
            param_group.setdefault(k, v)

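        # A parameter must belong to exactly one group; reject any overlap with
        # previously added groups.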
        param_set = set()
        for group in self._param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group")

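        # A float weight_decay is interpreted as the coefficient of an L2 penalty;
        # any other value is assumed to already be a regularizer instance.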
        for param in param_group['params']:
            weight_decay = param_group['weight_decay']
            if isinstance(weight_decay, float):
                from ..fluid.regularizer import L2Decay
                regularization = L2Decay(weight_decay)
            else:
                regularization = weight_decay
            param.regularizer = regularization
            param.optimize_attr['learning_rate'] = param_group.get(
                'learning_rate', 1.)

        self._param_groups.append(param_group)

    def _update_param_group(self, parameters):
        """
        Update the param group with a new entry.

        Args:
            parameters (dict): The extra group of Tensors to be optimized with
            different optimization options. Only used in child classes.
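
        The sketch below is illustrative only; it shows the kind of override a
        subclass might provide. ``beta1`` stands for a hypothetical per-group
        hyper-parameter of that subclass.

        Examples:
            .. code-block:: python

                # Illustrative override in a subclass (not part of this base class).
                def _update_param_group(self, parameters):
                    self._beta1 = parameters.get('beta1', self._default_dict['beta1'])
                    # Return only the Tensors stored under the 'params' key.
                    return parameters.get('params')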
        """
        pass

    @framework.dygraph_only
    def _multi_tensor_init(self, target_block, parameters):
        """
        All parameters used for optimizer calculations (such as: parameters, master_weight,
        velocity_acc for momentum) are grouped into a python list by data type (float16, float32).
        This function will be overridden in the corresponding optimizer file.

        Args:
            target_block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    @framework.dygraph_only
    def _append_optimize_multi_tensor_op(self, target_block,
                                         parameters_and_grads):
        """
        For multi-tensor optimizers, append the merged optimize operator to the block.
        """
        pass