# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import six
import logging
from collections import defaultdict

import paddle
from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard

from ..fluid import framework
from ..fluid import layers
from ..fluid import unique_name
from ..fluid.backward import append_backward, _some_in_set_, _append_grad_suffix_, _get_no_grad_set_name
from ..fluid.clip import GradientClipBase, GradientClipByNorm, error_clip_callback, append_gradient_clip_ops
from ..fluid.framework import program_guard, Parameter
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from ..fluid.layers import ops
from ..fluid.dygraph import base as imperative_base
from ..fluid.dygraph import no_grad
from paddle.fluid import core
from paddle.fluid.layers import tensor
from functools import reduce
from ..fluid.wrapped_decorator import signature_safe_contextmanager
from .. import compat as cpt
from .lr import LRScheduler
import copy
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check, _current_expected_place, in_dygraph_mode

__all__ = []


@framework.static_only
def append_backward_new(loss_list,
                        parameter_list=None,
                        no_grad_set=None,
                        callbacks=None,
                        checkpoints=None,
                        distop_context=None):
    from paddle.incubate.autograd.primx import orig2prim, Transform
    program = default_main_program()
    assert program.num_blocks == 1, "The append_backward_new interface is designed to process only one block."
    block = program.current_block()
    for el in loss_list:
        assert el.block == block, 'variables in loss_list should be in the current block of the main program'
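    # Build the backward pass with the primitive-operator based autodiff:
    # orig2prim lowers the block to primitive ops, linearize() produces the
    # forward (dot) variables, and transpose() turns them into reverse-mode
    # gradient (bar) variables.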

    orig2prim(block)
    ad = Transform(block)
    if parameter_list is None:
        parameter_list = program.global_block().all_parameters()
    param_dot, loss_dot = ad.linearize(parameter_list, loss_list)
    loss_bar, param_bar = ad.transpose(loss_dot, param_dot)

    # remove param_dot and their constructor ops
    op_indexes = []
    for var in param_dot:
        if var is not None:
            op_index = block.ops.index(var.op)
            assert op_index >= 0
            op_indexes.append(op_index)

    ad.erase_ops(sorted(op_indexes))
    ad.erase_dots(param_dot)

    if len(parameter_list) == 1:
        params_and_grads = [(parameter_list, param_bar)]
    else:
        params_and_grads = []
        for i, param in enumerate(parameter_list):
            params_and_grads.append((param, param_bar[i]))
    return params_and_grads


class Optimizer(object):
    r"""Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly,
    but should use one of its implementations instead.

    Args:
        learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
            It can be a float value or any subclass of ``LRScheduler`` .
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. You can specify different options for \
            different parameter groups, such as the learning rate and weight decay; in that case \
            the parameters are a list of dicts. Note that the learning_rate in the parameter groups \
            represents the scale of the base learning_rate. \
            The default value is None in static mode, at this time all parameters will be updated.
        weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
            It can be a float value as the coefficient of L2 regularization or \
            :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
            If a parameter has already set its regularizer using :ref:`api_fluid_ParamAttr`, \
            the regularization setting here in the optimizer will be ignored for this parameter. \
            Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \
            some derived class of ``GradientClipBase`` . There are three clipping strategies \
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for users to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Returns:
       Base class for optimizer.

    Examples:
        .. code-block:: python

            # Take the subclass adam as an example
            import paddle
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear(inp)
            loss = paddle.mean(out)
            adam = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters())
            loss.backward()
            adam.step()
            adam.clear_grad()

            # Take the subclass sgd as an example
            # Optimize parameters in linear_1 and linear_2 with different options.
            # Note that the effective learning_rate of linear_2 is 0.1 * 0.1 = 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1
                }],
                weight_decay=0.01)
            loss.backward()
            sgd.step()
            sgd.clear_grad()

    """

    @imperative_base.no_grad
    def __init__(self,
                 learning_rate,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 name=None):

        if parameters is not None:
            # paddle.Tensor is also iterable, so here we don't check whether
            # the input is iterable; if the input were a paddle.Tensor,
            # list(paddle.Tensor) would produce an unintended value
            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                raise TypeError(
                    "`parameters` argument given to the optimizer should be "
178 179
                    "an iterable of paddle Tensors, but got argument type is `{}`."
                    .format(type(parameters)))
            if isinstance(parameters, dict):
                raise TypeError(
                    "`parameters` argument should not be a dict; "
                    "if parameter groups are needed, please set `parameters`"
                    " as a list of dicts")
            self._parameter_list = list(parameters)
        else:
            self._parameter_list = None

        self._name = name
        if framework._non_static_mode():
            if self._parameter_list is None:
                raise AttributeError(
                    "parameters argument given to the Optimizer should not be None in dygraph mode."
                )
            if weight_decay is not None:
                if not isinstance(self._parameter_list[0], dict):
                    for param in self._parameter_list:
                        if hasattr(param, 'regularizer'
                                   ) and param.regularizer is not None:
                            logging.info(
                                "If the regularizer of a Parameter has already been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr', "
                                "the weight_decay[%s] in the Optimizer will not take effect for it and will only be applied to other Parameters!"
                                % weight_decay.__str__())
                            break

        if not isinstance(learning_rate, (float, LRScheduler)):
            raise TypeError(
                "learning rate should be float or LRScheduler, got %s here" %
                type(learning_rate))
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        if isinstance(weight_decay, float):
            from ..fluid.regularizer import L2Decay
            self.regularization = L2Decay(weight_decay)
        else:
            self.regularization = weight_decay
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate

        self._dtype = None
        # Infer the dtype from the parameters
        if self._parameter_list:
            if isinstance(self._parameter_list[0], dict):
                for param_group in self._parameter_list:
                    assert 'params' in param_group, \
                        'params should be set in parameters if parameter groups are optimized in different options'
                self._dtype = self._parameter_list[0]['params'][0].dtype
            else:
                self._dtype = self._parameter_list[0].dtype

        # each program should have an independent learning rate
        # program -> tensor(learning_rate)
        self._learning_rate_map = dict()
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra tensors associated with the parameters
        # to train. These tensors are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        self.clear_gradients = self.clear_grad
        self._default_dict = {
            'weight_decay': self.regularization,
            'grad_clip': self._grad_clip
        }

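        # When ``parameters`` is a list of dicts, each dict defines a parameter
        # group that may override the defaults above (e.g. weight_decay,
        # grad_clip); otherwise all parameters share a single flat group.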
        self._param_groups = []
        if self._parameter_list and isinstance(self._parameter_list[0], dict):
            for param_group in self._parameter_list:
                self._add_param_group(param_group.copy())
        else:
            self._param_groups = self._parameter_list

        # NOTE: Multi Tensor: Pass in all parameters and gradients to the op kernel of the Optimizer at one time for updating for dygraph mode.
        # Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
        self._use_multi_tensor = None
        self._param_dict = {'FP32_LODTensor': [], 'FP16_LODTensor': []}

        self._auxiliary_vars = {}

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _get_auxiliary_var(self, key):
        return self._auxiliary_vars.get(key, None)

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get state dict information from the optimizer. It contains all the tensors used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be included in the state dict.
        If the optimizer has never been called (i.e. ``minimize`` has not been run), the state_dict is empty.

        Args:
            None

        Returns:
            state_dict(dict) : dict containing all the Tensors used by the optimizer

        Examples:
            .. code-block:: python

                import paddle
                emb = paddle.nn.Embedding(10, 10)

                adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
                state_dict = adam.state_dict()

        '''
        state_dict = {}
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        # if the optimizer has master weights, save them as well
        if hasattr(self, "_master_weights"):
            if len(self._master_weights) != 0:
                state_dict["master_weights"] = self._master_weights
        # save the lr scheduler state if lr decay is used
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
        return state_dict

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load the optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be changed accordingly.

        Args:
            state_dict(dict) : Dict containing all the Tensors needed by the optimizer
        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                emb = paddle.nn.Embedding(10, 10)

                layer_state_dict = emb.state_dict()
                paddle.save(layer_state_dict, "emb.pdparams")

                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                opt_state_dict = adam.state_dict()
                paddle.save(opt_state_dict, "adam.pdopt")

                opti_state_dict = paddle.load("adam.pdopt")
                adam.set_state_dict(opti_state_dict)

        '''
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])

        # NOTE: exclude learning rate scheduler's state from
        # _accumulators_holder.
        state_dict = state_dict.copy()
        if "LR_Scheduler" in state_dict:
            state_dict.pop("LR_Scheduler")
        if "master_weights" in state_dict:
            if hasattr(self, "_master_weights"):
                self._master_weights = state_dict["master_weights"]
            state_dict.pop("master_weights")
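        # Restore every accumulator tensor in place; each saved entry must match
        # the shape and dtype of the accumulator it replaces.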
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert var_tmp.name in state_dict, \
                        "optimizer Tensor {} not found".format( var_tmp.name )
                var = var_tmp.value()
                tensor = var.get_tensor()
                model_np = np.array(tensor)

                load_para = state_dict[var_tmp.name]

                if isinstance(load_para, Variable):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, core.VarBase):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, np.ndarray):
                    load_para_np = load_para
                else:
                    raise RuntimeError("State dict type {} not supported".format(
                        str(type(load_para))))

                assert model_np.shape == load_para_np.shape,  \
                                          "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
                                                 var_tmp.name, model_np.shape, load_para_np.shape)

                assert model_np.dtype == load_para_np.dtype, \
                                          "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
                                                var_tmp.name, model_np.dtype, load_para_np.dtype)

                tensor.set(load_para_np, framework._current_expected_place())

    def get_opti_var_name_list(self):
        return self._opti_name_list

    def _create_global_learning_rate(self):
        # the lr var can't be float16; for pure fp16 training, the lr dtype needs extra handling
        _lr_dtype = paddle.get_default_dtype(
        ) if self._dtype is None else self._dtype
        _lr_dtype = paddle.float32 if (
            paddle.get_default_dtype() != "float16"
            and _lr_dtype == paddle.float16) else _lr_dtype
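        # For an LRScheduler, the learning rate lives in one persistent global
        # variable; the scheduler is attached to the main program (lr_sheduler /
        # lr_var below) so the executor can refresh the value every step.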
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(name=lr_name,
                                                            shape=[1],
                                                            persistable=True,
                                                            stop_gradient=True,
                                                            dtype=_lr_dtype)
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var

                self._learning_rate_map[
                    framework.default_main_program()] = lr_var

            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value))
        elif isinstance(self._learning_rate, float):
            # only create global lr_var once
            lr = self._global_learning_rate()
            if isinstance(lr, framework.Variable):
                return
            else:
                self._learning_rate_map[framework.default_main_program(
                )] = layers.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(self._learning_rate),
                    dtype=_lr_dtype,
                    persistable=True)

    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer uses an LRScheduler,
        this API cannot be invoked, because it would lead to a conflict.

        Args:
            value (float): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)

                adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                # set learning rate manually by python float value
                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6

        """
        if not isinstance(value, (int, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be float, but received %s."
                % (type(value)))
        if isinstance(self._learning_rate, LRScheduler):
            raise RuntimeError(
                "optimizer's learning rate can't be an LRScheduler when invoking this API, because it would lead to a conflict."
            )
        self._learning_rate = float(value)
        current_lr = self._global_learning_rate()
        if current_lr is not None:
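            # Overwrite the existing learning-rate variable in place: eager mode
            # uses full_, legacy dygraph uses fill_constant, and static mode
            # appends a fill_constant op to the global block.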
            if in_dygraph_mode():
                place = _current_expected_place()
                _C_ops.full_(current_lr, list(current_lr.shape), float(value),
                             current_lr.dtype, place)

            elif _in_legacy_dygraph():
                _legacy_C_ops.fill_constant(current_lr, 'value', float(value),
                                            'dtype', current_lr.dtype, 'shape',
                                            list(current_lr.shape))
            else:
                global_block = framework.default_main_program().global_block()
                global_block.append_op(type='fill_constant',
                                       outputs={'Out': [current_lr]},
                                       attrs={
                                           'dtype': current_lr.dtype,
                                           'shape': list(current_lr.shape),
                                           'value': float(value)
                                       },
                                       stop_gradient=True)

    def get_lr(self):
        """
        Get current learning rate of optimizer.
        If an 'LRScheduler' is not used, the return value is always the same.
        If an 'LRScheduler' is used, the return value is the current scheduled learning rate.

        Returns:
            float: The current learning rate of optimizer.

        Examples:
            .. code-block:: python

                # train on default dynamic graph mode
                import paddle
                import numpy as np
                emb = paddle.nn.Embedding(10, 3)

                ## example1: LRScheduler is not used, the returned value is always the same
                adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
                    adam.step()

                ## example2: StepDecay is used, return the scheduled learning rate
                scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
                    adam.step()
                    scheduler.step()

                # train on static graph mode
                paddle.enable_static()
                main_prog = paddle.static.Program()
                start_prog = paddle.static.Program()
                with paddle.static.program_guard(main_prog, start_prog):
                    x = paddle.static.data(name='x', shape=[None, 10])
                    z = paddle.static.nn.fc(x, 100)
                    loss = paddle.mean(z)
                    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                    adam = paddle.optimizer.Adam(learning_rate=scheduler)
                    adam.minimize(loss)

                exe = paddle.static.Executor()
                exe.run(start_prog)
                for batch in range(10):
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr()))     # 0.5->0.05->0.005...
                    out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
                    scheduler.step()

        """
        if isinstance(self._learning_rate, float):
            return self._learning_rate
        else:
            return self._learning_rate()

    def _global_learning_rate(self, program=None):
        """
        get global decayed learning rate
        :return:
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """ append optimize operator to block and return all the added optimize_op
        """
        raise NotImplementedError(
            "Class \"Optimizer\" cannot be used directly as an optimizer, please use its subclasses such as \"Adam\""
        )

    def _create_param_lr(self, param_and_grad):
        # create learning rate tensor for every parameter
        param = param_and_grad[0]
        if hasattr(param, 'optimize_attr'):
            param_lr = param.optimize_attr['learning_rate']
            if type(param_lr) == Variable:
                return param_lr
            else:
                if param_lr == 1.0:
                    return self._global_learning_rate()
                else:
                    with default_main_program()._lr_schedule_guard(
                            is_with_opt=True), framework.name_scope(
                                'scale_with_param_lr'):
                        return self._global_learning_rate() * param_lr
        else:
            return self._global_learning_rate()

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    def _finish_update(self, block, parameters_and_grads):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer

        Returns:
            None
        """
        pass

    def _add_accumulator(self,
                         name,
                         param,
                         dtype=None,
                         fill_value=0.0,
                         shape=None,
                         type=None,
                         device=None):
        """Utility function to add an accumulator for a parameter

        Args:
            block: the block in which the loss tensor is present
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be added
            dtype: data type of the accumulator tensor
            fill_value: value to initialize the accumulator tensor
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name in self._accumulators
                and param.name in self._accumulators[name]):
            if framework._non_static_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name))
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if framework._in_eager_without_dygraph_check() else
            (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True)
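        # Place the accumulator on the same device as its parameter so that the
        # optimizer update runs where the parameter lives.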
        if device is None:
            device = self._get_device_for_param(param.name)
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value)))

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert var_name in self._accumulators_holder, \
                        "Optimizer set error, {} should be in state dict".format( var_name )
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be fetched

        Returns:
            accumulator tensor for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name not in self._accumulators
                or param.name not in self._accumulators[name]):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, param.name))
        return self._accumulators[name][param.name]

    def _update_param_device_map(self, parameters_and_grads, target_block):
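        # Record the op-device attribute of the first op that consumes each
        # trainable parameter; _get_device_for_param() reads this map later.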
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].stop_gradient is False:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name)
                        break

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device

    def _create_optimization_pass(self, parameters_and_grads):
        """Add optimization operators to update gradients to tensors.

        Args:
          parameters_and_grads(list(tuple(Tensor, Tensor))):
            a list of (tensor, gradient) pair to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.

        # Always called under program_guard; use the global block as the loss block.
        # But if current block is in control flow, append optimize op in the
        # grad block of current block

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert current_block.backward_block_idx != -1, \
                "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx]

        start = len(target_block.ops)
        self.helper = LayerHelper(self.__class__.__name__)

        self._create_global_learning_rate()

        # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
        if self._use_multi_tensor and self.__class__.__name__ in [
                'Momentum', 'Adam'
        ]:
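            # The FP32/FP16 parameter buckets are built lazily the first time
            # the fused multi-tensor path is taken.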
            if len(self._param_dict['FP32_LODTensor']) == 0 and len(
                    self._param_dict['FP16_LODTensor']) == 0:
                if isinstance(parameters_and_grads, list):
                    self._multi_tensor_init(target_block, [
                        p[0]
                        for p in parameters_and_grads if not p[0].stop_gradient
                    ])
                else:
                    self._update_param_group(parameters_and_grads)
                    self._multi_tensor_init(target_block, [
                        p[0] for p in parameters_and_grads['params']
                        if not p[0].stop_gradient
                    ])
            if framework._non_static_mode():
                self._append_optimize_multi_tensor_op(target_block,
                                                      parameters_and_grads)
            else:
                self._update_param_device_map(parameters_and_grads,
                                              target_block)
                # NOTE: Multi Tensor requires all parameters to be in the same device and program.
                # param_grad_list = [p_0,g_0,p_1,g_1,....]
                param_grad_list = []
                for param_and_grad in parameters_and_grads:
                    if not param_and_grad[0].stop_gradient and param_and_grad[
                            1] is not None:
                        param_grad_list.append(param_and_grad[0])
                        param_grad_list.append(param_and_grad[1])
                with param_grad_list[0].block.program._optimized_guard(
                        param_grad_list), name_scope("optimizer"):
                    device = self._get_device_for_param(param_grad_list[0].name)
                    with device_guard(device):
                        self._append_optimize_multi_tensor_op(
                            target_block, parameters_and_grads)
        else:
            if not framework._non_static_mode():
                params_grads_device_map = parameters_and_grads[
                    'params'] if isinstance(parameters_and_grads,
                                            dict) else parameters_and_grads
                self._update_param_device_map(params_grads_device_map,
                                              target_block)

            if isinstance(parameters_and_grads, list):
                self._create_accumulators(target_block, [
                    p[0] for p in parameters_and_grads if not p[0].stop_gradient
                ])
            else:
                params_acc_dict = parameters_and_grads.copy()
                params_acc_dict['params'] = [
                    p[0] for p in params_acc_dict['params']
                    if not p[0].stop_gradient
                ]
                self._create_accumulators(target_block, params_acc_dict)

            if framework._non_static_mode():
                if isinstance(parameters_and_grads, list):
                    for param_and_grad in parameters_and_grads:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            self._append_optimize_op(target_block,
                                                     param_and_grad)
                else:
                    for param_and_grad in parameters_and_grads['params']:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            param_grad_dict = dict()
                            param_grad_dict['params'] = param_and_grad
                            param_grad_dict.update({
                                k: v
                                for k, v in parameters_and_grads.items()
                                if k != 'params'
                            })
                            self._append_optimize_op(target_block,
                                                     param_grad_dict)
            else:
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    with param_and_grad[0].block.program._optimized_guard(
                            param_and_grad), name_scope("optimizer"):
                        if param_and_grad[0].stop_gradient is False:
                            device = self._get_device_for_param(
                                param_and_grad[0].name)
                            with device_guard(device):
                                optimize_op = self._append_optimize_op(
                                    target_block, param_and_grad)

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)

    def _append_dgc_ops(self, param_and_grad):
        pass

    def backward(self,
                 loss,
                 startup_program=None,
                 parameters=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        The first part of ``minimize``, do auto-diff to append backward operations for
        the current program.

        Args:
            loss (Tensor): ``loss`` tensor to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.

        Return:
            list: list of (param, grad) tensor pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np
                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        act_no_grad_set = None
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if framework._non_static_mode():
            parameter_list = parameters if parameters \
                else self._parameter_list

            params_grads = []
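            # In dygraph mode the gradients already live on the parameters, so
            # just pair every trainable parameter with its gradient tensor.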
            for param in parameter_list:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    # create gradient tensor
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
        else:
            if callbacks is None:
                callbacks = [error_clip_callback]
            else:
                assert (isinstance(callbacks, list))
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, \
                "The loss.shape should be (1L,), but the current loss.shape is {}. " \
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape)
            parameter_list = parameters if parameters \
                else self._parameter_list
            with program_guard(program, startup_program):
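                # Use the primitive-operator autodiff (append_backward_new) when
                # prim mode is enabled; otherwise fall back to append_backward.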
                from paddle.incubate.autograd.utils import prim_enabled
                if prim_enabled():
                    params_grads = append_backward_new([loss], parameter_list,
                                                       act_no_grad_set,
                                                       callbacks)
                else:
                    params_grads = append_backward(loss, parameter_list,
                                                   act_no_grad_set, callbacks)
                # Note: since we can't use all_reduce_op now,
                #  dgc_op should be the last op of one grad.
                self._append_dgc_ops(params_grads)
        return params_grads

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np

                inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
                linear = paddle.nn.Linear(10, 10)
                inp = paddle.to_tensor(inp)
                out = linear(inp)
                loss = paddle.mean(out)
                optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters())
                params_grads = optimizer.backward(loss)
                optimizer.apply_gradients(params_grads)

        """

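        # Sort by parameter name so the optimize ops are appended in a
        # deterministic order.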
        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:

            params_grads = append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(params_grads,
                                                      self.regularization)

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

    def _apply_optimize(self, loss, startup_program, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Tensor): loss tensor to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameters`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
            with program_guard(framework.default_main_program(),
                               framework.default_startup_program()):
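                # In dygraph mode, clipping and regularization are applied
                # eagerly here; params_grads is either a flat list or a
                # parameter-group dict that carries its own grad_clip.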
                if isinstance(params_grads, list):
                    if self._grad_clip is not None:
                        params_grads = self._grad_clip(params_grads)
                    params_grads = self.append_regularization_ops(
                        params_grads, self.regularization)
                else:
                    grad_clip = params_grads['grad_clip']
                    if grad_clip is not None:
                        params_grads['params'] = grad_clip(
                            params_grads['params'])

                    params_grads['params'] = self.append_regularization_ops(
                        params_grads['params'], self.regularization)
                optimize_ops = self._create_optimization_pass(params_grads)
        else:
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """ Create and add backward regularization Operators

        Function helper of append_regularization_ops.
        """
        # If no gradient or no regularization is specified, then we don't need to do anything
        if grad is None or (
            (not hasattr(param, 'regularizer') or
             (hasattr(param, 'regularizer') and param.regularizer is None))
                and regularization is None):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

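        # In dygraph the regularization term is added to the gradient eagerly;
        # in static graph mode a sum op is appended to the grad block below.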
        if framework.in_dygraph_mode():
            return _C_ops.add_n([grad, regularization_term])
        elif framework._in_legacy_dygraph():
            return _legacy_C_ops.sum([grad, regularization_term])

        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR)

        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)

        return new_grad

    def append_regularization_ops(self,
                                  parameters_and_grads,
                                  regularization=None):
        r"""Create and add backward regularization Operators

        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.

        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. If the parameter's own
                            regularizer is not set, this one will be applied.

        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient

        Raises:
            Exception: Unknown regularization type
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization)
                params_and_grads.append((param, new_grad))
        else:
            repeat_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if not repeat_regularizer and param.regularizer is not None and regularization is not None:
                        repeat_regularizer = True
                        logging.info(
                            "If the regularizer of a Parameter has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in the Optimizer will not take effect for it and will only be applied to other Parameters!"
                            % regularization.__str__())
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization)
                        params_and_grads.append((param, new_grad))
        return params_and_grads

    def _get_no_grad_set(self, loss, no_grad_set=None):
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = set(
            [param.name for param in parameters if param.stop_gradient is True])
        # If the parameter is not trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

    @framework.dygraph_only
    def clear_grad(self, set_to_zero=True):
        """
        Clear the gradients of all optimized parameters for the model.

        If not cleared, new gradients will accumulate on top of the previous gradients.

        There are two methods to clear the gradients: set them to zero or delete them.

        Args:
            set_to_zero (bool, optional): Whether to set the gradients to zero (True) or delete them (False). Default is True.

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()

        """
        param_list = []
        if self._parameter_list is None or not isinstance(
                self._parameter_list[0], dict):
            for p in self._parameter_list:
                if not p.stop_gradient:
                    param_list.append(p)
        else:
            for param_group in self._param_groups:
                for p in param_group['params']:
                    if not p.stop_gradient:
                        param_list.append(p)

        if _in_eager_without_dygraph_check():
            for p in param_list:
                p.clear_gradient(set_to_zero)
        else:
            core.clear_gradients(param_list, set_to_zero)

    @imperative_base.no_grad
    def minimize(self,
                 loss,
                 startup_program=None,
                 parameters=None,
                 no_grad_set=None):
        """
        Add operations to minimize ``loss`` by updating ``parameters``.

        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), a list of operators appended
            by minimize and a list of (param, grad) tensor pairs; param is a
            ``Parameter`` and grad is the gradient value corresponding to the parameter.
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)
                input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
                out = linear(input)
                loss = paddle.mean(out)

                beta1 = paddle.to_tensor([0.9], dtype="float32")
                beta2 = paddle.to_tensor([0.99], dtype="float32")

                adam = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters(),
                        weight_decay=0.01)
                loss.backward()
                adam.minimize(loss)
                adam.clear_grad()

        """
        assert isinstance(loss, Variable), "The loss should be a Tensor."

        parameter_list = parameters if parameters \
            else self._parameter_list

        params_grads = self.backward(loss,
                                     startup_program=startup_program,
                                     parameters=parameter_list,
                                     no_grad_set=no_grad_set)

        optimize_ops = self._apply_optimize(loss,
                                            startup_program=startup_program,
                                            params_grads=params_grads)

        return optimize_ops, params_grads

    @imperative_base.no_grad
    @framework.dygraph_only
    def step(self):
        """
        Execute the optimizer and update parameters once.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """

        if not isinstance(self._param_groups[0], dict):
            params_grads = []
            for param in self._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))

            self._apply_optimize(loss=None,
                                 startup_program=None,
                                 params_grads=params_grads)

        else:
            # optimize parameters in groups
            for param_group in self._param_groups:
                params_grads = defaultdict(lambda: list())
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                params_grads.update(
                    {k: v
                     for k, v in param_group.items() if k != 'params'})
                self._apply_optimize(loss=None,
                                     startup_program=None,
                                     params_grads=params_grads)

    def _add_param_group(self, param_group):
        """
        Add a param group to parameter_list.

        Args:
            param_group (dict): The group of Tensors to be optimized with
            different optimization options.
        """
        params = param_group['params']
        if isinstance(params, Parameter):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters should be in ordered collections,"
                "but received set, please use list instead.")
        else:
            param_group['params'] = list(params)

        # Update optimization options for each groups
        for k, v in self._default_dict.items():
            param_group.setdefault(k, v)

        param_set = set()
        for group in self._param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group")

        for param in param_group['params']:
            weight_decay = param_group['weight_decay']
            if isinstance(weight_decay, float):
                from ..fluid.regularizer import L2Decay
                regularization = L2Decay(weight_decay)
            else:
                regularization = weight_decay
            param.regularizer = regularization
            param.optimize_attr['learning_rate'] = param_group.get(
                'learning_rate', 1.)

        self._param_groups.append(param_group)
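
    # Illustrative sketch (assumption about typical usage, not in the original
    # source): parameter groups reach _add_param_group when the optimizer is
    # constructed with a list of dicts, each dict optionally overriding the
    # default options for its own parameters.
    #
    #     import paddle
    #     linear_1 = paddle.nn.Linear(10, 10)
    #     linear_2 = paddle.nn.Linear(10, 10)
    #     adam = paddle.optimizer.Adam(
    #         learning_rate=0.1,
    #         parameters=[{
    #             'params': linear_1.parameters()
    #         }, {
    #             'params': linear_2.parameters(),
    #             'weight_decay': 0.001,   # float -> converted to L2Decay above
    #             'learning_rate': 0.1
    #         }],
    #         weight_decay=0.01)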

    def _update_param_group(self, parameters):
        """
        Update the param group with new entries.
        Args:
            parameters (dict): The extra group of Tensors to be optimized with
            different optimization options. Only used in child classes.
        """
        pass
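
    # Illustrative sketch (assumption, not part of the original source): a
    # child optimizer typically overrides _update_param_group to pull its own
    # hyper-parameters out of the current group before emitting ops, roughly:
    #
    #     def _update_param_group(self, parameters):
    #         self._beta1 = parameters.get('beta1', self._default_dict['beta1'])
    #         parameters = parameters.get('params')
    #         return parameters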

    @framework.dygraph_only
    def _multi_tensor_init(self, target_block, parameters):
        """
        All parameters used for the optimizer's calculations (such as the parameters themselves, master_weight, and velocity_acc for momentum) are grouped into a python list by data type (float16, float32).
        This function will be overridden in the corresponding optimizer file.

        Args:
            target_block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass
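
    # Illustrative sketch (assumption, not part of the original source; the
    # dict name and keys below are placeholders): a child optimizer that
    # supports the multi-tensor path would typically fill dtype-keyed lists
    # here, roughly:
    #
    #     def _multi_tensor_init(self, target_block, parameters):
    #         for p in parameters:
    #             if p.dtype == paddle.float32:
    #                 self._param_dict['FP32_LODTensor'].append(p)
    #             elif p.dtype == paddle.float16:
    #                 self._param_dict['FP16_LODTensor'].append(p)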

    @framework.dygraph_only
    def _append_optimize_multi_tensor_op(self, target_block,
                                         parameters_and_grads):
        """
        For multi-tensor optimizers, append the merged optimize operator to the block.
        """
        pass