# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import defaultdict

import numpy as np

import paddle
import paddle.autograd as imperative_base
from paddle import _C_ops
from paddle.fluid import core
from paddle.fluid.framework import (
    Variable,
    _current_expected_place,
    _in_eager_without_dygraph_check,
    default_main_program,
    device_guard,
    in_dygraph_mode,
    name_scope,
)

from ..fluid import framework, unique_name
from ..fluid.backward import _get_no_grad_set_name, append_backward
from ..fluid.framework import Parameter, program_guard
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from .lr import LRScheduler

__all__ = []


@framework.static_only
def append_backward_new(
    loss_list,
    parameter_list=None,
    no_grad_set=None,
    callbacks=None,
    checkpoints=None,
    distop_context=None,
):
    from paddle.incubate.autograd.primx import Transform, orig2prim

    program = default_main_program()
    assert (
        program.num_blocks == 1
    ), "The append_backward_new interface is designed to process only one block."
    block = program.current_block()
    for el in loss_list:
        assert (
            el.block == block
        ), 'variable in loss_list should be in current block of main program'

    orig2prim(block)
    ad = Transform(block)
    if parameter_list is None:
        parameter_list = program.global_block().all_parameters()
    param_dot, loss_dot = ad.linearize(parameter_list, loss_list)
    loss_bar, param_bar = ad.transpose(loss_dot, param_dot)

    # remove param_dot and their constructor ops
    op_indexes = []
    for var in param_dot:
        if var is not None:
            op_index = block.ops.index(var.op)
            assert op_index >= 0
            op_indexes.append(op_index)

    ad.erase_ops(sorted(op_indexes))
    ad.erase_dots(param_dot)

    if len(parameter_list) == 1:
        params_and_grads = [(parameter_list, param_bar)]
    else:
        params_and_grads = []
        for i, param in enumerate(parameter_list):
            params_and_grads.append((param, param_bar[i]))
    return params_and_grads
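
# A minimal usage sketch for ``append_backward_new`` (illustrative only; the
# names ``x`` and ``loss`` below are hypothetical, and the sketch assumes
# prim-based autodiff is available via ``paddle.incubate.autograd`` in this build):
#
#     import paddle
#     paddle.enable_static()
#     paddle.incubate.autograd.enable_prim()
#     x = paddle.static.data(name='x', shape=[2, 13], dtype='float32')
#     loss = paddle.mean(paddle.static.nn.fc(x, 5))
#     params_grads = append_backward_new([loss])  # list of (param, grad) pairs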


class Optimizer:
    r"""Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly, but should use one of its
    implementations instead.

    Args:
        learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
            It can be a float value or any subclass of ``LRScheduler`` .
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. And you can specify different options for \
            different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are list of dict. Note that the learning_rate in parameter groups \
            represents the scale of base learning_rate. \
            The default value is None in static graph mode, at this time all parameters will be updated.
        weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
            It can be a float value used as the coefficient of L2 regularization or \
            :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
            If a parameter has already set a regularizer using :ref:`api_fluid_ParamAttr`, \
            the regularization setting here in the optimizer will be ignored for this parameter. \
            Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \
            some derived class of ``GradientClipBase`` . There are three clipping strategies \
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Returns:
        Base class for optimizer.

    Examples:
        .. code-block:: python

            # Take the subclass Adam as an example.
            import paddle
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear(inp)
            loss = paddle.mean(out)
            adam = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters())
            loss.backward()
            adam.step()
            adam.clear_grad()

            # Take the subclass SGD as an example.
            # Optimize parameters in linear_1 and linear_2 with different options.
            # Note that the effective learning_rate of linear_2 is 0.1 * 0.1 = 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1
                }],
                weight_decay=0.01)
            loss.backward()
            sgd.step()
            sgd.clear_grad()

    """

    @imperative_base.no_grad()
    def __init__(
        self,
        learning_rate,
        parameters=None,
        weight_decay=None,
        grad_clip=None,
        name=None,
    ):

        if parameters is not None:
            # paddle.Tensor is also iterable, so we do not check whether the
            # input is iterable here; if the input were a paddle.Tensor,
            # list(paddle.Tensor) would produce an erroneous value.
            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                raise TypeError(
                    "`parameters` argument given to the optimizer should be "
                    "an iterable of paddle Tensors, but got argument type is `{}`.".format(
                        type(parameters)
                    )
                )
            if isinstance(parameters, dict):
                raise TypeError(
                    "`parameters` argument should not get dict type, "
                    "if parameter groups is needed, please set `parameters`"
                    " as list of dict"
                )
            self._parameter_list = list(parameters)
        else:
            self._parameter_list = None

        self._name = name
        if framework._non_static_mode():
            if self._parameter_list is None:
                raise AttributeError(
                    "parameters argument given to the Optimizer should not be None in dygraph mode."
                )
            if weight_decay is not None:
                if not isinstance(self._parameter_list[0], dict):
                    for param in self._parameter_list:
                        if (
                            hasattr(param, 'regularizer')
                            and param.regularizer is not None
                        ):
                            logging.info(
                                "If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. "
                                "The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                                % weight_decay.__str__()
                            )
                            break

        if not isinstance(learning_rate, (float, LRScheduler)):
            raise TypeError(
                "learning rate should be float or LRScheduler, got %s here"
                % type(learning_rate)
            )
        if grad_clip is not None:
            if not isinstance(grad_clip, paddle.nn.clip.GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        if isinstance(weight_decay, float):
            from ..fluid.regularizer import L2Decay

            self.regularization = L2Decay(weight_decay)
        else:
            self.regularization = weight_decay
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate

        self._dtype = None
        # Infer the dtype form parameter
        if self._parameter_list:
            if isinstance(self._parameter_list[0], dict):
                for param_group in self._parameter_list:
                    assert (
                        'params' in param_group
                    ), 'params should be set in parameters if parameter groups are optimized in different options'
                self._dtype = self._parameter_list[0]['params'][0].dtype
            else:
                self._dtype = self._parameter_list[0].dtype

        # each program should have a independent learning rate
        # program -> tensor(learning_rate)
        self._learning_rate_map = dict()
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra tensors associated with the parameters
        # to train. These tensors are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        self.clear_gradients = self.clear_grad
        self._default_dict = {
            'weight_decay': self.regularization,
            'grad_clip': self._grad_clip,
        }

        self._param_groups = []
        if self._parameter_list and isinstance(self._parameter_list[0], dict):
            for param_group in self._parameter_list:
                self._add_param_group(param_group.copy())
        else:
            self._param_groups = self._parameter_list

        # NOTE: Multi Tensor: Pass in all parameters and gradients to the op kernel of the Optimizer at one time for updating for dygraph mode.
        # Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
        self._use_multi_tensor = None

        self._param_dict = self._create_multi_tensor_dict()
        self._auxiliary_vars = {}

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _create_multi_tensor_dict(self):
        n = len(self._param_groups) if self._param_groups is not None else 1
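        # The returned dict groups parameters by dtype with one sub-list per
        # parameter group, e.g. (illustrative shape only):
        #     {'FP32_LODTensor': [[...group 0...], [...group 1...]],
        #      'FP16_LODTensor': [[...group 0...], [...group 1...]]}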
        return {
            'FP32_LODTensor': [[] for _ in range(n)],
            'FP16_LODTensor': [[] for _ in range(n)],
        }

    def _get_auxiliary_var(self, key):
        return self._auxiliary_vars.get(key, None)

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get state dict information from the optimizer. It contains all the tensors used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be included in the state dict.
        If the optimizer has never been called (the minimize function), the state_dict is empty.

        Args:
            None

        Returns:
            state_dict(dict): dict containing all the Tensors used by the optimizer

        Examples:
            .. code-block:: python

                import paddle
                emb = paddle.nn.Embedding(10, 10)

                adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
                state_dict = adam.state_dict()

        '''
        state_dict = {}
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        # if master weights exist, save them as well
        if hasattr(self, "_master_weights"):
            if len(self._master_weights) != 0:
                state_dict["master_weights"] = self._master_weights
        # global step if lr decay is used
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
        return state_dict
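
    # Note (illustrative): when AMP master weights exist (e.g. the optimizer was
    # created with multi_precision enabled), they are saved above under the
    # "master_weights" key alongside the accumulator tensors.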

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load the optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If an LRScheduler has been used, global_step will be changed accordingly.

        Args:
            state_dict(dict): Dict containing all the Tensors needed by the optimizer
        Return:
            None

        Examples:
            .. code-block:: python

                import paddle

                emb = paddle.nn.Embedding(10, 10)

                layer_state_dict = emb.state_dict()
                paddle.save(layer_state_dict, "emb.pdparams")

                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                opt_state_dict = adam.state_dict()
                paddle.save(opt_state_dict, "adam.pdopt")

                opti_state_dict = paddle.load("adam.pdopt")
                adam.set_state_dict(opti_state_dict)

        '''
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])

        # NOTE: exclude learning rate scheduler's state from
        # _accumulators_holder.
        state_dict = state_dict.copy()
        if "LR_Scheduler" in state_dict:
            state_dict.pop("LR_Scheduler")
        if "master_weights" in state_dict:
            if hasattr(self, "_master_weights"):
                self._master_weights = state_dict["master_weights"]
            state_dict.pop("master_weights")
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert (
                    var_tmp.name in state_dict
                ), "optimizer Tensor {} not found".format(var_tmp.name)
                var = var_tmp.value()
                tensor = var.get_tensor()
                model_np = np.array(tensor)

                load_para = state_dict[var_tmp.name]

                if isinstance(load_para, Variable):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, core.VarBase):
                    load_para_np = load_para.numpy()
                elif isinstance(load_para, np.ndarray):
                    load_para_np = load_para
                else:
                    raise RuntimeError(
                        "State dict type {} not supported".format(
                            str(type(load_para))
                        )
                    )

                assert (
                    model_np.shape == load_para_np.shape
                ), "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
                    model_np.name, model_np.shape, load_para_np.shape
                )

                assert (
                    model_np.dtype == load_para_np.dtype
                ), "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
                    model_np.name, model_np.dtype, load_para_np.dtype
                )

                tensor.set(load_para_np, framework._current_expected_place())

    def get_opti_var_name_list(self):
        return self._opti_name_list

    def _create_global_learning_rate(self):
        # The lr var can't be float16 or bfloat16; for pure fp16 or bf16
        # training, the dtype of the lr var needs extra handling.
        _lr_dtype = (
            paddle.get_default_dtype() if self._dtype is None else self._dtype
        )
        _lr_dtype = (
            paddle.float32
            if (
                (
                    paddle.get_default_dtype() != "float16"
                    and _lr_dtype == paddle.float16
                )
                or (
                    paddle.get_default_dtype() != "bfloat16"
                    and _lr_dtype == paddle.bfloat16
                )
            )
            else _lr_dtype
        )
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[1],
                    persistable=True,
                    stop_gradient=True,
                    dtype=_lr_dtype,
                )
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var

                self._learning_rate_map[
                    framework.default_main_program()
                ] = lr_var

            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value)
            )
        elif isinstance(self._learning_rate, float):
            # only create global lr_var once
            lr = self._global_learning_rate()
            if isinstance(lr, framework.Variable):
                return
            else:
                self._learning_rate_map[
                    framework.default_main_program()
                ] = paddle.static.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(self._learning_rate),
                    dtype=_lr_dtype,
                    persistable=True,
                )

    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer uses an LRScheduler,
        this API cannot be invoked, because it would lead to a conflict.

        Args:
            value (float): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)

                adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                # set learning rate manually by python float value
                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6

        """
        if not isinstance(value, (int, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be float, but received %s."
                % (type(value))
            )
        if isinstance(self._learning_rate, LRScheduler):
            raise RuntimeError(
                "optimizer's learning rate can't be LRScheduler when invoke this API, because this will lead to conflict."
            )
        self._learning_rate = float(value)
        current_lr = self._global_learning_rate()
        if current_lr is not None:
            if in_dygraph_mode():
                place = _current_expected_place()
                _C_ops.full_(
                    current_lr,
                    list(current_lr.shape),
                    float(value),
                    current_lr.dtype,
                    place,
                )
            else:
                global_block = framework.default_main_program().global_block()
                global_block.append_op(
                    type='fill_constant',
                    outputs={'Out': [current_lr]},
                    attrs={
                        'dtype': current_lr.dtype,
                        'shape': list(current_lr.shape),
                        'value': float(value),
                    },
                    stop_gradient=True,
                )

    def get_lr(self):
        """
        Get the current learning rate of the optimizer.
        If 'LRScheduler' is not used, the return value is always the same.
        If 'LRScheduler' is used, the return value is the current scheduled learning rate.

        Returns:
            float: The current learning rate of the optimizer.

        Examples:
            .. code-block:: python

                # train on default dynamic graph mode
                import paddle
                import numpy as np
                emb = paddle.nn.Embedding(10, 3)

                ## example1: LRScheduler is not used, the return value is always the same
                adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
                    adam.step()

                ## example2: StepDecay is used, return the scheduled learning rate
                scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
                for batch in range(10):
                    input = paddle.randint(low=0, high=5, shape=[5])
                    out = emb(input)
                    out.backward()
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
                    adam.step()
                    scheduler.step()

                # train on static graph mode
                paddle.enable_static()
                main_prog = paddle.static.Program()
                start_prog = paddle.static.Program()
                with paddle.static.program_guard(main_prog, start_prog):
                    x = paddle.static.data(name='x', shape=[None, 10])
                    z = paddle.static.nn.fc(x, 100)
                    loss = paddle.mean(z)
                    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
                    adam = paddle.optimizer.Adam(learning_rate=scheduler)
                    adam.minimize(loss)

                exe = paddle.static.Executor()
                exe.run(start_prog)
                for batch in range(10):
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr()))     # 0.5->0.05->0.005...
                    out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
                    scheduler.step()

        """
        if isinstance(self._learning_rate, float):
            return self._learning_rate
        else:
            return self._learning_rate()

    def _global_learning_rate(self, program=None):
        """
        get global decayed learning rate
        :return:
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """append optimize operator to block and return all the added optimize_op"""
        raise NotImplementedError(
            "Class \"Optimizer\" cannot be used directly as an optimizer, please use its subclasses such as \"Adam\""
        )
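
    # Rough sketch of what a subclass is expected to provide here (hypothetical,
    # simplified; the real implementations live in the paddle.optimizer
    # subclasses such as SGD/Adam):
    #
    #     def _append_optimize_op(self, block, param_and_grad):
    #         lr = self._create_param_lr(param_and_grad)
    #         return block.append_op(
    #             type="sgd",
    #             inputs={"Param": param_and_grad[0],
    #                     "Grad": param_and_grad[1],
    #                     "LearningRate": lr},
    #             outputs={"ParamOut": param_and_grad[0]},
    #             stop_gradient=True,
    #         )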

    def _create_param_lr(self, param_and_grad):
        # create learning rate tensor for every parameter
        param = param_and_grad[0]
        if hasattr(param, 'optimize_attr'):
            param_lr = param.optimize_attr['learning_rate']
            if type(param_lr) == Variable:
                return param_lr
            else:
                if param_lr == 1.0:
                    return self._global_learning_rate()
                else:
                    with default_main_program()._lr_schedule_guard(
                        is_with_opt=True
                    ), framework.name_scope('scale_with_param_lr'):
                        return self._global_learning_rate() * param_lr
        else:
            return self._global_learning_rate()
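
    # Illustrative note: if a parameter was created with, e.g.,
    # paddle.ParamAttr(learning_rate=0.5) (hypothetical value), the tensor
    # returned above is the optimizer's global learning rate scaled by 0.5.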

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        pass

    def _finish_update(self, block, parameters_and_grads):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer

        Returns:
            None
        """
        pass

    def _add_accumulator(
        self,
        name,
        param,
        dtype=None,
        fill_value=0.0,
        shape=None,
        type=None,
        device=None,
    ):
        """Utility function to add an accumulator for a parameter

        Args:
            block: the block in which the loss tensor is present
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be added
            dtype: data type of the accumulator tensor
            fill_value: value to initialize the accumulator tensor
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name in self._accumulators
            and param.name in self._accumulators[name]
        ):
            if framework._non_static_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name
                )
            )
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if framework._in_eager_without_dygraph_check()
            else (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True,
        )
        if device is None:
            device = self._get_device_for_param(param.name)

        if (
            in_dygraph_mode()
            and (device == 'cpu' or isinstance(device, core.CPUPlace))
            and (not core.is_compiled_with_xpu())
        ):
            _C_ops.full_(
                var,
                var.shape,
                str(float(fill_value)),
                var.dtype,
                core.CPUPlace(),
            )
        else:
            with device_guard(device):
                self.helper.set_variable_initializer(
                    var, initializer=Constant(value=float(fill_value))
                )

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert (
                    var_name in self._accumulators_holder
                ), "Optimizer set error, {} should in state dict".format(
                    var_name
                )
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var
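
    # Hypothetical subclass sketch: an Adam-like optimizer would typically
    # register its per-parameter state via this helper in _create_accumulators,
    # e.g.:
    #
    #     for p in parameters:
    #         self._add_accumulator("moment1", p)
    #         self._add_accumulator("moment2", p)
    #         self._add_accumulator("beta1_pow_acc", p, fill_value=0.9, shape=[1])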

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter tensor for which accumulator is to be fetched

        Returns:
            accumulator tensor for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name not in self._accumulators
            or param.name not in self._accumulators[name]
        ):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, param.name
                )
            )
        return self._accumulators[name][param.name]

    def _update_param_device_map(self, parameters_and_grads, target_block):
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].stop_gradient is False:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = (
                    core.op_proto_and_checker_maker.kOpDeviceAttrName()
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name
                        )
                        break

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device
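
    # For reference (illustrative values): _param_device_map ends up looking
    # like {'linear_0.w_0': 'gpu:0', ...}, mapping a trainable parameter name to
    # the op_device attribute found on an op that consumes it, if any.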

    def _create_optimization_pass(
        self, parameters_and_grads, param_group_idx=0
    ):
        """Add optimization operators to update gradients to tensors.

        Args:
          parameters_and_grads(list(tuple(Tensor, Tensor))):
            a list of (tensor, gradient) pair to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.

        # Always called under program_guard, using the global block as the loss block.
        # But if the current block is in control flow, append the optimize op in the
        # grad block of the current block.

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert (
                current_block.backward_block_idx != -1
            ), "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx
            ]

        start = len(target_block.ops)
        self.helper = LayerHelper(self.__class__.__name__)

        self._create_global_learning_rate()

        # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
        if self._use_multi_tensor and self.__class__.__name__ in [
            'Momentum',
            'Adam',
        ]:
            if (
                len(self._param_dict['FP32_LODTensor'][param_group_idx]) == 0
                and len(self._param_dict['FP16_LODTensor'][param_group_idx])
                == 0
            ):
                if isinstance(parameters_and_grads, list):
                    assert param_group_idx == 0
                    self._multi_tensor_init(
                        target_block,
                        [
                            p[0]
                            for p in parameters_and_grads
                            if not p[0].stop_gradient
                        ],
                        param_group_idx,
                    )
                else:
                    self._update_param_group(parameters_and_grads)
                    self._multi_tensor_init(
                        target_block,
                        [
                            p[0]
                            for p in parameters_and_grads['params']
                            if not p[0].stop_gradient
                        ],
                        param_group_idx,
                    )
            if framework._non_static_mode():
                self._append_optimize_multi_tensor_op(
                    target_block,
                    parameters_and_grads,
                    param_group_idx=param_group_idx,
                )
            else:
                self._update_param_device_map(
                    parameters_and_grads, target_block
                )
                # NOTE: Multi Tensor requires all parameters to be in the same device and program.
                # param_grad_list = [p_0,g_0,p_1,g_1,....]
                param_grad_list = []
                for param_and_grad in parameters_and_grads:
                    if (
                        not param_and_grad[0].stop_gradient
                        and param_and_grad[1] is not None
                    ):
                        param_grad_list.append(param_and_grad[0])
                        param_grad_list.append(param_and_grad[1])
                with param_grad_list[0].block.program._optimized_guard(
                    param_grad_list
                ), name_scope("optimizer"):
                    device = self._get_device_for_param(param_grad_list[0].name)
                    with device_guard(device):
                        self._append_optimize_multi_tensor_op(
                            target_block,
                            parameters_and_grads,
                            param_group_idx=param_group_idx,
                        )
        else:
            if not framework._non_static_mode():
                params_grads_device_map = (
                    parameters_and_grads['params']
                    if isinstance(parameters_and_grads, dict)
                    else parameters_and_grads
                )
                self._update_param_device_map(
                    params_grads_device_map, target_block
                )

            if isinstance(parameters_and_grads, list):
                self._create_accumulators(
                    target_block,
                    [
                        p[0]
                        for p in parameters_and_grads
                        if not p[0].stop_gradient
                    ],
                )
            else:
                params_acc_dict = parameters_and_grads.copy()
                params_acc_dict['params'] = [
                    p[0]
                    for p in params_acc_dict['params']
                    if not p[0].stop_gradient
                ]
                self._create_accumulators(target_block, params_acc_dict)

            if framework._non_static_mode():
                found_inf = self._get_auxiliary_var('found_inf')
                if found_inf:
                    if isinstance(found_inf, core.eager.Tensor):
                        self._set_auxiliary_var('found_inf', True)
                else:
                    if isinstance(found_inf, core.eager.Tensor):
                        self._set_auxiliary_var('found_inf', False)
                    if isinstance(parameters_and_grads, list):
                        for param_and_grad in parameters_and_grads:
                            if param_and_grad[1] is None:
                                continue
                            if param_and_grad[0].stop_gradient is False:
                                self._append_optimize_op(
                                    target_block, param_and_grad
                                )
                    else:
                        for param_and_grad in parameters_and_grads['params']:
                            if param_and_grad[1] is None:
                                continue
                            if param_and_grad[0].stop_gradient is False:
                                param_grad_dict = dict()
                                param_grad_dict['params'] = param_and_grad
                                param_grad_dict.update(
                                    {
                                        k: v
                                        for k, v in parameters_and_grads.items()
                                        if k != 'params'
                                    }
                                )
                                self._append_optimize_op(
                                    target_block, param_grad_dict
                                )
            else:
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    with param_and_grad[0].block.program._optimized_guard(
                        param_and_grad
                    ), name_scope("optimizer"):
                        if param_and_grad[0].stop_gradient is False:
                            device = self._get_device_for_param(
                                param_and_grad[0].name
                            )
                            with device_guard(device):
                                optimize_op = self._append_optimize_op(
                                    target_block, param_and_grad
                                )

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)

    def _append_dgc_ops(self, param_and_grad):
        pass

    def backward(
        self,
        loss,
        startup_program=None,
        parameters=None,
        no_grad_set=None,
        callbacks=None,
    ):
        """
        The first part of ``minimize``, do auto-diff to append backward operations for
        the current program.

        Args:
            loss (Tensor): ``loss`` tensor to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.

        Return:
            list: list of (param, grad) tensor pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.arange(26, dtype="float32").reshape([2, 13])

                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(x)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        act_no_grad_set = None
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if framework.in_dygraph_mode():
            parameter_list = parameters if parameters else self._parameter_list

            # It is very time-consuming to call c++ functions in a loop on the python side.
            # We put this part of the code on the c++ side to improve the speed in eager mode.
            params_grads = []
            grads = core.eager.get_all_grads(parameter_list)
            for index, grad in enumerate(grads):
                if grad is not None:
                    params_grads.append((parameter_list[index], grad))
        else:
            if callbacks is None:
                callbacks = [paddle.nn.clip.error_clip_callback]
            else:
                assert isinstance(callbacks, list)
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, (
                "The loss.shape should be [1], but the current loss.shape is {}. "
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape
                )
            )
            parameter_list = parameters if parameters else self._parameter_list
            with program_guard(program, startup_program):
                from paddle.incubate.autograd.utils import prim_enabled

                if prim_enabled():
                    params_grads = append_backward_new(
                        [loss], parameter_list, act_no_grad_set, callbacks
                    )
                else:
                    params_grads = append_backward(
                        loss, parameter_list, act_no_grad_set, callbacks
                    )
                # Note: since we can't use all_reduce_op now,
                #  dgc_op should be the last op of one grad.
                self._append_dgc_ops(params_grads)
        return params_grads

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle

                inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
                linear = paddle.nn.Linear(10, 10)
                out = linear(inp)
                loss = paddle.mean(out)
                optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters())
                params_grads = optimizer.backward(loss)
                optimizer.apply_gradients(params_grads)

        """

        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:
            params_grads = paddle.nn.clip.append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(
            params_grads, self.regularization
        )

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

    def _apply_optimize(
        self, loss, startup_program, params_grads, param_group_idx=0
    ):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Tensor): loss tensor to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameters`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
            with program_guard(
                framework.default_main_program(),
                framework.default_startup_program(),
            ):
                if isinstance(params_grads, list):
                    if self._grad_clip is not None:
                        params_grads = self._grad_clip(params_grads)
                    params_grads = self.append_regularization_ops(
                        params_grads, self.regularization
                    )
                else:
                    grad_clip = params_grads['grad_clip']
                    if grad_clip is not None:
                        params_grads['params'] = grad_clip(
                            params_grads['params']
                        )

                    params_grads['params'] = self.append_regularization_ops(
                        params_grads['params'], self.regularization
                    )
                optimize_ops = self._create_optimization_pass(
                    params_grads, param_group_idx=param_group_idx
                )
        else:
            assert param_group_idx == 0
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops
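
    # Illustrative note: in dygraph mode ``params_grads`` may be either a plain
    # list of (param, grad) pairs or a per-group dict such as (hypothetical)
    #     {'params': [(w, w_grad), (b, b_grad)], 'grad_clip': clip, 'learning_rate': 0.1}
    # in which case clipping and regularization settings come from the group.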

    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """Create and add backward regularization Operators

        Function helper of append_regularization_ops.
        """
        # If no gradient or no regularization is specified, then we don't need to do anything
        if grad is None or (
            (
                not hasattr(param, 'regularizer')
                or (hasattr(param, 'regularizer') and param.regularizer is None)
            )
            and regularization is None
        ):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

        if framework.in_dygraph_mode():
            return _C_ops.add_n([grad, regularization_term])
        else:
            new_grad = grad
            if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
                # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
                # the grad's type and name will be changed. But the gradient's name
                # is used in ParallelExecutor Reduce mode, so I add a flag for
                # the new_grad here.
                new_grad = grad.block.create_var(
                    name=grad.name + core.kNewGradSuffix(),
                    dtype=param.dtype,
                    shape=param.shape,
                    lod_level=param.lod_level,
                    type=core.VarDesc.VarType.LOD_TENSOR,
                )

            inputs = {"X": [grad, regularization_term]}
            outputs = {"Out": [new_grad]}
            grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)

            return new_grad
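
    # Illustrative note: for a float weight_decay (L2Decay(coeff)), the
    # regularization term computed above is essentially coeff * param, so the
    # returned gradient behaves like grad + coeff * param (ignoring the
    # SELECTED_ROWS special case handled above).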

    def append_regularization_ops(
        self, parameters_and_grads, regularization=None
    ):
        r"""Create and add backward regularization Operators

        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.

        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. It will be applied to a
                            parameter whose own regularizer is not set.

        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient

        Raises:
            Exception: Unknown regularization type
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization
                )
                params_and_grads.append((param, new_grad))
        else:
            repeate_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if (
                        not repeate_regularizer
                        and param.regularizer is not None
                        and regularization is not None
                    ):
1248 1249 1250 1251
                        repeate_regularizer = True
                        logging.info(
                            "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. "
                            "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
1252 1253
                            % regularization.__str__()
                        )
1254 1255
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
1256 1257
                            param, grad, regularization
                        )
1258 1259 1260
                        params_and_grads.append((param, new_grad))
        return params_and_grads

    def _get_no_grad_set(self, loss, no_grad_set=None):
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = {
            param.name for param in parameters if param.stop_gradient is True
        }
        # If the parameter is not trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

    @framework.dygraph_only
    def clear_grad(self, set_to_zero=True):
        """
        Clear the gradients of all optimized parameters for the model.

        If the gradients are not cleared, new gradients will accumulate on top of
        the previous ones.

        There are two ways to clear gradients: set them to zero or delete them.

        Args:
            set_to_zero (bool, optional): Whether to set the gradients to zero
                instead of deleting them. Default is True.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                a = paddle.arange(26, dtype="float32").reshape([2, 13])
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate=0.01,
                                             parameters=linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()

        """
        param_list = []
        if self._parameter_list is None or not isinstance(
            self._parameter_list[0], dict
        ):
            for p in self._parameter_list:
                if not p.stop_gradient:
                    param_list.append(p)
        else:
            for param_group in self._param_groups:
                for p in param_group['params']:
                    if not p.stop_gradient:
                        param_list.append(p)

        if _in_eager_without_dygraph_check():
            for p in param_list:
                p.clear_gradient(set_to_zero)
        else:
            core.clear_gradients(param_list, set_to_zero)

    @imperative_base.no_grad()
    def minimize(
        self, loss, startup_program=None, parameters=None, no_grad_set=None
    ):
        """
        Add operations to minimize ``loss`` by updating ``parameters``.

        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), a list of operators appended
            by minimize and a list of (param, grad) tensor pairs, where param is
            ``Parameter`` and grad is the gradient value corresponding to the parameter.
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)
                input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
                out = linear(input)
                loss = paddle.mean(out)

                beta1 = paddle.to_tensor([0.9], dtype="float32")
                beta2 = paddle.to_tensor([0.99], dtype="float32")

                adam = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters(),
                        weight_decay=0.01)
                loss.backward()
                adam.minimize(loss)
                adam.clear_grad()

        """
        assert isinstance(loss, Variable), "The loss should be a Tensor."

        parameter_list = parameters if parameters else self._parameter_list

        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameters=parameter_list,
            no_grad_set=no_grad_set,
        )

        optimize_ops = self._apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads
        )

        return optimize_ops, params_grads

    @imperative_base.no_grad()
    @framework.dygraph_only
    def step(self):
        """
        Execute the optimizer and update parameters once.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                a = paddle.arange(26, dtype="float32").reshape([2, 13])
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate=0.01,
                                             parameters=linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """

        # If no parameter groups were given, self._param_groups is a flat list
        # of parameters; otherwise each group is a dict carrying its own options.
        if not isinstance(self._param_groups[0], dict):
            params_grads = []
            for param in self._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))

            self._apply_optimize(
                loss=None,
                startup_program=None,
                params_grads=params_grads,
                param_group_idx=0,
            )

        else:
            # optimize parameters in groups
            for idx, param_group in enumerate(self._param_groups):
                params_grads = defaultdict(list)
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                params_grads.update(
                    {k: v for k, v in param_group.items() if k != 'params'}
                )
                self._apply_optimize(
                    loss=None,
                    startup_program=None,
                    params_grads=params_grads,
                    param_group_idx=idx,
                )

    def _add_param_group(self, param_group):
        """
        Add a param group to parameter_list.

        Args:
            param_group (dict): The group of Tensors to be optimized with
            different optimization options.
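
        A ``param_group`` is typically a dict of the following form (sketch
        only; the ``weight_decay`` and ``learning_rate`` values are
        illustrative)::

            {
                'params': linear.parameters(),
                'weight_decay': 0.001,
                'learning_rate': 0.1,
            }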
        """
        params = param_group['params']
        if isinstance(params, Parameter):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters should be in ordered collections,"
1460 1461
                "but received set, please use list instead."
            )
        else:
            param_group['params'] = list(params)

        # Update optimization options for each group
        for k, v in self._default_dict.items():
            param_group.setdefault(k, v)

        param_set = set()
        for group in self._param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group"
            )

        # Attach the group's weight_decay to each parameter as its regularizer;
        # a plain float is interpreted as an L2 penalty coefficient.
        for param in param_group['params']:
            weight_decay = param_group['weight_decay']
            if isinstance(weight_decay, float):
                from ..fluid.regularizer import L2Decay

                regularization = L2Decay(weight_decay)
            else:
                regularization = weight_decay
            param.regularizer = regularization
            param.optimize_attr['learning_rate'] = param_group.get(
                'learning_rate', 1.0
            )

        self._param_groups.append(param_group)

    def _update_param_group(self, parameters):
        """
        Update the param group with new entries.

        Args:
            parameters (dict): The extra group of Tensors to be optimized with
            different optimization options. Only used in child classes.
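
        A child class typically pulls its own hyper-parameters out of the
        group, for example (sketch; the ``beta1`` key is illustrative)::

            self._beta1 = parameters.get('beta1', self._default_dict['beta1'])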
        """
        pass

    @framework.dygraph_only
    def _multi_tensor_init(self, target_block, parameters, param_group_idx):
        """
        All parameters used by the optimizer (such as: parameters, master_weight,
        velocity_acc for momentum) are grouped into Python lists by data type
        (float16, float32). This function will be overridden in the corresponding
        optimizer file.

        Args:
            target_block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
            param_group_idx: index of the parameter group being initialized
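
        A subclass might, for instance, bucket the parameters like this
        (sketch only; the attribute names are illustrative)::

            self._fp16_params[param_group_idx] = [
                p for p in parameters if self._is_dtype_fp16_or_bf16(p.dtype)
            ]
            self._fp32_params[param_group_idx] = [
                p for p in parameters if not self._is_dtype_fp16_or_bf16(p.dtype)
            ]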
        """
        pass

    @framework.dygraph_only
    def _append_optimize_multi_tensor_op(
        self, target_block, parameters_and_grads, param_group_idx
    ):
        """
        For multi-tensor optimizers, append the merged optimize operator to the block.
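
        A subclass typically appends one fused operator per dtype bucket built
        in ``_multi_tensor_init`` (for example ``merged_momentum``), instead of
        one operator per parameter; the op name here is illustrative.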
        """
        pass

    def _is_dtype_fp16_or_bf16(self, dtype):
        """
        Check whether the dtype is fp16 or bf16.

        :param dtype: instance of core.VarDesc.VarType
        :return: True if dtype is fp16 or bf16, False otherwise
        """
        assert isinstance(
            dtype, core.VarDesc.VarType
        ), "The dtype should be an instance of core.VarDesc.VarType."
        return (
            dtype == core.VarDesc.VarType.FP16
            or dtype == core.VarDesc.VarType.BF16
        )