optimizer.py 55.1 KB
Newer Older
1
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14

15
import numpy as np
16
import os
17
import logging
18
from collections import defaultdict
19

20
import paddle
21

22

23 24 25 26 27 28 29 30 31
from paddle.fluid.framework import (
    Program,
    Variable,
    Parameter,
    name_scope,
    default_main_program,
    default_startup_program,
    device_guard,
)
32

33 34
from . import framework
from . import layers
35
from . import unique_name
36 37 38 39 40 41
from .backward import (
    append_backward,
    _some_in_set_,
    _append_grad_suffix_,
    _get_no_grad_set_name,
)
42 43
from .framework import program_guard
from .layer_helper import LayerHelper
44
from .dygraph import base as imperative_base
45
from .dygraph import no_grad
46 47 48 49
from .dygraph.learning_rate_scheduler import (
    LearningRateDecay,
    _LearningRateEpochDecay,
)
50 51
from paddle.fluid import core
from functools import reduce
52
from functools import cmp_to_key
53
from .wrapped_decorator import signature_safe_contextmanager
54
import warnings
55
from paddle import _C_ops, _legacy_C_ops
56 57 58 59
from ..fluid.framework import (
    in_dygraph_mode,
    _current_expected_place,
)
60

L
LoneRanger 已提交
61
__all__ = []
Q
Qiao Longfei 已提交
62 63


64
class Optimizer:
Q
Qiao Longfei 已提交
65 66 67
    """Optimizer Base class.

    Define the common interface of an optimizer.
68 69
    Users should not use this class directly,
    but need to use one of its implementations.
Q
Qiao Longfei 已提交
70 71
    """

72
    @imperative_base.no_grad
    def __init__(
        self,
        learning_rate,
        parameter_list=None,
        regularization=None,
        grad_clip=None,
        flatten_param_grads=False,
        align_size=-1,
        name=None,
    ):
        """Initialize the state shared by all optimizer implementations.

        Args:
            learning_rate (float|Variable|LearningRateDecay|LRScheduler):
                The learning rate. In dygraph mode it must be a float,
                LearningRateDecay or LRScheduler; in static mode a
                framework.Variable is also accepted.
            parameter_list (list, optional): Parameters to optimize. Must
                not be None in dygraph mode.
            regularization (optional): Global regularization strategy.
                A per-parameter regularizer set via ParamAttr takes
                precedence over it (see the loop below).
            grad_clip (GradientClipBase, optional): Gradient clipping
                strategy; must derive from GradientClipBase if given.
            flatten_param_grads (bool, optional): Whether to flatten all the
                parameters and grads. If true, the parameters and gradients
                will be coalesced to contiguous memory, and the grad_clip
                ops / optimizer ops will be fused into one operator.
            align_size (int, optional): Alignment used when flattening
                param/grads -- presumably in bytes; TODO confirm unit.
            name (str, optional): Prefix used when naming accumulators.
        """
        # Because of the loop import, so place it in the function body
        from paddle.optimizer.lr import LRScheduler

        # Copy the caller's list so later mutations by the caller do not
        # affect the optimizer.
        self._parameter_list = (
            list(parameter_list) if parameter_list is not None else None
        )
        self._name = name
        if in_dygraph_mode():
            if not isinstance(
                learning_rate, (float, LearningRateDecay, LRScheduler)
            ):
                raise TypeError(
                    "learning rate should be float or LRScheduler, got %s here"
                    % type(learning_rate)
                )
            if self._parameter_list is None:
                raise AttributeError(
                    "parameter_list argument given to the Optimizer should not be None in dygraph mode."
                )
            # Warn (once) that per-parameter regularizers override the
            # optimizer-level regularization.
            if regularization is not None:
                for param in self._parameter_list:
                    if param.regularizer is not None:
                        logging.info(
                            "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. "
                            "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                            % regularization.__str__()
                        )
                        break
        else:
            if not isinstance(
                learning_rate, (float, framework.Variable, LRScheduler)
            ):
                raise TypeError(
                    "learning rate should be float or LRScheduler, got %s here"
                    % type(learning_rate)
                )

        if grad_clip is not None:
            if not isinstance(grad_clip, paddle.nn.clip.GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        self.regularization = regularization
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate
        self._flatten_param_grads = flatten_param_grads
        self._align_size = align_size

        self._dtype = None
        # Infer the dtype form parameter
        if self._parameter_list:
            self._dtype = self._parameter_list[0].dtype

        # each program should have a independent learning rate
        # program -> Variable(learning_rate)
        self._learning_rate_map = dict()
        if isinstance(self._learning_rate, framework.Variable):
            self._learning_rate_map[
                framework.default_main_program()
            ] = self._learning_rate
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra variables associated with the parameters
        # to train. These variables are called accumulators.
        # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        # global_accumulator dict, {accum_name : acc_variable, ...}
        self._global_accumulators = {}
        self.helper = LayerHelper(self.__class__.__name__)
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        # NOTE(zhiqiu): sometimes we want to add some variables(Tenosr) to the optimizer for a specific optimization,
        # for example, we want to pass 'found_inf' to adam optimizer so it can skip update when found_inf is True.
        # And these variables should not be the parameters of Optimizer's construnctor (because not commonly used).
        # Use _auxiliary_vars together with _set_auxiliary_var/_get_auxiliary_var to achieve that.
        self._auxiliary_vars = dict()
H
hong 已提交
165 166 167 168

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get state dict information from the optimizer. It contains all the
        variables used by the optimizer. For the Adam optimizer, it contains
        beta1, beta2, momentum etc. If LearningRateDecay has been used,
        global_step will be included in the state dict. If the optimizer has
        never been called (minimize function), the state_dict is empty.

        Args: None
        Return:
            state_dict(dict) : dict contains all the variables used by the optimizer

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import paddle

                with fluid.dygraph.guard():
                    emb = paddle.nn.Embedding(10, 10)

                    adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
                    state_dict = adam.state_dict()

        '''
        from paddle.optimizer.lr import LRScheduler

        state_dict = {}
        # Per-parameter accumulators, keyed by the accumulator variable name.
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        # Model-wide accumulators.
        for k, v in self._global_accumulators.items():
            state_dict[v.name] = v
        # global step if use lr decay
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
            return state_dict
        if isinstance(self._learning_rate, LearningRateDecay):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()

            # Epoch-based schedules do not track a per-step counter, so
            # 'global_step' is only saved for step-based decays.
            if not isinstance(self._learning_rate, _LearningRateEpochDecay):
                var_tmp = None
                var_temp = framework._create_tensor(
                    None, name='global_step', dtype='int32'
                )

                # Materialize the python step counter into a tensor so it can
                # be serialized alongside the other state.
                paddle.tensor.fill_constant(
                    [1], "int32", self._learning_rate.step_num, out=var_temp
                )

                state_dict['global_step'] = var_temp
        return state_dict

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load an optimizer state dict. For the Adam optimizer, it contains
        beta1, beta2, momentum etc. If LearningRateDecay has been used,
        global_step will be changed.

        Args:
            state_dict(dict) : Dict contains all the Variables needed by the optimizer
        Return:
            None

        Examples:
            .. code-block:: python

                import paddle

                paddle.disable_static()

                emb = paddle.nn.Embedding(10, 10)

                state_dict = emb.state_dict()
                paddle.save(state_dict, "paddle_dy.pdparams")

                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                state_dict = adam.state_dict()
                paddle.save(state_dict, "paddle_dy.pdopt")

                para_state_dict = paddle.load("paddle_dy.pdparams")
                opti_state_dict = paddle.load("paddle_dy.pdopt")
        '''
        from paddle.optimizer.lr import LRScheduler

        # Restore the learning-rate schedule first.
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_dict(state_dict["LR_Scheduler"])

        if isinstance(self._learning_rate, LearningRateDecay):
            self._learning_rate.set_dict(state_dict["LR_Scheduler"])

            # Step-based decays also need the saved step counter.
            if not isinstance(self._learning_rate, _LearningRateEpochDecay):
                assert (
                    'global_step' in state_dict
                ), 'Global step not in state dict, Dygraph use LearningRateDecay, global_step must in state_dict'
                global_step = state_dict['global_step']

                if isinstance(global_step, Variable):
                    step_np = global_step
                    step_np = np.array(step_np.value().get_tensor())
                    assert step_np.shape == (
                        1,
                    ), "global step shape is (1,), the shape is {}".format(
                        step_np.shape
                    )

                    self._learning_rate.step_num = int(step_np[0])
                elif isinstance(global_step, np.ndarray):
                    assert global_step.shape == (
                        1,
                    ), "global step shape is (1,), the shape is {}".format(
                        global_step.shape
                    )
                    self._learning_rate.step_num = global_step[0]
                else:
                    raise RuntimeError(
                        "Type not supprt, value in state dict must be [Tensor, Variable, numpy], the type is ",
                        type(global_step),
                    )

        def _load_state_para(state_dict, param):
            # Copy the saved value into `param` in place, after validating
            # that shape and dtype match the live tensor.
            var = param.value()
            tensor = var.get_tensor()
            model_np = np.array(tensor)
            load_para = state_dict[param.name]
            if isinstance(load_para, Variable):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, core.eager.Tensor):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, np.ndarray):
                load_para_np = load_para
            else:
                raise RuntimeError(
                    "State dict type {} not supprt".format(str(type(load_para)))
                )

            assert (
                model_np.shape == load_para_np.shape
            ), "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
                param.name, model_np.shape, load_para_np.shape
            )

            assert (
                model_np.dtype == load_para_np.dtype
            ), "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
                param.name, model_np.dtype, load_para_np.dtype
            )

            tensor.set(load_para_np, framework._current_expected_place())

        # Keep the raw dict around so accumulators created later (lazily,
        # in _add_accumulator) can pick up their saved values.
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert (
                    var_tmp.name in state_dict
                ), "optimizer variable {} not found".format(var_tmp.name)
                _load_state_para(state_dict, var_tmp)

        for k, v in self._global_accumulators.items():
            assert (
                v.name in state_dict
            ), "optimizer variable {} not found".format(v.name)
            _load_state_para(state_dict, v)

    # [aliases] Compatible with old method names
    set_dict = set_state_dict

334 335
    def get_opti_var_name_list(self):
        return self._opti_name_list
Q
Qiao Longfei 已提交
336

337 338 339 340 341 342 343 344 345
    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _get_auxiliary_var(self, key):
        if key in self._auxiliary_vars:
            return self._auxiliary_vars[key]
        else:
            return None

Q
Qiao Longfei 已提交
346
    def _create_global_learning_rate(self):
        """Create (or refresh) the global learning-rate variable for the
        current default main program, based on the type of
        ``self._learning_rate`` (LRScheduler / float / LearningRateDecay).
        """
        from paddle.optimizer.lr import LRScheduler

        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[1],
                    persistable=True,
                    stop_gradient=True,
                    dtype='float32' if self._dtype is None else self._dtype,
                )
                # Attach the scheduler and its variable to the program so the
                # executor can update the lr each step.
                main_prog = framework.default_main_program()
                main_prog.lr_scheduler = self._learning_rate
                main_prog.lr_var = lr_var
                self._learning_rate_map[
                    framework.default_main_program()
                ] = lr_var

            # (Re-)initialize the variable with the scheduler's current value.
            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var,
                initializer=paddle.nn.initializer.Constant(value=lr_value),
            )
            return

        if imperative_base.enabled():
            # create learning rate Variable
            if isinstance(self._learning_rate, float):
                lr = self._global_learning_rate()

                if isinstance(lr, framework.Variable):
                    return
                else:
                    self._learning_rate_map[
                        framework.default_main_program()
                    ] = paddle.static.create_global_var(
                        name=unique_name.generate("learning_rate"),
                        shape=[1],
                        value=float(self._learning_rate),
                        dtype='float32' if self._dtype is None else self._dtype,
                        persistable=True,
                    )
            # get learning rate Variable from LearningRateDecay
            elif isinstance(self._learning_rate, LearningRateDecay):
                self._learning_rate_map[
                    framework.default_main_program()
                ] = self._learning_rate()
            else:
                raise TypeError(
                    "optimizer's learning rate must be float or LearningRateDecay"
                )
        else:
            lr = self._global_learning_rate()

            if isinstance(lr, framework.Variable):
                return
            else:
                # A Variable learning rate was created against a different
                # program; we cannot recreate it here.
                if not isinstance(self._learning_rate, float):
                    raise TypeError(
                        "learning rate variable is create outside optimizer,"
                        "can not create new learning rate variable for new program"
                    )

            # create learning rate in the current main program
            self._learning_rate_map[
                framework.default_main_program()
            ] = paddle.static.create_global_var(
                name=unique_name.generate("learning_rate"),
                shape=[1],
                value=float(self._learning_rate),
                dtype='float32' if self._dtype is None else self._dtype,
                persistable=True,
            )
424

425 426 427 428
    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the
        optimizer uses LearningRateDecay, this API cannot be invoked, because
        it would lead to a conflict.

        Args:
            value (float|Variable): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid

                with fluid.dygraph.guard():
                    linear = paddle.nn.Linear(10, 10)

                    adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                    # set learning rate manually by python float value
                    lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                    for i in range(5):
                        adam.set_lr(lr_list[i])
                        lr = adam.get_lr()
                        print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.2
                    #    current lr is 0.3
                    #    current lr is 0.4
                    #    current lr is 0.5
                    #    current lr is 0.6

        """
        if not isinstance(value, (framework.Variable, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be (float, Variable), but received %s."
                % (type(value))
            )
        if isinstance(self._learning_rate, LearningRateDecay):
            raise RuntimeError(
                "optimizer's learning rate can't be LearningRateDecay when invoke this API, because this will lead to conflict."
            )
        if isinstance(value, float):
            self._learning_rate = value
            current_lr = self._global_learning_rate()
            # If a learning-rate variable already exists for the current
            # program, overwrite its value in place.
            if current_lr is not None:
                if in_dygraph_mode():
                    place = _current_expected_place()
                    # In-place fill of the existing lr tensor.
                    _C_ops.full_(
                        current_lr,
                        list(current_lr.shape),
                        float(value),
                        current_lr.dtype,
                        place,
                    )
                else:
                    # Static graph: append a fill_constant op that rewrites
                    # the lr variable at execution time.
                    global_block = (
                        framework.default_main_program().global_block()
                    )
                    global_block.append_op(
                        type='fill_constant',
                        outputs={'Out': [current_lr]},
                        attrs={
                            'dtype': current_lr.dtype,
                            'shape': list(current_lr.shape),
                            'value': float(value),
                        },
                        stop_gradient=True,
                    )
        else:
            assert (
                len(value.shape) == 1 and value.shape[0] == 1
            ), "optimizer's learning rate must be 1-D Tensor with shape[1]"
            self._learning_rate_map[framework.default_main_program()] = value

511 512 513
    @framework.dygraph_only
    def current_step_lr(self):
        """
        :api_attr: imperative

        Get the current step learning rate. The return value is always the
        same when LearningRateDecay is not used; otherwise return the step
        learning rate.

        Returns:
            float: The learning rate of the current step.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np
                import paddle

                # example1: LearningRateDecay is not used, return value is all the same
                with fluid.dygraph.guard():
                    emb = paddle.nn.Embedding(10, 10)
                    adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters())
                    lr = adam.get_lr()
                    print(lr) # 0.001

                # example2: PiecewiseDecay is used, return the step learning rate
                with fluid.dygraph.guard():
                    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
                    linear = paddle.nn.Linear(10, 10)
                    inp = fluid.dygraph.to_variable(inp)
                    out = linear(inp)
                    loss = paddle.mean(out)

                    bd = [2, 4, 6, 8]
                    value = [0.2, 0.4, 0.6, 0.8, 1.0]
                    adam = paddle.optimizer.Adam(paddle.optimizer.lr.PiecewiseDecay(bd, value),
                                           parameters=linear.parameters())

                    # first step: learning rate is 0.2
                    np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True

                    # learning rate for different steps
                    ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
                    for i in range(12):
                        adam.minimize(loss)
                        adam.step()
                        lr = adam.get_lr()
                        np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True

        """
        current_lr = self._global_learning_rate()
        # A materialized lr Variable takes precedence over the python-side value.
        if isinstance(current_lr, framework.Variable):
            return float(current_lr)

        if isinstance(self._learning_rate, float):
            return self._learning_rate
        elif isinstance(self._learning_rate, _LearningRateEpochDecay):
            step_lr = self._learning_rate()
            return float(step_lr)
        else:
            # Step-based decay: evaluate the schedule at the current step.
            step_lr = self._learning_rate.step()
            if isinstance(step_lr, (float, int)):
                return step_lr
            else:
                return float(step_lr)
576

Y
yuyang18 已提交
577
    def _global_learning_rate(self, program=None):
Q
Qiao Longfei 已提交
578 579 580 581
        """
        get global decayed learning rate
        :return:
        """
582 583
        if program is None:
            program = framework.default_main_program()
Q
qiaolongfei 已提交
584
        return self._learning_rate_map.get(program, None)
Q
Qiao Longfei 已提交
585

Q
Qiao Longfei 已提交
586
    def _append_optimize_op(self, block, param_and_grad):
587
        """append optimize operator to block and return all the added optimize_op"""
Q
Qiao Longfei 已提交
588 589
        raise NotImplementedError()

590 591 592 593
    def _create_param_lr(self, param_and_grad):
        """Build the effective learning rate for a single parameter.

        The per-parameter ``optimize_attr['learning_rate']`` may be a
        Variable (used as-is), the default 1.0 (the global learning rate
        is reused directly), or another float that scales the global
        learning rate.
        """
        param = param_and_grad[0]
        param_lr = param.optimize_attr['learning_rate']
        # NOTE(review): exact-type comparison (not isinstance) kept from
        # the original — subclasses of Variable fall through on purpose?
        if type(param_lr) == Variable:
            return param_lr
        if param_lr == 1.0:
            return self._global_learning_rate()
        with default_main_program()._lr_schedule_guard(
            is_with_opt=True
        ), framework.name_scope('scale_with_param_lr'):
            return self._global_learning_rate() * param_lr
604

605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646
    def _is_dtype_fp16_or_bf16(self, dtype):
        """Return True when *dtype* is float16 or bfloat16.

        Args:
            dtype: instance of core.VarDesc.VarType.

        Returns:
            bool: True if dtype is one of FP16/BF16, False otherwise.
        """
        assert isinstance(
            dtype, core.VarDesc.VarType
        ), "The dtype should be an instance of core.VarDesc.VarType."
        return dtype in (
            core.VarDesc.VarType.FP16,
            core.VarDesc.VarType.BF16,
        )

    def _create_master_weight(self, param):
        """Return the FP32 master-weight variable for *param*, creating it
        (and a cast op in the startup program that copies the param's value
        into it) on first use.

        NOTE(review): relies on ``self._master_weights`` being initialized
        elsewhere (not in the visible ``__init__``) — presumably by mixed
        precision subclasses; confirm before reuse.
        """
        if param.name in self._master_weights:
            var = self._master_weights[param.name]
        else:
            assert isinstance(self.helper, LayerHelper)

            var_name = param.name + "_fp32_master"
            var_name = unique_name.generate(var_name)
            var = paddle.static.create_global_var(
                name=var_name,
                shape=param.shape,
                value=0,
                dtype='float32',
                persistable=True,
            )
            # Initialize the master weight from the (low precision) param
            # by casting once in the startup program.
            block = self.helper.startup_program.global_block()
            block.append_op(
                type="cast",
                inputs={"X": [param]},
                outputs={"Out": [var]},
                attrs={
                    "in_dtype": param.dtype,
                    "out_dtype": core.VarDesc.VarType.FP32,
                },
            )
            self._master_weights[param.name] = var
        return var

647 648 649 650 651 652
    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss variable is present
            parameters: list of parameter variables for the optimizer
Q
Qiao Longfei 已提交
653
        """
654 655
        pass

656
    def _finish_update(self, block, parameters_and_grads):
657 658 659 660 661 662 663 664
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss variable is present
            parameters: list of parameter variables for the optimizer

        Returns:
Q
qiaolongfei 已提交
665
            None
666 667 668
        """
        pass

669 670 671 672 673 674 675 676 677 678
    def _add_accumulator(
        self,
        name,
        param,
        dtype=None,
        fill_value=0.0,
        shape=None,
        type=None,
        device=None,
    ):
        """Utility function to add an accumulator for a parameter.

        Args:
            name: name of the accumulator
            param: parameter variable for which the accumulator is added
            dtype: data type of the accumulator variable (defaults to the
                parameter's dtype)
            fill_value: value to initialize the accumulator variable
            shape: shape of the accumulator (defaults to the parameter's shape)
            type: variable type of the accumulator (static graph only;
                defaults to the parameter's type)
            device: device to place the accumulator on (defaults to the
                device recorded for the parameter)

        Returns:
            The created (or, in dygraph mode, previously created)
            accumulator variable.
        """
        # Prefix with the optimizer's name so accumulators of different
        # optimizers over the same parameter do not collide.
        if self._name is not None:
            name = self._name + "_" + name
        if (
            name in self._accumulators
            and param.name in self._accumulators[name]
        ):
            # Dygraph mode tolerates re-creation and returns the existing one.
            if in_dygraph_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name
                )
            )
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if in_dygraph_mode()
            else (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True,
        )
        # Place the accumulator on the same device as its parameter unless
        # the caller pinned it explicitly.
        if device is None:
            device = self._get_device_for_param(param.name)
        with device_guard(device):
            self.helper.set_variable_initializer(
                var,
                initializer=paddle.nn.initializer.Constant(
                    value=float(fill_value)
                ),
            )

        if in_dygraph_mode():
            # If a state dict was loaded before this accumulator existed,
            # restore its saved value now (see set_state_dict).
            if len(self._accumulators_holder) > 0:
                assert (
                    var_name in self._accumulators_holder
                ), "Optimizer set error, {} should in state dict".format(
                    var_name
                )
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var
740

741 742 743 744 745 746 747 748 749
    def _add_global_accumulator(
        self,
        name,
        dtype=None,
        fill_value=0.0,
        shape=None,
        type=None,
        device=None,
    ):
        """Utility function to add a global accumulator for all parameters in the model

        Args:
            block: the block in which the loss variable is present
            name: name of the accumulator
            dtype: data type of the accumulator variable
            fill_value: value to initialize the accumulator variable
            shape: the shape of the accumulator
            type: the variable type of the accumulator
            device: the target place of the accumulator
        """
        # Namespace the accumulator under the optimizer's own name, mirroring
        # _add_accumulator / _get_global_accumulator.
        if self._name is not None:
            name = self._name + "_" + name
        if name in self._global_accumulators:
            # In dygraph mode re-adding is treated as a lookup; in static
            # graph mode a duplicate is a programming error.
            if in_dygraph_mode():
                return self._global_accumulators[name]
            raise Exception("Global accumulator {} already exists".format(name))
        if shape is None:
            shape = [1]  # most case, global accumulator is of shape [1]
        assert isinstance(self.helper, LayerHelper)

        var_name = name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype if dtype else self._dtype,
            type=type,
            shape=shape,
            belong_to_optimizer=True,
        )
        # Global accumulators default to CPU placement, unlike per-parameter
        # accumulators which follow the parameter's device.
        if device is None:
            device = 'cpu'
        with device_guard(device):
            self.helper.set_variable_initializer(
                var,
                initializer=paddle.nn.initializer.Constant(
                    value=float(fill_value)
                ),
            )

        if in_dygraph_mode():
            # When restoring from a state dict, the saved value must exist and
            # is copied into the freshly created accumulator.
            if len(self._accumulators_holder) > 0:
                assert (
                    var_name in self._accumulators_holder
                ), "Optimizer set error, {} should in state dict".format(
                    var_name
                )
                var.set_value(self._accumulators_holder[var_name])

        self._global_accumulators[name] = var
        return var
805 806 807 808 809 810 811 812
    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched

        Returns:
813
            accumulator variable
814
        """
W
whs 已提交
815 816
        if self._name is not None:
            name = self._name + "_" + name
817 818 819 820
        if (
            name not in self._accumulators
            or param.name not in self._accumulators[name]
        ):
821 822
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
823 824 825
                    name, param.name
                )
            )
826 827
        return self._accumulators[name][param.name]

828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855
    def _get_accumulator_master(self, name, param):
        """Utility function to fetch an accumulator for a parameter
        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched
        Returns:
            accumulator variable for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        find_master = self._multi_precision and self._is_dtype_fp16_or_bf16(
            param.dtype
        )
        target_param = (
            self._master_weights[param.name] if find_master else param
        )
        target_name = target_param.name
        if (
            name not in self._accumulators
            or target_name not in self._accumulators[name]
        ):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, target_name
                )
            )
        return self._accumulators[name][target_name]

856 857 858 859 860 861 862 863 864 865 866
    def _get_global_accumulator(self, name):
        """Utility function to fetch a global accumulator

        Args:
            name: name of the accumulator

        Returns:
            accumulator variable
        """
        if self._name is not None:
            name = self._name + "_" + name
867
        if name not in self._global_accumulators:
868 869 870
            raise Exception("Global accumulator {} does not exist".format(name))
        return self._global_accumulators[name]

871 872 873 874 875
    def _update_param_device_map(self, parameters_and_grads, target_block):
        """Record, for each trainable parameter, the op-device attribute of
        the first op in ``target_block`` that consumes that parameter."""
        device_attr_name = (
            core.op_proto_and_checker_maker.kOpDeviceAttrName()
        )
        for param, _ in parameters_and_grads:
            if param.trainable is not True:
                continue
            # Scan ops in order; the first consumer decides the device.
            for op in target_block.ops:
                if param.name in op.input_arg_names:
                    self._param_device_map[param.name] = op.attr(
                        device_attr_name
                    )
                    break
    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device

893
    def _create_optimization_pass(self, parameters_and_grads):
        """Add optimization operators to update gradients to variables.

        Args:
          parameters_and_grads(list(tuple(Variable, Variable))):
            a list of (variable, gradient) pair to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.

        # Always called under program_guard use global block as loss block
        # But if current block is in control flow, append optimize op in the
        # grad block of current block

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert (
                current_block.backward_block_idx != -1
            ), "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx
            ]

        # Remember where we start appending so the slice of newly added ops
        # can be returned at the end.
        start = len(target_block.ops)

        self._update_param_device_map(parameters_and_grads, target_block)
        self._create_accumulators(
            target_block, [p[0] for p in parameters_and_grads if p[0].trainable]
        )
        self._create_global_learning_rate()

        if in_dygraph_mode():
            # NOTE(review): when 'found_inf' (AMP overflow flag) is truthy,
            # the parameter update below is skipped entirely for this step.
            found_inf = self._get_auxiliary_var('found_inf')
            if found_inf:
                if isinstance(found_inf, core.eager.Tensor):
                    self._set_auxiliary_var('found_inf', True)
            else:
                if isinstance(found_inf, core.eager.Tensor):
                    self._set_auxiliary_var('found_inf', False)
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    if param_and_grad[0].trainable is True:
                        self._append_optimize_op(target_block, param_and_grad)
        else:
            for param_and_grad in parameters_and_grads:
                if param_and_grad[1] is None:
                    continue
                with param_and_grad[0].block.program._optimized_guard(
                    param_and_grad
                ), name_scope("optimizer"):
                    if param_and_grad[0].trainable is True:
                        # Place the optimize op on the same device as the op
                        # that consumes the parameter (see
                        # _update_param_device_map).
                        device = self._get_device_for_param(
                            param_and_grad[0].name
                        )
                        with device_guard(device):
                            optimize_op = self._append_optimize_op(
                                target_block, param_and_grad
                            )

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)
    def _process_distribute_lookuptable(self, param_grads):
        """
        Because distribute lookup table only support SGD optimizer for now, not support
        other optimizer and regularization, so we should find the table parameter out,
        and avoid to add regularization and other op for it, and add sgd optimize op
        for it independently.
        :param param_grads(list((Var, Var))): list of (param, grad) pair.
        :param loss: the loss variable.
        :param startup_program: the startup program
        """
        # NOTE(review): local import — presumably to avoid a circular import
        # at module load time; confirm before moving it to the top of file.
        from paddle.distributed.distribute_lookup_table import (
            find_distributed_lookup_table,
        )

        program = framework.default_main_program()
        global_block = framework.default_main_program().global_block()
        table_name = find_distributed_lookup_table(program)
        table_param = None
        table_grad = None
        new_param_grads = []
        # Split the (param, grad) list: the single distributed lookup-table
        # parameter is handled separately; everything else passes through.
        for p, g in param_grads:
            if p.name == table_name:
                if table_param is not None:
                    raise RuntimeError(
                        "multi dist table var found, only support one now!"
                    )
                table_param = p
                table_grad = g
            else:
                new_param_grads.append((p, g))
        sgd_op = None
        if table_param is not None:
            param_and_grad = [table_param, table_grad]
            with table_param.block.program._optimized_guard(
                param_and_grad
            ), framework.name_scope("optimizer"):
                self._create_global_learning_rate()
                # create the optimize op
                sgd_op = global_block.append_op(
                    type='sgd',
                    inputs={
                        "Param": table_param,
                        "Grad": table_grad,
                        "LearningRate": self._create_param_lr(param_and_grad),
                    },
                    outputs={"ParamOut": param_and_grad[0]},
                )
        return new_param_grads, (table_param, table_grad), sgd_op
    def backward(
        self,
        loss,
        startup_program=None,
        parameter_list=None,
        no_grad_set=None,
        callbacks=None,
    ):
        """
        The first part of ``minimize``, do auto-diff to append backward operations for
        the current program.

        Args:
            loss (Variable): ``loss`` variable to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Variable``  or ``Variable.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.

        Return:
            list: list of (param, grad) variable pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            See examples in ``apply_gradients``.
        """
        act_no_grad_set = None
        # In dygraph mode the no-grad set is irrelevant: gradients are read
        # from each parameter's grad ivar below, not from graph pruning.
        if in_dygraph_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if in_dygraph_mode():
            parameter_list = (
                parameter_list if parameter_list else self._parameter_list
            )

            params_grads = []
            for param in parameter_list:
                if not param.trainable:
                    continue
                if param._grad_ivar() is not None:
                    # create gradient variable
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
        else:
            if callbacks is None:
                callbacks = [paddle.nn.clip.error_clip_callback]
            else:
                assert isinstance(callbacks, list)
            program = loss.block.program
            # Backward requires a scalar loss (a single element).
            assert np.prod(loss.shape) == 1, (
                "The number of elements of loss should be 1, but the current loss.shape is {}, whose number of elements is not 1. "
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape
                )
            )
            parameter_list = (
                parameter_list if parameter_list else self._parameter_list
            )
            with program_guard(program, startup_program):
                params_grads = append_backward(
                    loss, parameter_list, act_no_grad_set, callbacks
                )
        return params_grads
    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """Create and add backward regularization Operators

        Function helper of append_regularization_ops.

        Args:
            param: the parameter being regularized.
            grad: the parameter's gradient; may be None.
            regularization: optional global regularizer, used only when the
                parameter does not carry its own ``regularizer`` attribute.

        Returns:
            The (possibly new) gradient variable with the regularization term
            summed in, or ``grad`` unchanged when nothing applies.
        """
        # If no gradient or no regularization is specified,  then we don't need to do anything
        if grad is None or (
            (
                not hasattr(param, 'regularizer')
                or (hasattr(param, 'regularizer') and param.regularizer is None)
            )
            and regularization is None
        ):
            return grad
        regularization_term = None
        # A per-parameter regularizer takes precedence over the global one.
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

        if in_dygraph_mode():
            # Eager mode: compute the sum immediately instead of appending ops.
            return _legacy_C_ops.sum([grad, regularization_term])

        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR,
            )

        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)

        return new_grad
    def append_regularization_ops(
        self, parameters_and_grads, regularization=None
    ):
        r"""Create and add backward regularization Operators

        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.

        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. If the parameter is not
                            set. It will be applied with regularizer.

        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient

        Raises:
            Exception: Unknown regularization type
        """
        params_and_grads = []
        if in_dygraph_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization
                )
                params_and_grads.append((param, new_grad))
        else:
            # Flag so the "per-parameter regularizer overrides the global one"
            # notice is logged at most once per call.
            repeate_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if (
                        not repeate_regularizer
                        and getattr(param, 'regularizer', None) is not None
                        and regularization is not None
                    ):
                        repeate_regularizer = True
                        logging.info(
                            "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. "
                            "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                            % regularization.__str__()
                        )
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization
                        )
                        params_and_grads.append((param, new_grad))
        return params_and_grads
    def flatten_param_grads(self, params_grads):
        """Fuse all (param, grad) pairs into a single flattened parameter and
        a single flattened gradient via ``coalesce_tensor`` ops.

        Args:
            params_grads (list): list of (param, grad) pairs to fuse.

        Returns:
            list: ``[(flatten_param, flatten_grad)]`` on success, or the
            original ``params_grads`` unchanged when any parameter opts out
            (``need_clip`` False or a per-parameter regularizer set), in which
            case ``self._flatten_param_grads`` is switched off.
        """
        need_flatten_params = []
        need_flatten_grads = []
        for p, g in params_grads:
            if g is None:
                continue
            g.persistable = True
            if (
                getattr(p, 'need_clip', True) is False
                or getattr(p, 'regularizer', None) is not None
            ):
                warnings.warn(
                    "flatten_param_grads=True will be discarded since paramter '{}''s need_clip is False or "
                    "the regularizer is set".format(p.name)
                )
                # Bail out entirely: flattening is all-or-nothing.
                self._flatten_param_grads = False
                return params_grads

            need_flatten_params.append(p)
            need_flatten_grads.append(g)

        shape = [np.prod(p.shape) for p in need_flatten_params]
        block = need_flatten_params[0].block

        flatten_param = self.helper.create_global_variable(
            name='flatten_param',
            persistable=True,
            dtype=need_flatten_params[0].dtype,
            shape=[np.sum(shape)],
            belong_to_optimizer=True,
        )

        flatten_param.trainable = True
        flatten_param.optimize_attr = need_flatten_params[0].optimize_attr
        flatten_param.regularizer = need_flatten_params[0].regularizer

        flatten_grad = self.helper.create_global_variable(
            name='flatten_grad',
            persistable=True,
            dtype=need_flatten_grads[0].dtype,
            shape=[np.sum(shape)],
            belong_to_optimizer=True,
        )

        with program_guard(default_main_program()):
            block.append_op(
                type="coalesce_tensor",
                inputs={"Input": need_flatten_params},
                outputs={
                    "Output": need_flatten_params,
                    "FusedOutput": flatten_param,
                },
                attrs={
                    "copy_data": True,
                    "use_align": True,
                    "align_size": self._align_size,
                    "dtype": need_flatten_params[0].dtype,
                },
            )

            block.append_op(
                type="coalesce_tensor",
                inputs={"Input": need_flatten_grads},
                outputs={
                    "Output": need_flatten_grads,
                    "FusedOutput": flatten_grad,
                },
                attrs={
                    "copy_data": True,
                    "use_align": True,
                    "align_size": self._align_size,
                    "dtype": need_flatten_grads[0].dtype,
                },
            )

        # NOTE(zhiqiu): the initializer should be set after coalesce_tensor op,
        # so the shape of flatten_param and flatten_grad will be inferred.
        self.helper.set_variable_initializer(
            flatten_param,
            initializer=paddle.nn.initializer.Constant(0.0),
        )
        self.helper.set_variable_initializer(
            flatten_grad,
            initializer=paddle.nn.initializer.Constant(0.0),
        )

        return [(flatten_param, flatten_grad)]
    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                loss = network()
                optimizer = paddle.optimizer.SGD(learning_rate=0.1)
                params_grads = optimizer.backward(loss)
                # you may append operations for params_grads here
                # ...
                optimizer.apply_gradients(params_grads)
        """
        # Sort by parameter name so the op insertion order is deterministic.
        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        # NOTE(zhiqiu): currently, only support ClipGradByGlobalNorm and without regularization.
        if self._flatten_param_grads and self.regularization is None:
            if self._grad_clip is None or isinstance(
                self._grad_clip, paddle.nn.ClipGradByGlobalNorm
            ):
                params_grads = self.flatten_param_grads(params_grads)

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:
            params_grads = paddle.nn.clip.append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(
            params_grads, self.regularization
        )

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops
    def apply_optimize(self, loss, startup_program, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        if in_dygraph_mode():
            # Dygraph path: clip and regularize inline, then build the pass;
            # note that clipping runs BEFORE regularization here, the reverse
            # of the static-graph order in apply_gradients.
            with program_guard(
                framework.default_main_program(),
                framework.default_startup_program(),
            ):
                if self._grad_clip is not None:
                    params_grads = self._grad_clip(params_grads)
                params_grads = self.append_regularization_ops(
                    params_grads, self.regularization
                )
                optimize_ops = self._create_optimization_pass(params_grads)
        else:
            # Static-graph path: delegate to apply_gradients within the
            # program that produced the loss.
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops
    def _get_no_grad_set(self, loss, no_grad_set=None):
        """Build the complete set of parameter names to skip in backward.

        Args:
            loss (Variable): loss whose program supplies the parameters.
            no_grad_set (set, optional): user-specified Variables or names
                that should not receive gradients.

        Returns:
            set: normalized names from ``no_grad_set`` plus the name of every
            non-trainable parameter in the program.
        """
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        # A non-trainable parameter should never have a gradient, so it is
        # always excluded regardless of the caller-supplied set.
        param_no_trainable = {
            param.name for param in parameters if param.trainable is False
        }
        no_grad_set.update(param_no_trainable)

        return no_grad_set
    @framework.dygraph_only
    def clear_gradients(self):
        """
        Clear the gradients of all optimized parameters for model.

        If not, new gradient will accumulate on previous gradient.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import paddle
                import numpy as np

                with fluid.dygraph.guard():
                    value = np.arange(26).reshape(2, 13).astype("float32")
                    a = fluid.dygraph.to_variable(value)
                    linear = paddle.nn.Linear(13, 5)
                    # This can be any optimizer supported by dygraph.
                    adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                                parameters = linear.parameters())
                    out = linear(a)
                    out.backward()
                    adam.minimize(out)
                    adam.clear_gradients()

        """
        # Only trainable parameters carry gradients worth clearing.
        for param in self._parameter_list:
            if param.trainable:
                param.clear_gradient()
    @imperative_base.no_grad
    def minimize(
        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
    ):
        """
        Add operations to minimize ``loss`` by updating ``parameter_list``.

        Args:
            loss (Variable): A ``Variable`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Variable``  or ``Variable.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) variable pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            Please refer to the example of current Optimizer.
        """
        assert isinstance(loss, Variable), "The loss should be an Variable."

        parameter_list = (
            parameter_list if parameter_list else self._parameter_list
        )

        # Step 1: append backward ops and collect (param, grad) pairs.
        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
            no_grad_set=no_grad_set,
        )

        # Step 2: append the clip/regularization/update ops for those pairs.
        optimize_ops = self.apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads
        )

        return optimize_ops, params_grads