# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import os
import logging
from collections import defaultdict

import paddle
from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
from paddle.fluid.framework import (Program, Variable, Parameter, name_scope,
                                    default_main_program,
                                    default_startup_program, device_guard)

from . import framework
from . import layers
from . import unique_name
from .backward import (append_backward, _some_in_set_, _append_grad_suffix_,
                       _get_no_grad_set_name)
from .clip import (GradientClipBase, GradientClipByNorm, error_clip_callback,
                   append_gradient_clip_ops, ClipGradByGlobalNorm)
from .framework import program_guard
from .initializer import Constant
from .layer_helper import LayerHelper
from .layers import ops
from .dygraph import base as imperative_base
from .dygraph import no_grad
from .dygraph.learning_rate_scheduler import LearningRateDecay, _LearningRateEpochDecay
from paddle.fluid import core
from paddle.fluid.layers import tensor
from functools import reduce
from functools import cmp_to_key
from .wrapped_decorator import signature_safe_contextmanager
from .. import compat as cpt
import warnings
from paddle import _C_ops, _legacy_C_ops
from ..fluid.framework import (_in_legacy_dygraph, in_dygraph_mode,
                               _current_expected_place)

__all__ = [
    'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'Dpsgd', 'DecayedAdagrad',
    'Ftrl', 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer',
    'AdamOptimizer', 'AdamaxOptimizer', 'DpsgdOptimizer',
    'DecayedAdagradOptimizer', 'RMSPropOptimizer', 'FtrlOptimizer', 'Adadelta',
    'AdadeltaOptimizer', 'ModelAverage', 'LarsMomentum',
    'LarsMomentumOptimizer', 'LambOptimizer', 'ExponentialMovingAverage',
    'PipelineOptimizer', 'LookaheadOptimizer', 'RecomputeOptimizer'
]


class Optimizer(object):
    """Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly,
    but should use one of its implementations instead.
    """

    @imperative_base.no_grad
    def __init__(self,
                 learning_rate,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 flatten_param_grads=False,
                 align_size=-1,
                 name=None):
        """
        Args:
            flatten_param_grads (bool, optional): Whether to flatten all the parameters and grads.
                If true, the parameters and gradients will be coalesced into contiguous memory,
                and the grad_clip ops / optimizer ops will be fused into one operator.
        """
        # Imported here, inside the function body, to avoid a circular import.
        from paddle.optimizer.lr import LRScheduler
        self._parameter_list = list(
            parameter_list) if parameter_list is not None else None
        self._name = name
        if framework._non_static_mode():
            if not isinstance(learning_rate,
                              (float, LearningRateDecay, LRScheduler)):
                raise TypeError(
                    "learning rate should be float, LearningRateDecay or LRScheduler, got %s here"
                    % type(learning_rate))
            if self._parameter_list is None:
                raise AttributeError(
                    "parameter_list argument given to the Optimizer should not be None in dygraph mode."
                )
            if regularization is not None:
                for param in self._parameter_list:
                    if param.regularizer is not None:
                        logging.info(
                            "If a Parameter's regularizer has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in Optimizer will not take effect on it, and it will only be applied to other Parameters!"
                            % regularization.__str__())
                        break
        else:
            if not isinstance(learning_rate,
                              (float, framework.Variable, LRScheduler)):
                raise TypeError(
                    "learning rate should be float, Variable or LRScheduler, got %s here"
                    % type(learning_rate))

        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        self.regularization = regularization
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate
        self._flatten_param_grads = flatten_param_grads
        self._align_size = align_size

        self._dtype = None
        # Infer the dtype from the parameters
        if self._parameter_list:
            self._dtype = self._parameter_list[0].dtype

        # each program should have an independent learning rate
        # program -> Variable(learning_rate)
        self._learning_rate_map = dict()
        if isinstance(self._learning_rate, framework.Variable):
            self._learning_rate_map[
                framework.default_main_program()] = self._learning_rate
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra variables associated with the parameters
        # to train. These variables are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        # global_accumulator dict, {accum_name : acc_variable, ...}
        self._global_accumulators = {}
        self.helper = LayerHelper(self.__class__.__name__)
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        # NOTE(zhiqiu): sometimes we want to add some variables (Tensor) to the optimizer for a specific optimization,
        # for example, we want to pass 'found_inf' to the adam optimizer so it can skip the update when found_inf is True.
        # These variables should not be parameters of the Optimizer's constructor (because they are not commonly used).
        # Use _auxiliary_vars together with _set_auxiliary_var/_get_auxiliary_var to achieve that.
        self._auxiliary_vars = dict()

    @framework.dygraph_only
    def state_dict(self):
        '''
        Get state dict information from the optimizer. It contains all the variables used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If LearningRateDecay has been used, global_step will be included in the state dict.
        If the optimizer has never been called (i.e., the minimize function has not run), the state dict is empty.

        Args: None
        Returns:
            state_dict(dict) : dict containing all the variables used by the optimizer

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                with fluid.dygraph.guard():
                    emb = fluid.dygraph.Embedding([10, 10])

                    adam = fluid.optimizer.Adam(0.001, parameter_list=emb.parameters())
                    state_dict = adam.state_dict()

        '''
        from paddle.optimizer.lr import LRScheduler
        state_dict = {}
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
        for k, v in self._global_accumulators.items():
            state_dict[v.name] = v
        # global step if lr decay is used
        if isinstance(self._learning_rate, LRScheduler):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
            return state_dict
        if isinstance(self._learning_rate, LearningRateDecay):
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()

            if not isinstance(self._learning_rate, _LearningRateEpochDecay):
                var_temp = framework._varbase_creator(None,
                                                      name='global_step',
                                                      dtype='int32')

                tensor.fill_constant([1],
                                     "int32",
                                     self._learning_rate.step_num,
                                     out=var_temp)

                state_dict['global_step'] = var_temp
        return state_dict

    @framework.dygraph_only
    def set_state_dict(self, state_dict):
        '''
        Load the optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If LearningRateDecay has been used, global_step will be changed.

        Args:
            state_dict(dict) : Dict containing all the Variables needed by the optimizer
        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid

                paddle.disable_static()

                emb = paddle.nn.Embedding(10, 10)

                state_dict = emb.state_dict()
                fluid.save_dygraph(state_dict, "paddle_dy")

                scheduler = paddle.optimizer.lr.NoamDecay(
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
                state_dict = adam.state_dict()
                fluid.save_dygraph(state_dict, "paddle_dy")

                para_state_dict, opti_state_dict = fluid.load_dygraph("paddle_dy")
        '''
        from paddle.optimizer.lr import LRScheduler
        if isinstance(self._learning_rate, LRScheduler):
            self._learning_rate.set_dict(state_dict["LR_Scheduler"])

        if isinstance(self._learning_rate, LearningRateDecay):
            self._learning_rate.set_dict(state_dict["LR_Scheduler"])

            if not isinstance(self._learning_rate, _LearningRateEpochDecay):
                assert 'global_step' in state_dict, \
                        'global_step not found in state_dict; when LearningRateDecay is used in dygraph mode, global_step must be in state_dict'
                global_step = state_dict['global_step']

                if isinstance(global_step, Variable):
                    step_np = np.array(global_step.value().get_tensor())
                    assert step_np.shape == (1,),  \
                            "global step shape should be (1,), but the received shape is {}".format( step_np.shape )

                    self._learning_rate.step_num = int(step_np[0])
                elif isinstance(global_step, np.ndarray):
                    assert global_step.shape == (1,),  \
                            "global step shape should be (1,), but the received shape is {}".format( global_step.shape )
                    self._learning_rate.step_num = global_step[0]
                else:
                    raise RuntimeError(
                        "Type not supported; global_step in state_dict must be VarBase, Variable or numpy.ndarray, but the received type is ",
                        type(global_step))

        def _load_state_para(state_dict, param):
            var = param.value()
            tensor = var.get_tensor()
            model_np = np.array(tensor)
            load_para = state_dict[param.name]
            if isinstance(load_para, (Variable, core.VarBase)):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, np.ndarray):
                load_para_np = load_para
            else:
                raise RuntimeError("State dict type {} not supported".format(
                    str(type(load_para))))

            assert model_np.shape == load_para_np.shape,  \
                                        "Parameter shape does not match: Dygraph Parameter [ {} ] needs a tensor with shape {} but the loaded tensor has shape {}".format(
                                                param.name, model_np.shape, load_para_np.shape)

            assert model_np.dtype == load_para_np.dtype, \
                                        "Parameter dtype does not match: Dygraph Parameter [ {} ] needs a tensor with dtype {} but the loaded tensor has dtype {}".format(
                                            param.name, model_np.dtype, load_para_np.dtype)

            tensor.set(load_para_np, framework._current_expected_place())

        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert var_tmp.name in state_dict, \
                        "optimizer variable {} not found".format( var_tmp.name )
                _load_state_para(state_dict, var_tmp)

        for k, v in self._global_accumulators.items():
            assert v.name in state_dict, \
                        "optimizer variable {} not found".format( v.name )
            _load_state_para(state_dict, v)

    # [aliases] Compatible with old method names
    set_dict = set_state_dict

    def get_opti_var_name_list(self):
        return self._opti_name_list

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _get_auxiliary_var(self, key):
        if key in self._auxiliary_vars:
            return self._auxiliary_vars[key]
        else:
            return None
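
    # A minimal usage sketch (hypothetical caller, not part of this class's
    # public API): an AMP-style loss scaler could stash a `found_inf` tensor
    # here so that a subclass's _append_optimize_op can skip the parameter
    # update when an overflow was detected, e.g.:
    #
    #     found_inf = paddle.full([1], False, dtype='bool')
    #     optimizer._set_auxiliary_var('found_inf', found_inf)
    #     ...
    #     if optimizer._get_auxiliary_var('found_inf') is not None:
    #         pass  # consult the tensor before applying the update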

    def _create_global_learning_rate(self):
        from paddle.optimizer.lr import LRScheduler
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[1],
                    persistable=True,
                    stop_gradient=True,
                    dtype='float32' if self._dtype is None else self._dtype)
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var
                self._learning_rate_map[
                    framework.default_main_program()] = lr_var

            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value))
            return

        if imperative_base.enabled():
            # create learning rate Variable
            if isinstance(self._learning_rate, float):
                lr = self._global_learning_rate()

                if isinstance(lr, framework.Variable):
                    return
                else:
                    self._learning_rate_map[framework.default_main_program(
                    )] = layers.create_global_var(
                        name=unique_name.generate("learning_rate"),
                        shape=[1],
                        value=float(self._learning_rate),
                        dtype='float32' if self._dtype is None else self._dtype,
                        persistable=True)
            # get learning rate Variable from LearningRateDecay
            elif isinstance(self._learning_rate, LearningRateDecay):
                self._learning_rate_map[
                    framework.default_main_program()] = self._learning_rate()
            else:
                raise TypeError(
                    "optimizer's learning rate must be float or LearningRateDecay"
                )
        else:
            lr = self._global_learning_rate()

            if isinstance(lr, framework.Variable):
                return
            else:
                if not isinstance(self._learning_rate, float):
                    raise TypeError(
                        "the learning rate variable was created outside the optimizer, "
                        "cannot create a new learning rate variable for a new program"
                    )

            # create learning rate in the current main program
            self._learning_rate_map[
                framework.default_main_program()] = layers.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(self._learning_rate),
                    dtype='float32' if self._dtype is None else self._dtype,
                    persistable=True)

    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer uses
        LearningRateDecay, this API cannot be invoked, because it would lead to a conflict.

        Args:
            value (float|Variable): the value of the learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                with fluid.dygraph.guard():
                    linear = fluid.dygraph.nn.Linear(10, 10)

                    adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

                    # set learning rate manually by python float value
                    lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                    for i in range(5):
                        adam.set_lr(lr_list[i])
                        lr = adam.current_step_lr()
                        print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.2
                    #    current lr is 0.3
                    #    current lr is 0.4
                    #    current lr is 0.5
                    #    current lr is 0.6


                    # set learning rate manually by framework Variable
                    lr_var = fluid.layers.create_global_var(
                        shape=[1], value=0.7, dtype='float32')
                    adam.set_lr(lr_var)
                    lr = adam.current_step_lr()
                    print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.7



        """
        if not isinstance(value, (framework.Variable, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be (float, Variable), but received %s."
                % (type(value)))
        if isinstance(self._learning_rate, LearningRateDecay):
            raise RuntimeError(
                "optimizer's learning rate can't be LearningRateDecay when invoking this API, because it would lead to a conflict."
            )
        if isinstance(value, float):
            self._learning_rate = value
            current_lr = self._global_learning_rate()
            if current_lr is not None:
                if in_dygraph_mode():
                    place = _current_expected_place()
                    _C_ops.full_(current_lr, list(current_lr.shape),
                                 float(value), current_lr.dtype, place)

                elif _in_legacy_dygraph():
                    _legacy_C_ops.fill_constant(current_lr, 'value',
                                                float(value), 'dtype',
                                                current_lr.dtype, 'shape',
                                                list(current_lr.shape))
                else:
                    global_block = framework.default_main_program(
                    ).global_block()
                    global_block.append_op(type='fill_constant',
                                           outputs={'Out': [current_lr]},
                                           attrs={
                                               'dtype': current_lr.dtype,
                                               'shape': list(current_lr.shape),
                                               'value': float(value)
                                           },
                                           stop_gradient=True)
        else:
            assert len(value.shape) == 1 and value.shape[
                0] == 1, "optimizer's learning rate must be a 1-D Tensor with shape [1]"
            self._learning_rate_map[framework.default_main_program()] = value

    @framework.dygraph_only
    def current_step_lr(self):
        """
        :api_attr: imperative

        Get the learning rate of the current step. When LearningRateDecay is not used,
        the return value is always the same; otherwise, the step learning rate is returned.

        Returns:
            float: The learning rate of the current step.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                # example1: LearningRateDecay is not used, return value is all the same
                with fluid.dygraph.guard():
                    emb = fluid.dygraph.Embedding([10, 10])
                    adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters())
                    lr = adam.current_step_lr()
                    print(lr) # 0.001

                # example2: PiecewiseDecay is used, return the step learning rate
                with fluid.dygraph.guard():
                    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
                    linear = fluid.dygraph.nn.Linear(10, 10)
                    inp = fluid.dygraph.to_variable(inp)
                    out = linear(inp)
                    loss = fluid.layers.reduce_mean(out)

                    bd = [2, 4, 6, 8]
                    value = [0.2, 0.4, 0.6, 0.8, 1.0]
                    adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
                                           parameter_list=linear.parameters())

                    # first step: learning rate is 0.2
                    np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True

                    # learning rate for different steps
                    ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
                    for i in range(12):
                        adam.minimize(loss)
                        lr = adam.current_step_lr()
                        np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True

        """
        current_lr = self._global_learning_rate()
        if isinstance(current_lr, framework.Variable):
            return self._global_learning_rate().numpy()[0]

        if isinstance(self._learning_rate, float):
            return self._learning_rate
        elif isinstance(self._learning_rate, _LearningRateEpochDecay):
            step_lr = self._learning_rate()
            return step_lr.numpy()[0]
        else:
            step_lr = self._learning_rate.step()
            if isinstance(step_lr, (float, int)):
                return step_lr
            else:
                return step_lr.numpy()[0]

    def _global_learning_rate(self, program=None):
        """
        Get the global decayed learning rate.
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """ Append optimize operators to the block and return all the added optimize ops.
        """
        raise NotImplementedError()

    def _create_param_lr(self, param_and_grad):
        # create learning rate variable for every parameter
        param = param_and_grad[0]
        param_lr = param.optimize_attr['learning_rate']
        if type(param_lr) == Variable:
            return param_lr
        else:
            if param_lr == 1.0:
                return self._global_learning_rate()
            else:
                with default_main_program()._lr_schedule_guard(
                        is_with_opt=True), framework.name_scope(
                            'scale_with_param_lr'):
                    return self._global_learning_rate() * param_lr
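
    # Illustrative note (assumed usage, not defined in this file): a
    # per-parameter scale such as fluid.ParamAttr(learning_rate=2.0) ends up
    # in param.optimize_attr['learning_rate'], so _create_param_lr returns
    # `global_lr * 2.0` for that parameter instead of the plain global rate.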

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss variable is present
            parameters: list of parameter variables for the optimizer
        """
        pass

    def _finish_update(self, block, parameters_and_grads):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss variable is present
            parameters_and_grads: list of (param, grad) pairs for the optimizer

        Returns:
            None
        """
        pass

    def _add_accumulator(self,
                         name,
                         param,
                         dtype=None,
                         fill_value=0.0,
                         shape=None,
                         type=None,
                         device=None):
        """Utility function to add an accumulator for a parameter

        Args:
            block: the block in which the loss variable is present
            name: name of the accumulator
            param: parameter variable for which the accumulator is to be added
            dtype: data type of the accumulator variable
            fill_value: value to initialize the accumulator variable
            shape: the shape of the accumulator (defaults to param.shape)
            type: the variable type of the accumulator
            device: the target place of the accumulator
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name in self._accumulators
                and param.name in self._accumulators[name]):
            if framework._non_static_mode():
                return self._accumulators[name][param.name]
            raise Exception(
                "Accumulator {} already exists for parameter {}".format(
                    name, param.name))
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype or param.dtype,
            type=core.VarDesc.VarType.LOD_TENSOR
            if framework._non_static_mode() else
            (param.type if type is None else type),
            shape=shape,
            belong_to_optimizer=True)
        if device is None:
            device = self._get_device_for_param(param.name)
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value)))

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert var_name in self._accumulators_holder, \
                        "Optimizer set error, {} should be in state dict".format( var_name )
                var.set_value(self._accumulators_holder[var_name])

        self._accumulators[name][param.name] = var
        return var
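
    # Illustrative sketch only (hypothetical subclass, not code from this
    # file): a momentum-style optimizer would typically pair _add_accumulator
    # with _get_accumulator (defined below), e.g.:
    #
    #     def _create_accumulators(self, block, parameters):
    #         for p in parameters:
    #             self._add_accumulator('velocity', p)
    #
    #     def _append_optimize_op(self, block, param_and_grad):
    #         velocity = self._get_accumulator('velocity', param_and_grad[0])
    #         ...  # emit the momentum update op using `velocity`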

    def _add_global_accumulator(self,
                                name,
                                dtype=None,
                                fill_value=0.0,
                                shape=None,
                                type=None,
                                device=None):
        """Utility function to add a global accumulator for all parameters in the model

        Args:
            block: the block in which the loss variable is present
            name: name of the accumulator
            dtype: data type of the accumulator variable
            fill_value: value to initialize the accumulator variable
            shape: the shape of the accumulator
            type: the variable type of the accumulator
            device: the target place of the accumulator
        """
        if self._name is not None:
            name = self._name + "_" + name
        if name in self._global_accumulators:
            if framework._non_static_mode():
                return self._global_accumulators[name]
            raise Exception("Global accumulator {} already exists".format(name))
        if shape is None:
            shape = [1]  # in most cases, the global accumulator is of shape [1]
        assert isinstance(self.helper, LayerHelper)

        var_name = name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype if dtype else self._dtype,
            type=type,
            shape=shape,
            belong_to_optimizer=True)
        if device is None:
            device = 'cpu'
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value)))

        if framework._non_static_mode():
            if len(self._accumulators_holder) > 0:
                assert var_name in self._accumulators_holder, \
                        "Optimizer set error, {} should be in state dict".format( var_name )
                var.set_value(self._accumulators_holder[var_name])

        self._global_accumulators[name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter variable for which the accumulator is to be fetched

        Returns:
            accumulator variable
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name not in self._accumulators
                or param.name not in self._accumulators[name]):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, param.name))
        return self._accumulators[name][param.name]

    def _get_global_accumulator(self, name):
        """Utility function to fetch a global accumulator

        Args:
            name: name of the accumulator

        Returns:
            accumulator variable
        """
        if self._name is not None:
            name = self._name + "_" + name
        if name not in self._global_accumulators:
            raise Exception("Global accumulator {} does not exist".format(name))
        return self._global_accumulators[name]

    def _update_param_device_map(self, parameters_and_grads, target_block):
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].trainable is True:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name)
                        break

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device
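
    # Note (illustrative): _param_device_map is filled from the op device
    # attribute (kOpDeviceAttrName) of the ops that consume each parameter,
    # so a parameter whose ops were placed under device_guard("gpu:1") will
    # make _get_device_for_param return "gpu:1" for it.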

    def _create_optimization_pass(self, parameters_and_grads):
        """Add optimization operators to update gradients to variables.

        Args:
          parameters_and_grads(list(tuple(Variable, Variable))):
            a list of (variable, gradient) pairs to update.

        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        # _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend the _finish_update method to add custom ops.

        # Always called under program_guard, so the global block is used as the
        # loss block; but if the current block is in control flow, append the
        # optimize ops in the grad block of the current block.

        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert current_block.backward_block_idx != -1, \
                "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx]

        start = len(target_block.ops)

        self._update_param_device_map(parameters_and_grads, target_block)
        self._create_accumulators(
            target_block,
            [p[0] for p in parameters_and_grads if p[0].trainable])
        self._create_global_learning_rate()

        if framework._non_static_mode():
            for param_and_grad in parameters_and_grads:
                if param_and_grad[1] is None:
                    continue
                if param_and_grad[0].trainable is True:
                    self._append_optimize_op(target_block, param_and_grad)
        else:
            for param_and_grad in parameters_and_grads:
                if param_and_grad[1] is None:
                    continue
                with param_and_grad[0].block.program._optimized_guard(
                        param_and_grad), name_scope("optimizer"):
                    if param_and_grad[0].trainable is True:
                        device = self._get_device_for_param(
                            param_and_grad[0].name)
                        with device_guard(device):
                            optimize_op = self._append_optimize_op(
                                target_block, param_and_grad)

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)

        end = len(target_block.ops)
        return target_block._slice_ops(start, end)

    def _process_distribute_lookuptable(self, param_grads):
        """
        Because the distributed lookup table only supports the SGD optimizer for now,
        and supports neither other optimizers nor regularization, we must find the
        table parameter, avoid adding regularization and other ops for it, and add
        an SGD optimize op for it independently.
        :param param_grads(list((Var, Var))): list of (param, grad) pairs.
        :param loss: the loss variable.
        :param startup_program: the startup program
        """
        program = framework.default_main_program()
        global_block = framework.default_main_program().global_block()
        table_name = find_distributed_lookup_table(program)
        table_param = None
        table_grad = None
        new_param_grads = []
        for p, g in param_grads:
            if p.name == table_name:
                if table_param is not None:
                    raise RuntimeError(
                        "multi dist table var found, only support one now!")
                table_param = p
                table_grad = g
            else:
                new_param_grads.append((p, g))
        sgd_op = None
        if table_param is not None:
            param_and_grad = [table_param, table_grad]
            with table_param.block.program._optimized_guard(param_and_grad), \
                    framework.name_scope("optimizer"):
                self._create_global_learning_rate()
                # create the optimize op
                sgd_op = global_block.append_op(
                    type='sgd',
                    inputs={
                        "Param": table_param,
                        "Grad": table_grad,
                        "LearningRate": self._create_param_lr(param_and_grad)
                    },
                    outputs={"ParamOut": param_and_grad[0]})
        return new_param_grads, (table_param, table_grad), sgd_op

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        The first part of ``minimize``; it does auto-diff to append backward operations for
        the current program.

        Args:
            loss (Variable): ``loss`` variable to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Variable`` or ``Variable.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending the backward
                operator for one parameter. The default value is None.

        Returns:
            list: list of (param, grad) variable pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.

        Examples:
            See examples in ``apply_gradients``.
        """
        act_no_grad_set = None
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

        if framework._non_static_mode():
            parameter_list = parameter_list if parameter_list \
                else self._parameter_list

            params_grads = []
            for param in parameter_list:
                if not param.trainable:
                    continue
                if param._grad_ivar() is not None:
                    # create gradient variable
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
        else:
            if callbacks is None:
                callbacks = [error_clip_callback]
            else:
                assert (isinstance(callbacks, list))
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, \
                "The loss.shape should be (1L,), but the current loss.shape is {}. " \
                "Maybe you should call paddle.mean to process the current loss.".format(
                    loss.shape)
            parameter_list = parameter_list if parameter_list \
                else self._parameter_list
            with program_guard(program, startup_program):
                params_grads = append_backward(loss, parameter_list,
                                               act_no_grad_set, callbacks)
        return params_grads

    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """ Create and add backward regularization Operators

        Function helper of append_regularization_ops.
        """
        # If no gradient or no regularization is specified, then we don't need to do anything
        if grad is None or (
            (not hasattr(param, 'regularizer') or
             (hasattr(param, 'regularizer') and param.regularizer is None))
                and regularization is None):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

        if framework._non_static_mode():
            return _legacy_C_ops.sum([grad, regularization_term])

        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR)

        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)

        return new_grad

    def append_regularization_ops(self,
                                  parameters_and_grads,
                                  regularization=None):
        r"""Create and add backward regularization Operators

        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.

        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. It is applied to a parameter
                            only if the parameter does not have its own
                            regularizer set.

        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient

        Raises:
            Exception: Unknown regularization type
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(
                    param, grad, regularization)
                params_and_grads.append((param, new_grad))
        else:
            repeat_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if not repeat_regularizer and getattr(
                            param, 'regularizer',
                            None) is not None and regularization is not None:
                        repeat_regularizer = True
                        logging.info(
                            "If a Parameter's regularizer has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in Optimizer will not take effect on it, and it will only be applied to other Parameters!"
                            % regularization.__str__())
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization)
                        params_and_grads.append((param, new_grad))
        return params_and_grads
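
    # For intuition (illustrative note, not code from this file): with an L2
    # regularizer the regularization_term created above is proportional to
    # `param`, so the summed gradient becomes `grad + coeff * param`, i.e. the
    # classic weight-decay update.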

    def flatten_param_grads(self, params_grads):
        need_flatten_params = []
        need_flatten_grads = []
        for p, g in params_grads:
            if g is None:
                continue
            g.persistable = True
            if getattr(p, 'need_clip', True) is False or getattr(
                    p, 'regularizer', None) is not None:
                warnings.warn(
                    "flatten_param_grads=True will be discarded since parameter '{}''s need_clip is False or "
                    "its regularizer is set".format(p.name))
                self._flatten_param_grads = False
                return params_grads

            need_flatten_params.append(p)
            need_flatten_grads.append(g)

        shape = [np.prod(p.shape) for p in need_flatten_params]
        block = need_flatten_params[0].block

        flatten_param = self.helper.create_global_variable(
            name='flatten_param',
            persistable=True,
            dtype=need_flatten_params[0].dtype,
            shape=[np.sum(shape)],
            belong_to_optimizer=True)

        flatten_param.trainable = True
        flatten_param.optimize_attr = need_flatten_params[0].optimize_attr
        flatten_param.regularizer = need_flatten_params[0].regularizer

        flatten_grad = self.helper.create_global_variable(
            name='flatten_grad',
            persistable=True,
            dtype=need_flatten_grads[0].dtype,
            shape=[np.sum(shape)],
            belong_to_optimizer=True)

        with program_guard(default_main_program()):
            block.append_op(type="coalesce_tensor",
                            inputs={"Input": need_flatten_params},
                            outputs={
                                "Output": need_flatten_params,
                                "FusedOutput": flatten_param
                            },
                            attrs={
                                "copy_data": True,
                                "use_align": True,
                                "align_size": self._align_size,
                                "dtype": need_flatten_params[0].dtype
                            })

            block.append_op(type="coalesce_tensor",
                            inputs={"Input": need_flatten_grads},
                            outputs={
                                "Output": need_flatten_grads,
                                "FusedOutput": flatten_grad
                            },
                            attrs={
                                "copy_data": True,
                                "use_align": True,
                                "align_size": self._align_size,
                                "dtype": need_flatten_grads[0].dtype
                            })

        # NOTE(zhiqiu): the initializer should be set after the coalesce_tensor op,
        # so that the shapes of flatten_param and flatten_grad will be inferred.
        self.helper.set_variable_initializer(flatten_param,
                                             initializer=Constant(0.0))
        self.helper.set_variable_initializer(flatten_grad,
                                             initializer=Constant(0.0))

        return [(flatten_param, flatten_grad)]
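
    # Effect sketch (illustrative): after the two coalesce_tensor ops above,
    # each (p, g) pair aliases a slice of (flatten_param, flatten_grad), so a
    # single fused grad-clip / optimizer op can touch all parameters at once.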

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pairs to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                loss = network()
                optimizer = fluid.optimizer.SGD(learning_rate=0.1)
                params_grads = optimizer.backward(loss)
                # you may append operations for params_grads here
                # ...
                optimizer.apply_gradients(params_grads)
        """
        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        # NOTE(zhiqiu): currently, flattening only supports ClipGradByGlobalNorm and no regularization.
        if self._flatten_param_grads and self.regularization is None:
            if self._grad_clip is None or isinstance(self._grad_clip,
                                                     ClipGradByGlobalNorm):
                params_grads = self.flatten_param_grads(params_grads)

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:
            params_grads = append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = self.append_regularization_ops(params_grads,
                                                      self.regularization)

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

    def apply_optimize(self, loss, startup_program, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            params_grads (list): list of (param, grad) pairs to do optimization.

        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
            with program_guard(framework.default_main_program(),
                               framework.default_startup_program()):
                if self._grad_clip is not None:
                    params_grads = self._grad_clip(params_grads)
                params_grads = self.append_regularization_ops(
                    params_grads, self.regularization)
                optimize_ops = self._create_optimization_pass(params_grads)
        else:
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _get_no_grad_set(self, loss, no_grad_set=None):
        no_grad_set = _get_no_grad_set_name(no_grad_set)
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = set(
            [param.name for param in parameters if param.trainable is False])
        # If a parameter is not trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

    @framework.dygraph_only
    def clear_gradients(self):
        """
        Clear the gradients of all optimized parameters for model.

        Otherwise, new gradients will accumulate on top of the previous ones.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                with fluid.dygraph.guard():
                    value = np.arange(26).reshape(2, 13).astype("float32")
                    a = fluid.dygraph.to_variable(value)
                    linear = fluid.Linear(13, 5, dtype="float32")
                    # This can be any optimizer supported by dygraph.
                    adam = fluid.optimizer.Adam(learning_rate=0.01,
                                                parameter_list=linear.parameters())
                    out = linear(a)
                    out.backward()
                    adam.minimize(out)
                    adam.clear_gradients()

        """
        for p in self._parameter_list:
            if p.trainable:
                p.clear_gradient()

    @imperative_base.no_grad
    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add operations to minimize ``loss`` by updating ``parameter_list``.

        Args:
            loss (Variable): A ``Variable`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Variable`` or ``Variable.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) variable pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            Please refer to the example of current Optimizer.
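
            As a minimal sketch, the returned ``params_grads`` can be fetched
            to trigger the pruning described above (``network()`` and
            ``feed_dict`` are placeholders here):

            .. code-block:: python

                import paddle.fluid as fluid

                loss = network()
                sgd = fluid.optimizer.SGD(learning_rate=0.01)
                optimize_ops, params_grads = sgd.minimize(loss)

                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                # Per the Returns note, fetching params_grads prunes the main
                # program by feed and fetch_list before it runs.
                exe.run(fluid.default_main_program(),
                        feed=feed_dict,
                        fetch_list=params_grads)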
        """
        assert isinstance(loss, Variable), "The loss should be a Variable."

        parameter_list = parameter_list if parameter_list \
            else self._parameter_list

        params_grads = self.backward(loss,
                                     startup_program=startup_program,
                                     parameter_list=parameter_list,
                                     no_grad_set=no_grad_set)

        optimize_ops = self.apply_optimize(loss,
                                           startup_program=startup_program,
                                           params_grads=params_grads)

        return optimize_ops, params_grads


class SGDOptimizer(Optimizer):
    r"""
    Optimizer of the stochastic gradient descent algorithm.

    .. math::

        param\_out = param - learning\_rate * grad

    Parameters:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as data element.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        multi_precision (bool, optional): Whether to use multi-precision during weight updating.
            The default value is False.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
                sgd_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    """

    def __init__(self,
                 learning_rate,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 multi_precision=False,
                 name=None):
        assert learning_rate is not None
        super(SGDOptimizer, self).__init__(learning_rate=learning_rate,
                                           parameter_list=parameter_list,
                                           regularization=regularization,
                                           grad_clip=grad_clip,
                                           name=name)
        self.type = "sgd"
        self._use_mkldnn = False
        self._multi_precision = multi_precision
        self._master_weights = {}

    def _create_master_weight(self, param):
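        # Lazily create and cache an FP32 master copy of an FP16 parameter,
        # so that weight updates can be accumulated in full precision.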
        if param.name in self._master_weights:
            var = self._master_weights[param.name]
        else:
            assert isinstance(self.helper, LayerHelper)

            var_name = param.name + "_fp32_master"
            var_name = unique_name.generate(var_name)
            var = layers.create_global_var(name=var_name,
                                           shape=param.shape,
                                           value=0,
                                           dtype='float32',
                                           persistable=True)
            block = self.helper.startup_program.global_block()
            block.append_op(type="cast",
                            inputs={"X": [param]},
                            outputs={"Out": [var]},
                            attrs={
                                "in_dtype": param.dtype,
                                "out_dtype": core.VarDesc.VarType.FP32
                            })
            self._master_weights[param.name] = var
        return var

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)
        if isinstance(parameters, dict):
            parameters = self._update_param_group(parameters)

        # Create FP32 master weights for FP16 parameters when multi-precision
        # is enabled.
        for p in parameters:
            if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                master_p = self._create_master_weight(p)
                continue
            if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision:
                warnings.warn(
                    "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence. "
                    "Consider using multi_precision=True option of the SGD optimizer."
                )

    @no_grad
    def _append_optimize_op(self, block, param_and_grad):
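        # With multi_precision, an FP16 parameter is updated together with
        # its FP32 master weight, which is looked up here and fed to the op.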

        find_master = self._multi_precision and param_and_grad[
            0].dtype == core.VarDesc.VarType.FP16
        master_weight = (self._master_weights[param_and_grad[0].name]
                         if find_master else None)

        lr = self._create_param_lr(param_and_grad)
        if in_dygraph_mode():
            _C_ops.sgd_(param_and_grad[0], lr, param_and_grad[1], master_weight,
                        find_master)
            return None
        if _in_legacy_dygraph():
            _legacy_C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
                              master_weight, param_and_grad[0], master_weight)
            return None

        assert isinstance(block, framework.Block)
        # create the optimize op
        inputs = {
            "Param": param_and_grad[0],
            "Grad": param_and_grad[1],
            "LearningRate": lr
        }

        outputs = {"ParamOut": param_and_grad[0]}

        attrs = {"multi_precision": find_master}

        if find_master:
            inputs["MasterParam"] = master_weight
            outputs["MasterParamOut"] = master_weight

        sgd_op = block.append_op(type=self.type,
                                 inputs=inputs,
                                 outputs=outputs,
                                 attrs=attrs,
                                 stop_gradient=True)

        return sgd_op


class MomentumOptimizer(Optimizer):
    r"""

    Simple Momentum optimizer with velocity state

    This optimizer has a flag for Nesterov Momentum.

    The update equations are as follows:

    .. math::

        & velocity = mu * velocity + gradient

        & if (use\_nesterov):

        &\quad   param = param - (gradient + mu * velocity) * learning\_rate

        & else:

        &\quad   param = param - learning\_rate * velocity

    Parameters:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as data element.
        momentum (float): Momentum factor
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        use_nesterov (bool, optional): Enables Nesterov momentum, default is false.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                moment_optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
                moment_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    """
    _velocity_acc_str = "velocity"

    def __init__(self,
                 learning_rate,
                 momentum,
                 parameter_list=None,
                 use_nesterov=False,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        assert momentum is not None
        super(MomentumOptimizer, self).__init__(learning_rate=learning_rate,
                                                parameter_list=parameter_list,
                                                regularization=regularization,
                                                grad_clip=grad_clip,
                                                name=name)
        self.type = "momentum"
        self._momentum = momentum
        self._use_nesterov = bool(use_nesterov)

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._velocity_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        velocity_acc = self._get_accumulator(self._velocity_acc_str,
                                             param_and_grad[0])
        lr = self._create_param_lr(param_and_grad)
        master_weight = None
        if framework._non_static_mode():
            _, _, _ = _legacy_C_ops.momentum(
                param_and_grad[0], param_and_grad[1], velocity_acc, lr,
                master_weight, param_and_grad[0], velocity_acc, master_weight,
                'mu', self._momentum, 'use_nesterov', self._use_nesterov)
            return None

        attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov}
        inputs = {
            "Param": [param_and_grad[0]],
            "Grad": [param_and_grad[1]],
            "Velocity": [velocity_acc],
            "LearningRate": [lr]
        }

        outputs = {
            "ParamOut": [param_and_grad[0]],
            "VelocityOut": [velocity_acc]
        }
        # create the momentum optimize op
        momentum_op = block.append_op(type=self.type,
                                      inputs=inputs,
                                      outputs=outputs,
                                      attrs=attrs,
                                      stop_gradient=True)

        return momentum_op


class DGCMomentumOptimizer(Optimizer):
    r"""
	:api_attr: Static Graph

    DGC (Deep Gradient Compression) Momentum Optimizer. Original paper is https://arxiv.org/abs/1712.01887

    DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\
        only gradients larger than a threshold are transmitted.

    To avoid losing information, DGC accumulates the rest of the gradients locally.

    Eventually, these gradients become large enough to be transmitted.

    Thus, DGC sends the large gradients immediately but eventually sends all of the gradients over time.

    To ensure no loss of accuracy, DGC employs momentum correction and local gradient clipping on top of the gradient sparsification to maintain model performance.

    DGC also uses momentum factor masking and warmup training to overcome the staleness problem caused by reduced communication.

    This optimizer will do two things:

        1. Compress the gradient by taking the TopK important values from the \
            gradient tensor and using them for allreduce to reduce network bandwidth.

        2. Call momentum to optimize the cost.

    Args:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            It can be a float value or a Variable with one float value as a data element.
        momentum (float): Momentum factor.
        rampup_begin_step (int): The beginning step from which gradient compression is implemented.
        rampup_step (int): Time steps used in sparsity warm-up periods. Default is 1.
            For example, if the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 100, \
                it will use 0.75 at 0~19 steps, and 0.9375 at 20~39 steps, and so on. \
                When the end of the sparsity array is reached, it will use 0.999 thereafter.
        sparsity (list[float]): Take the top important elements from the gradient tensor; the kept ratio is (1 - current sparsity). \
            Default is [0.999]. For example, if the sparsity is [0.99, 0.999], \
                the top [1%, 0.1%] important elements will be transmitted.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        use_nesterov (bool): Enables Nesterov momentum. True means use Nesterov. Default is False.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipByNorm, optional): Gradient clipping strategy. ``DGCMomentumOptimizer`` only supports
            :ref:`api_fluid_clip_GradientClipByNorm` , and if not, it will raise TypeError. Default None,
            meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            optimizer = fluid.optimizer.DGCMomentumOptimizer(
                        learning_rate=0.0001,
                        momentum=0.9,
                        rampup_step=1000,
                        rampup_begin_step=1252,
                        sparsity=[0.999, 0.999])

    """
    _u_velocity_acc_str = "_dgc_u_"
    _v_velocity_acc_str = "_dgc_v_"

    def __init__(self,
                 learning_rate,
                 momentum,
                 rampup_begin_step,
                 rampup_step=1,
                 sparsity=[0.999],
                 parameter_list=None,
                 use_nesterov=False,
                 num_trainers=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        if framework._non_static_mode():
            raise Exception(
                "DGCMomentumOptimizer is not supported in dygraph mode.")

        assert core.is_compiled_with_cuda(), \
            "Paddle is not compiled with CUDA. DGC only supports GPU for now."

        assert learning_rate is not None
        assert momentum is not None
        super(DGCMomentumOptimizer,
              self).__init__(learning_rate=learning_rate,
                             parameter_list=parameter_list,
                             regularization=regularization,
                             grad_clip=grad_clip,
                             name=name)
        self.type = "dgc_momentum"
        self._momentum = momentum
        self._use_nesterov = bool(use_nesterov)

        assert rampup_begin_step >= 0, "rampup_begin_step must be >= 0"
        self._rampup_begin_step = rampup_begin_step
        self._rampup_step = rampup_step
        self._sparsity = sparsity

        self._rampup_begin_step_var = None
        self._global_step_var = None

        self._dgc_clip_norm = None
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipByNorm):
                raise TypeError(
                    "The type of grad_clip should be 'GradientClipByNorm', because DGCMomentumOptimizer only supports GradientClipByNorm"
                )
            assert isinstance(
                num_trainers, int
            ), "The type of num_trainers should be 'int', but received %s" % type(
                num_trainers)
            assert num_trainers > 0, "The value of num_trainers should be greater than 0!"

            self._num_trainers = num_trainers
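            # Following the DGC paper, the local clip norm is scaled by
            # num_trainers ** -0.5 before gradients are aggregated.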
            self._dgc_clip_norm = grad_clip.clip_norm * (num_trainers**-0.5)

        self.regular_type, self.regular_coeff = self._get_regularization_param(
            self.regularization)

    def _get_regularization_param(self, regularization):
        regular_type = 0
        regular_coeff = 0.0

        if regularization is not None:
            regular_coeff = regularization._regularization_coeff
            from .regularizer import L1Decay, L2Decay
            if isinstance(regularization, L1Decay):
                regular_type = 1
            elif isinstance(regularization, L2Decay):
                regular_type = 2
            else:
                assert False, 'regularization must be None|L1Decay|L2Decay'
        return regular_type, regular_coeff

    def _is_use_dgc(self, param_var, grad_var):
        # DGC only applies to dense FP32 parameters with at least 16384
        # elements; everything else falls back to plain momentum.
        var_numel = abs(reduce(lambda x, y: x * y, param_var.shape))
        if var_numel < 16384 or \
           param_var.type == core.VarDesc.VarType.SELECTED_ROWS or \
           grad_var.type == core.VarDesc.VarType.SELECTED_ROWS or \
           param_var.dtype != core.VarDesc.VarType.FP32:
            return False
        return True

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
        velocity_acc = self._get_accumulator(self._u_velocity_acc_str,
                                             param_and_grad[0])
        assert velocity_acc is not None

        inputs = {
            "Param": param_and_grad[0],
            "Grad": param_and_grad[1],
            "Velocity": velocity_acc,
            "LearningRate": self._create_param_lr(param_and_grad),
        }
        outputs = {
            "ParamOut": param_and_grad[0],
            "VelocityOut": velocity_acc,
        }
        attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov}

        if not self._is_use_dgc(param_and_grad[0], param_and_grad[1]):
            type = "momentum"
        else:
            type = "dgc_momentum"
            inputs.update({
                "current_step": self._global_step_var,
                "nranks": self._nranks_var
            })
            outputs.update({'Grad_out': param_and_grad[1]})
            attrs.update({"rampup_begin_step": float(self._rampup_begin_step)})

        # create the dgc momentum optimize op
        dgc_momentum_op = block.append_op(type=type,
                                          inputs=inputs,
                                          outputs=outputs,
                                          attrs=attrs,
                                          stop_gradient=True)
        return dgc_momentum_op

    def _add_auto_increment_var(self, counter_name, begin, step=1):
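        # The counter starts from begin - 1; a prepended increment op adds
        # `step` at the beginning of every iteration, so the first value the
        # program observes is `begin`.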
        helper = LayerHelper('global_step_counter')
        counter, is_new_var = helper.create_or_get_global_variable(
            name=counter_name, dtype='float32', shape=[1], persistable=True)
        if is_new_var:
            helper.set_variable_initializer(counter,
                                            initializer=Constant(
                                                value=float(begin - 1),
                                                force_cpu=True))
            helper.main_program.global_block()._prepend_op(
                type='increment',
                inputs={'X': [counter]},
                outputs={'Out': [counter]},
                attrs={'step': float(step)},
                stop_gradient=True)
            counter.stop_gradient = True

        return counter

    def _add_nranks_var(self, name, value=-1):
        helper = LayerHelper('global_step_counter')
        counter, is_new_var = helper.create_or_get_global_variable(
            name=name, dtype='float32', shape=[1], persistable=True)
        if is_new_var:
            helper.set_variable_initializer(counter,
                                            initializer=Constant(
                                                value=float(value),
                                                force_cpu=True))
            counter.stop_gradient = True

        return counter

    def _append_dgc_ops(self, param_and_grads):
        main_program = default_main_program()
        main_program._enable_dgc = True

        # step counter
        self._global_step_var = self._add_auto_increment_var(
            counter_name=core.dgc.kDGCCounterName(), begin=0)

        self._nranks_var = self._add_nranks_var(name=core.dgc.kDGCNRanksName(),
                                                value=-1)

        # rampup begin step var for all_reduce_op_handle
        self._rampup_begin_step_var = tensor.create_global_var(
            shape=[1],
            dtype=core.VarDesc.VarType.FP32,
            persistable=True,
            name=core.dgc.kDGCRampUpBeginStepName(),
            value=self._rampup_begin_step * 1.0,
            force_cpu=True)
        self.helper = LayerHelper(self.__class__.__name__)

        for param_var, grad_var in param_and_grads:
            # reuse velocity in dgc_op and dgc_momentum_op
            u_var = self._add_accumulator(self._u_velocity_acc_str, param_var)

            if not self._is_use_dgc(param_var, grad_var):
                continue

            v_var = self._add_accumulator(self._v_velocity_acc_str, param_var)
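
            # Per-parameter global buffers consumed by the dgc op: the top-k
            # count k, the encoded sparse gradient, and the allgather buffer.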
            k_var = tensor.create_global_var(shape=[1],
                                             dtype=param_var.dtype,
                                             persistable=True,
                                             name=param_var.name +
                                             core.dgc.kDGCKName(),
                                             value=0.0,
                                             force_cpu=True)

            encoded_var = tensor.create_global_var(shape=[1],
                                                   dtype=param_var.dtype,
                                                   persistable=True,
                                                   name=param_var.name +
                                                   core.dgc.kDGCEncodedName(),
                                                   value=0.0,
                                                   force_cpu=False)

            gather_var = tensor.create_global_var(shape=[1],
                                                  dtype=param_var.dtype,
                                                  persistable=True,
                                                  name=param_var.name +
                                                  core.dgc.kDGCGatherName(),
                                                  value=0.0,
                                                  force_cpu=False)

            # Remove this param/grad pair from the op role var attr of its
            # backward op; DGC takes over the gradient handling from here.
            op_maker = core.op_proto_and_checker_maker
            backward = core.op_proto_and_checker_maker.OpRole.Backward
            for op in main_program.global_block().ops:
                if not self._is_the_backward_op(op):
                    continue

                var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
                if param_var.name not in var_attr:
                    continue

                var_attr.remove(param_var.name)
                var_attr.remove(grad_var.name)
                if len(var_attr) > 1:
                    op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
                else:
                    op._remove_attr(op_maker.kOpRoleVarAttrName())

            clip_var = grad_var
            if self._dgc_clip_norm is not None:
                clip_var = self._append_clip_norm(grad_var, self._dgc_clip_norm)
            self._dgc_op(param_var, clip_var, grad_var, u_var, v_var, k_var,
                         encoded_var, gather_var)

    def _is_the_backward_op(self, op):
        op_maker = core.op_proto_and_checker_maker
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):
            return True
        return False

    def _clip_by_norm(self, x, max_norm, name=None):
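        # Unlike a plain clip-by-norm, the dgc_clip_by_norm op also consumes
        # the global step and the rampup_begin_step attribute, tying clipping
        # to the DGC rampup schedule.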
        args = {'x': x, 'max_norm': max_norm, 'name': name}

        helper = LayerHelper("dgc_clip_by_norm_op", **args)

        if name is None:
1852 1853
            name = unique_name.generate_with_ignorable_key(".".join(
                [helper.name, 'tmp']))
1854

1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869
        out = helper.create_variable(type=x.type,
                                     name=name,
                                     dtype=x.dtype,
                                     persistable=False)

        helper.append_op(type="dgc_clip_by_norm",
                         inputs={
                             "X": x,
                             "current_step": self._global_step_var
                         },
                         attrs={
                             "max_norm": max_norm,
                             "rampup_begin_step": float(self._rampup_begin_step)
                         },
                         outputs={"Out": out})
        return out

    def _append_clip_norm(self, grad_var, clip_norm):
        with grad_var.block.program._backward_role_guard():
            return self._clip_by_norm(x=grad_var,
                                      max_norm=clip_norm,
                                      name=grad_var.name)

    def _dgc_op(self, param_var, clip_var, grad_var, u_var, v_var, k_var,
                encoded_var, gather_var):
        block = framework.default_main_program().global_block()
        op_maker = core.op_proto_and_checker_maker

        regular_type = self.regular_type
        regular_coeff = self.regular_coeff
        # The regularizer set on the Parameter itself takes higher priority.
        if param_var.regularizer is not None:
            regular_type, regular_coeff = self._get_regularization_param(
                param_var.regularizer)

        dgc_op = block.append_op(type="dgc",
                                 inputs={
                                     "U": u_var,
                                     "V": v_var,
                                     "Grad": clip_var,
                                     "Param": param_var,
                                     "current_step": self._global_step_var,
                                     "nranks": self._nranks_var,
                                 },
                                 outputs={
                                     "U_out": u_var,
                                     "V_out": v_var,
                                     "EncodeGrad": encoded_var,
                                     "k": k_var,
                                     "Grad_out": grad_var,
                                     "GatherBuff": gather_var,
                                 },
                                 attrs={
                                     "m":
                                     self._momentum,
                                     "sparsity":
                                     self._sparsity,
                                     "use_nesterov":
                                     self._use_nesterov,
                                     "rampup_begin_step":
                                     float(self._rampup_begin_step),
                                     "rampup_step":
                                     float(self._rampup_step),
                                     "regular_coeff":
                                     float(regular_coeff),
                                     "regular_type":
                                     int(regular_type),
                                 },
                                 stop_gradient=True)

        backward = op_maker.OpRole.Backward
        dgc_op._set_attr(op_maker.kOpRoleAttrName(), backward)
        dgc_op._set_attr(op_maker.kOpRoleVarAttrName(),
                         [param_var.name, grad_var.name])

    @imperative_base.no_grad
    def apply_gradients(self, params_grads):
        # Note: since we can't use all_reduce_op now,
        # dgc_op should be the last op of one grad.
        # Maybe need a grad allreduce pass.
        self._append_dgc_ops(params_grads)

        params_grads = sorted(params_grads, key=lambda x: x[0].name)
        params_grads, table_param_and_grad, table_optimize_op = \
            self._process_distribute_lookuptable(params_grads)

        not_dgc_params_grads = []
        dgc_params_grads = []
        # DGC clip and regularization in optimizer.backward
        for param, grad in params_grads:
            if not self._is_use_dgc(param, grad):
                not_dgc_params_grads.append((param, grad))
            else:
                dgc_params_grads.append((param, grad))

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            not_dgc_params_grads = self._grad_clip(not_dgc_params_grads)
        else:
            not_dgc_params_grads = append_gradient_clip_ops(
                not_dgc_params_grads)

        not_dgc_params_grads = self.append_regularization_ops(
            not_dgc_params_grads, self.regularization)

        params_grads = not_dgc_params_grads + dgc_params_grads
        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        optimize_ops = self._create_optimization_pass(params_grads)
        if table_optimize_op is not None:
            optimize_ops.append(table_optimize_op)
            params_grads.append(table_param_and_grad)

        return optimize_ops


class LarsMomentumOptimizer(Optimizer):
    r"""
    Momentum optimizer with LARS support

    The update equations are as follows:

    .. math::

        & local\_learning\_rate = learning\_rate * lars\_coeff * \\
          \\frac{||param||}{||gradient|| + lars\_weight\_decay * ||param||}

        & velocity = mu * velocity + local\_learning\_rate * (gradient + lars\_weight\_decay * param + epsilon)

        & param = param - velocity

    Parameters:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as data element.
        momentum (float): momentum factor
        lars_coeff (float): Defines how much we trust the layer to change its weights.
        lars_weight_decay (float): Weight decay coefficient for decaying using LARS.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.
        exclude_from_weight_decay (list[str], optional): Name strings of layers which will be excluded from lars weight decay. Default is None.
        epsilon (float, optional): Epsilon to avoid division by zero when calculating the local learning rate. Default is 0.
        multi_precision (bool, optional): Whether to use multi-precision during weight updating.
        rescale_grad (float, optional): Multiply the gradient with `rescale_grad` \
            before updating. Often chosen to be `1.0/batch_size`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            out = fluid.layers.fc(inp, size=3)
            out = fluid.layers.reduce_sum(out)
            optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(out)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            exe.run(
                feed={"inp": np_inp},
                fetch_list=[out.name])
    """
    _velocity_acc_str = "velocity"

    def __init__(self,
                 learning_rate,
                 momentum,
                 lars_coeff=0.001,
                 lars_weight_decay=0.0005,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None,
                 exclude_from_weight_decay=None,
                 epsilon=0,
                 multi_precision=False,
                 rescale_grad=1.0):
        assert learning_rate is not None
        assert momentum is not None
        super(LarsMomentumOptimizer,
              self).__init__(learning_rate=learning_rate,
                             parameter_list=parameter_list,
                             regularization=regularization,
                             grad_clip=grad_clip,
                             name=name)
        self.type = "lars_momentum"
        self._momentum = momentum
        self._lars_coeff = float(lars_coeff)
        self._lars_weight_decay = float(lars_weight_decay)
        self._epsilon = float(epsilon)
        if exclude_from_weight_decay is None:
            self._exclude_from_weight_decay = []
        else:
            self._exclude_from_weight_decay = exclude_from_weight_decay
        self._multi_precision = multi_precision
        self._rescale_grad = float(rescale_grad)
        self._master_weights = {}

    def _create_master_weight(self, param):
        if param.name in self._master_weights:
            var = self._master_weights[param.name]
        else:
            assert isinstance(self.helper, LayerHelper)

            var_name = param.name + '_fp32_master'
            var_name = unique_name.generate(var_name)
            var = layers.create_global_var(name=var_name,
                                           shape=param.shape,
                                           value=0,
                                           dtype='float32',
                                           persistable=True)
            block = self.helper.startup_program.global_block()
            block.append_op(type="cast",
                            inputs={"X": [param]},
                            outputs={"Out": [var]},
                            attrs={
                                "in_dtype": param.dtype,
                                "out_dtype": core.VarDesc.VarType.FP32
                            })
            self._master_weights[param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter
        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched
        Returns:
            accumulator variable for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16
        target_param = self._master_weights[
            param.name] if find_master else param
        target_name = target_param.name
        if (name not in self._accumulators
                or target_name not in self._accumulators[name]):
            raise Exception(
                "Accumulator {} does not exist for parameter {}".format(
                    name, target_name))
        return self._accumulators[name][target_name]

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                master_p = self._create_master_weight(p)
                self._add_accumulator(self._velocity_acc_str, master_p)
                continue
            if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision:
                warnings.warn(
                    "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence."
                    "Consider using multi_precision=True option of the Lars optimizer."
                )
            self._add_accumulator(self._velocity_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
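        # Weight decay is disabled for any parameter whose name contains one
        # of the substrings listed in exclude_from_weight_decay.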
        _lars_weight_decay = self._lars_weight_decay
        param_name = param_and_grad[0].name
        if len(self._exclude_from_weight_decay) > 0:
            for name in self._exclude_from_weight_decay:
                if name in param_name:
                    _lars_weight_decay = 0.0
                    break

        velocity_acc = self._get_accumulator(self._velocity_acc_str,
                                             param_and_grad[0])
        lr = self._create_param_lr(param_and_grad)

        find_master = self._multi_precision and param_and_grad[
            0].dtype == core.VarDesc.VarType.FP16
        master_weight = (self._master_weights[param_and_grad[0].name]
                         if find_master else None)

        attrs = {
            "mu": self._momentum,
            "lars_coeff": self._lars_coeff,
            "lars_weight_decay": [_lars_weight_decay],
            "multi_precision": find_master,
            "epsilon": self._epsilon,
            "rescale_grad": self._rescale_grad
        }

        inputs = {
            "Param": param_and_grad[0],
            "Grad": param_and_grad[1],
            "Velocity": velocity_acc,
            "LearningRate": lr
        }

        outputs = {"ParamOut": param_and_grad[0], "VelocityOut": velocity_acc}

        if find_master:
            inputs["MasterParam"] = master_weight
            outputs["MasterParamOut"] = master_weight

        if framework._non_static_mode():
            tmp, tmp2 = _legacy_C_ops.lars_momentum(
                [param_and_grad[0]], [param_and_grad[1]], [velocity_acc], [lr],
                [param_and_grad[0]], [velocity_acc], "mu", self._momentum,
                "lars_coeff", self._lars_coeff, "lars_weight_decay",
                [_lars_weight_decay], "multi_precision", find_master, "epsilon",
                self._epsilon, "rescale_grad", self._rescale_grad)
        else:
            # create the momentum optimize op
            momentum_op = block.append_op(type=self.type,
                                          inputs=inputs,
                                          outputs=outputs,
                                          attrs=attrs,
                                          stop_gradient=True)

            return momentum_op


class AdagradOptimizer(Optimizer):
    r"""
    The Adaptive Gradient optimizer (Adagrad for short) can adaptively assign
    different learning rates to individual parameters.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        moment\_out &= moment + grad * grad

        param\_out &= param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon}

    Related paper: `Adaptive Subgradient Methods for Online Learning and
    Stochastic Optimization <http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf>`_.

    The original paper does not have the ``epsilon`` attribute. It is added here
    in our implementation, as also proposed in `Per-parameter adaptive learning rate
    methods <http://cs231n.github.io/neural-networks-3/#ada>`_
    for numerical stability to avoid the division by zero error.

    Args:
        learning_rate (float|Variable): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-06.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.
        initial_accumulator_value (float, optional): Initial value for moment accumulator.
            The default value is 0.0.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
            inp = fluid.data(name="inp", shape=[2, 2])
            out = fluid.layers.fc(inp, size=3)
            out = fluid.layers.reduce_sum(out)
            optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)
            optimizer.minimize(out)
            optimizer.minimize(out)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            exe.run(
                feed={"inp": np_inp},
                fetch_list=[out.name])
    """
    _moment_acc_str = "moment"

    def __init__(self,
                 learning_rate,
                 epsilon=1.0e-6,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None,
                 initial_accumulator_value=0.0):
        assert learning_rate is not None
        assert epsilon is not None
        super(AdagradOptimizer, self).__init__(learning_rate=learning_rate,
                                               parameter_list=parameter_list,
                                               regularization=regularization,
                                               grad_clip=grad_clip,
                                               name=name)
2267 2268
        self.type = "adagrad"
        self._epsilon = epsilon
2269
        self.initial_accumulator_value = initial_accumulator_value

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._moment_acc_str,
                                  p,
                                  fill_value=self.initial_accumulator_value)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])
        if in_dygraph_mode():
            _C_ops.adagrad_(param_and_grad[0], param_and_grad[1], moment_acc,
                            self._create_param_lr(param_and_grad),
                            self._epsilon)
            return None
        elif _in_legacy_dygraph():
            _legacy_C_ops.adagrad(param_and_grad[0], param_and_grad[1],
                                  moment_acc,
                                  self._create_param_lr(param_and_grad),
                                  param_and_grad[0], moment_acc, "epsilon",
                                  self._epsilon)
            return None
        else:
            # Create the adagrad optimizer op
            adagrad_op = block.append_op(
                type=self.type,
                inputs={
                    "Param": param_and_grad[0],
                    "Grad": param_and_grad[1],
                    "Moment": moment_acc,
                    "LearningRate": self._create_param_lr(param_and_grad)
                },
                outputs={
                    "ParamOut": param_and_grad[0],
                    "MomentOut": moment_acc
                },
                attrs={"epsilon": self._epsilon},
                stop_gradient=True)

            return adagrad_op


class AdamOptimizer(Optimizer):
    r"""
    The Adam optimizer uses an optimization described at the end
    of section 2 of `Adam paper <https://arxiv.org/abs/1412.6980>`_ ,
    it can dynamically adjust the learning rate of each parameter using
    the 1st moment estimates and the 2nd moment estimates of the gradient.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        t & = t + 1

        moment\_1\_out & = {\\beta}_1 * moment\_1 + (1 - {\\beta}_1) * grad

        moment\_2\_out & = {\\beta}_2 * moment\_2 + (1 - {\\beta}_2) * grad * grad

        learning\_rate & = learning\_rate * \\
                          \\frac{\sqrt{1 - {\\beta}_2^t}}{1 - {\\beta}_1^t}

        param\_out & = param - learning\_rate * \\frac{moment\_1}{\sqrt{moment\_2} + \epsilon}

    Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_

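    As an informal illustration, the rule above can be sketched in NumPy; the
    helper below is hypothetical and only mirrors the documented math, it is
    not part of the API:

    .. code-block:: python

        import numpy as np

        def adam_step(param, grad, m1, m2, t, lr=0.001,
                      beta1=0.9, beta2=0.999, epsilon=1e-8):
            # update the biased first and second moment estimates
            m1 = beta1 * m1 + (1 - beta1) * grad
            m2 = beta2 * m2 + (1 - beta2) * grad * grad
            # fold the bias correction into the learning rate
            lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
            param -= lr_t * m1 / (np.sqrt(m2) + epsilon)
            return param, m1, m2
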
    Args:
        learning_rate (float|Variable, optional): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type. The default value is 0.001.
        beta1 (float|Variable, optional): The exponential decay rate for the 1st moment estimates.
            It should be a float number or a Variable with shape [1] and data type as float32.
            The default value is 0.9.
        beta2 (float|Variable, optional): The exponential decay rate for the 2nd moment estimates.
            It should be a float number or a Variable with shape [1] and data type as float32.
            The default value is 0.999.
        epsilon (float|Tensor, optional): A small float value for numerical stability.
            It should be a float number or a Variable with shape [1] and data type as float32.
            The default value is 1e-08.
        parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
            :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has already \
            set a regularizer using :ref:`api_fluid_ParamAttr` , the regularization setting here in the optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.
        lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
            The accumulators are updated at every step. Every element of the two moving-averages
            is updated in both dense mode and sparse mode. If the parameter is very large,
            the update may be very slow. The lazy mode only updates the elements that have
            gradients in the current mini-batch, so it can be much faster. But this mode has
            different semantics from the original Adam algorithm and may lead to different results.
            The default value is False.
        use_global_beta_pow (bool, optional): Whether to use global beta_pow. If true, Adam will use global beta_pow
            for the whole model instead of creating beta_pow for each parameter. Default is False.
        flatten_param_grads (bool, optional): Whether to flatten all parameters and gradients. Default is False.
        align_size (int, optional): The alignment size when flattening parameters and gradients. Default is -1,
            which means to use the same align_size as the allocator.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.data(name='x', shape=[None, 13], dtype='float32')
                y = fluid.data(name='y', shape=[None, 1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)
                adam_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

        .. code-block:: python

            # Adam with beta1/beta2 as Variable
            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.data(name='x', shape=[None, 13], dtype='float32')
                y = fluid.data(name='y', shape=[None, 1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                # define decayed beta1/beta2 and epsilon as global variables
                def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate, epsilon_init):
                    global_step = lr_scheduler._decay_step_counter()

                    beta1 = fluid.layers.create_global_var(
                        shape=[1],
                        value=float(beta1_init),
                        dtype='float32',
                        # set persistable for save checkpoints and resume
                        persistable=True,
                        name="beta1")
                    beta2 = fluid.layers.create_global_var(
                        shape=[1],
                        value=float(beta2_init),
                        dtype='float32',
                        # set persistable for save checkpoints and resume
                        persistable=True,
                        name="beta2")
                    epsilon = fluid.layers.create_global_var(
                        shape=[1],
                        value=float(epsilon_init),
                        dtype='float32',
                        # set persistable for save checkpoints and resume
                        persistable=True,
                        name="epsilon")

                    div_res = global_step / decay_steps
                    decayed_beta1 = beta1_init * (decay_rate**div_res)
                    decayed_beta2 = beta2_init * (decay_rate**div_res)
                    fluid.layers.assign(decayed_beta1, beta1)
                    fluid.layers.assign(decayed_beta2, beta2)

                    return beta1, beta2, epsilon

                beta1, beta2, epsilon = get_decayed_betas(0.9, 0.99, 1e5, 0.9, 1e-8)
                adam_optimizer = fluid.optimizer.AdamOptimizer(
                                                    learning_rate=0.01,
                                                    beta1=beta1,
                                                    beta2=beta2,
                                                    epsilon=epsilon)
                adam_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
    """
    _moment1_acc_str = "moment1"
    _moment2_acc_str = "moment2"
    _beta1_pow_acc_str = "beta1_pow_acc"
    _beta2_pow_acc_str = "beta2_pow_acc"

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None,
                 lazy_mode=False,
                 use_global_beta_pow=False,
                 flatten_param_grads=False,
                 align_size=-1):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(AdamOptimizer,
              self).__init__(learning_rate=learning_rate,
                             parameter_list=parameter_list,
                             regularization=regularization,
                             grad_clip=grad_clip,
                             flatten_param_grads=flatten_param_grads,
                             align_size=align_size,
                             name=name)
        self.type = "adam"
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._lazy_mode = lazy_mode
        self._use_global_beta_pow = use_global_beta_pow

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        # Create accumulator tensors for first and second moments
        for p in parameters:
            self._add_accumulator(self._moment1_acc_str, p)
            self._add_accumulator(self._moment2_acc_str, p)
            if not self._use_global_beta_pow:
                self._add_accumulator(
                    name=self._beta1_pow_acc_str,
                    param=p,
                    fill_value=0.9 if isinstance(self._beta1, Variable) \
                            else self._beta1,
                    shape=[1],
                    type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
                self._add_accumulator(
                    name=self._beta2_pow_acc_str,
                    param=p,
                    fill_value=0.999 if isinstance(self._beta2, Variable) \
                            else self._beta2,
                    shape=[1],
                    type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
        if self._use_global_beta_pow:
            self._add_global_accumulator(
                name=self._beta1_pow_acc_str,
                fill_value=0.9 if isinstance(self._beta1, Variable) \
                        else self._beta1,
                shape=[1],
                type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
            self._add_global_accumulator(
                name=self._beta2_pow_acc_str,
                fill_value=0.999 if isinstance(self._beta2, Variable) \
                        else self._beta2,
                shape=[1],
                type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment1 = self._get_accumulator(self._moment1_acc_str,
                                        param_and_grad[0])
        moment2 = self._get_accumulator(self._moment2_acc_str,
                                        param_and_grad[0])
        if self._use_global_beta_pow:
            beta1_pow_acc = self._get_global_accumulator(
                self._beta1_pow_acc_str)
            beta2_pow_acc = self._get_global_accumulator(
                self._beta2_pow_acc_str)
        else:
            beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                  param_and_grad[0])
            beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
                                                  param_and_grad[0])
        lr = self._create_param_lr(param_and_grad)
        # create the adam optimize op

        if framework._non_static_mode():
            _beta1 = self._beta1 if not isinstance(
                self._beta1, Variable) else self._beta1.numpy().item(0)
            _beta2 = self._beta2 if not isinstance(
                self._beta2, Variable) else self._beta2.numpy().item(0)
            master_weight = None
            _, _, _, _, _, _ = _legacy_C_ops.adam(
                param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                beta1_pow_acc, beta2_pow_acc, master_weight, param_and_grad[0],
                moment1, moment2, beta1_pow_acc, beta2_pow_acc, master_weight,
                'epsilon', self._epsilon, 'lazy_mode', self._lazy_mode,
                'min_row_size_to_use_multithread', 1000, 'beta1', _beta1,
                'beta2', _beta2, 'use_global_beta_pow',
                self._use_global_beta_pow)

            return None

        inputs = {
            "Param": [param_and_grad[0]],
            "Grad": [param_and_grad[1]],
            "LearningRate": [lr],
            "Moment1": [moment1],
            "Moment2": [moment2],
            "Beta1Pow": [beta1_pow_acc],
            "Beta2Pow": [beta2_pow_acc]
        }

        # Pass found_inf to adam, to skip the update not only for the param but also for momentum and beta_pow
        found_inf = self._get_auxiliary_var('found_inf')

        if found_inf:
            inputs['SkipUpdate'] = found_inf

        outputs = {
            "ParamOut": [param_and_grad[0]],
            "Moment1Out": [moment1],
            "Moment2Out": [moment2],
            "Beta1PowOut": [beta1_pow_acc],
            "Beta2PowOut": [beta2_pow_acc],
        }
        attrs = {
            "lazy_mode": self._lazy_mode,
            "min_row_size_to_use_multithread": 1000,
            'use_global_beta_pow': self._use_global_beta_pow
        }

        if isinstance(self._beta1, Variable):
            inputs['Beta1Tensor'] = self._beta1
        else:
            attrs['beta1'] = self._beta1
        if isinstance(self._beta2, Variable):
            inputs['Beta2Tensor'] = self._beta2
        else:
            attrs['beta2'] = self._beta2
        if isinstance(self._epsilon, Variable):
            inputs['EpsilonTensor'] = self._epsilon
        else:
            attrs['epsilon'] = self._epsilon

        adam_op = block.append_op(type=self.type,
                                  inputs=inputs,
                                  outputs=outputs,
                                  attrs=attrs,
                                  stop_gradient=True)

        return adam_op

    def _finish_update(self, block, parameters_and_grads):
        r"""Update beta1_pow and beta2_pow accumulator
        """
        assert isinstance(block, framework.Block)
        if self._use_global_beta_pow:
            beta1_pow_acc = self._get_global_accumulator(
                self._beta1_pow_acc_str)
            beta2_pow_acc = self._get_global_accumulator(
                self._beta2_pow_acc_str)

            with block.program._optimized_guard([]):
                inputs = {"X": beta1_pow_acc}
                outputs = {"Out": beta1_pow_acc}
                attrs = {}
                if isinstance(self._beta1, Variable):
                    inputs["Y"] = self._beta1
                    # use elementwise_mul for better performance
                    block.append_op(type="elementwise_mul",
                                    inputs=inputs,
                                    outputs=outputs,
                                    attrs=attrs,
                                    stop_gradient=True)
                else:
                    attrs['scale'] = self._beta1
                    block.append_op(type="scale",
                                    inputs=inputs,
                                    outputs=outputs,
                                    attrs=attrs,
                                    stop_gradient=True)

                inputs = {"X": beta2_pow_acc}
                outputs = {"Out": beta2_pow_acc}
                attrs = {}
                if isinstance(self._beta2, Variable):
                    inputs["Y"] = self._beta2
                    # use elementwise_mul for better performance
                    block.append_op(type="elementwise_mul",
                                    inputs=inputs,
                                    outputs=outputs,
                                    attrs=attrs,
                                    stop_gradient=True)
                else:
                    attrs['scale'] = self._beta2
                    block.append_op(type="scale",
                                    inputs=inputs,
                                    outputs=outputs,
                                    attrs=attrs,
                                    stop_gradient=True)


class AdamaxOptimizer(Optimizer):
    r"""
    The Adamax optimizer is implemented based on the Adamax Optimization
    in Section 7 of `Adam paper <https://arxiv.org/abs/1412.6980>`_.
    The Adamax algorithm is a variant of the Adam algorithm based on the infinity norm,
    which makes the learning rate update algorithm more stable and simple.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        t & = t + 1

        moment\_out & = {\\beta}_1 * moment + (1 - {\\beta}_1) * grad

        inf\_norm\_out & = max({\\beta}_2 * inf\_norm + \epsilon, |grad|)

        learning\_rate & = \\frac{learning\_rate}{1 - {\\beta}_1^t}

        param\_out & = param - learning\_rate * \\frac{moment\_out}{inf\_norm\_out}

    Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_

    The original paper does not have an ``epsilon`` attribute,
    it is added here for numerical stability to prevent the division by zero error.

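    As an informal illustration, one Adamax step can be sketched in NumPy; the
    helper below is hypothetical and only mirrors the documented math, it is
    not part of the API:

    .. code-block:: python

        import numpy as np

        def adamax_step(param, grad, moment, inf_norm, t, lr=0.001,
                        beta1=0.9, beta2=0.999, epsilon=1e-8):
            # the first moment is the usual exponential moving average
            moment = beta1 * moment + (1 - beta1) * grad
            # the second moment is replaced by an exponentially weighted
            # infinity norm of the gradient history
            inf_norm = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
            lr_t = lr / (1 - beta1**t)
            param -= lr_t * moment / inf_norm
            return param, moment, inf_norm
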
    Args:
        learning_rate (float|Variable, optional): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type. The default value is 0.001.
        beta1 (float, optional): The exponential decay rate for the 1st moment estimates.
            The default value is 0.9.
        beta2 (float, optional): The exponential decay rate for the 2nd moment estimates.
            The default value is 0.999.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-08.
        parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
            :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has already \
            set a regularizer using :ref:`api_fluid_ParamAttr` , the regularization setting here in the optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    **Notes**:
        **Currently, AdamaxOptimizer doesn't support sparse parameter optimization.**

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          # First create the Executor.
          place = fluid.CPUPlace() # fluid.CUDAPlace(0)
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.data(name='X', shape=[None, 1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2)
              adam.minimize(loss)

          # Run the startup program once and only once.
          exe.run(startup_program)

          x = numpy.random.random(size=(10, 1)).astype('float32')
          outs = exe.run(program=train_program,
                         feed={'X': x},
                         fetch_list=[loss.name])
    """
    _moment_acc_str = "moment"
    _inf_norm_acc_str = "inf_norm"
    _beta1_pow_acc_str = "beta1_pow_acc"

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(AdamaxOptimizer, self).__init__(learning_rate=learning_rate,
                                              parameter_list=parameter_list,
                                              regularization=regularization,
                                              grad_clip=grad_clip,
                                              name=name)
        self.type = "adamax"
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        # Create accumulator tensors for first moment and infinity norm
        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)
            self._add_accumulator(self._inf_norm_acc_str, p)
            self._add_accumulator(name=self._beta1_pow_acc_str,
                                  param=p,
                                  fill_value=self._beta1,
                                  shape=[1])

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0])
        inf_norm = self._get_accumulator(self._inf_norm_acc_str,
                                         param_and_grad[0])
        beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                              param_and_grad[0])

        if framework.in_dygraph_mode():
            _C_ops.adamax_(param_and_grad[0], param_and_grad[1],
                           self._create_param_lr(param_and_grad), moment,
                           inf_norm, beta1_pow_acc, self._beta1, self._beta2,
                           self._epsilon)
        elif framework._in_legacy_dygraph():
            _legacy_C_ops.adamax(param_and_grad[0], param_and_grad[1],
                                 self._create_param_lr(param_and_grad), moment,
                                 inf_norm, beta1_pow_acc, param_and_grad[0],
                                 moment, inf_norm, "beta1", self._beta1,
                                 "beta2", self._beta2, "epsilon", self._epsilon)
        else:
            # create the adamax optimize op
            adamax_op = block.append_op(
                type=self.type,
                inputs={
                    "Param": param_and_grad[0],
                    "Grad": param_and_grad[1],
                    "LearningRate": self._create_param_lr(param_and_grad),
                    "Moment": moment,
                    "InfNorm": inf_norm,
                    "Beta1Pow": beta1_pow_acc
                },
                outputs={
                    "ParamOut": param_and_grad[0],
                    "MomentOut": moment,
                    "InfNormOut": inf_norm
                },
                attrs={
                    "beta1": self._beta1,
                    "beta2": self._beta2,
                    "epsilon": self._epsilon
                },
                stop_gradient=True)

            return adamax_op

    def _finish_update(self, block, parameters_and_grads):
        """Update Beta1 Power accumulator
        """
        assert isinstance(block, framework.Block)
        for param, grad in parameters_and_grads:
            if grad is None or param.trainable is False:
                continue
            with param.block.program._optimized_guard(
                    [param, grad]), name_scope('adamax'):
                beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                      param)
                if framework._non_static_mode():
                    if framework.in_dygraph_mode():
                        tmp = _C_ops.scale(beta1_pow_acc, self._beta1, 0.0,
                                           True)
                    else:
                        tmp = _legacy_C_ops.scale(beta1_pow_acc, "scale",
                                                  self._beta1)
                    beta1_pow_acc.copy_(tmp, False)
                else:
                    block.append_op(type="scale",
                                    inputs={"X": beta1_pow_acc},
                                    outputs={"Out": beta1_pow_acc},
                                    attrs={"scale": self._beta1},
                                    stop_gradient=True)


class DpsgdOptimizer(Optimizer):
    r"""
    We implement the Dpsgd optimizer according to the CCS16 paper -
    Deep Learning with Differential Privacy.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          # First create the Executor.
          place = fluid.CPUPlace() # fluid.CUDAPlace(0)
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              optimizer = fluid.optimizer.Dpsgd(learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0)
              optimizer.minimize(loss)

          # Run the startup program once and only once.
          exe.run(startup_program)

          x = numpy.random.random(size=(10, 1)).astype('float32')
          outs = exe.run(program=train_program,
                         feed={'X': x},
                         fetch_list=[loss.name])

    Args:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as its data element.
        clip (float): The clipping threshold for the gradient norm.
        batch_size (float): Batch size.
        sigma (float): The standard deviation parameter of the Gaussian noise.
        parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
    Notes:
       Currently, DpsgdOptimizer doesn't support sparse parameter optimization.
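
    As an informal illustration, a DP-SGD style step in the spirit of the CCS16
    recipe can be sketched in NumPy as below; this helper is hypothetical (the
    exact semantics of the ``dpsgd`` op may differ) and is not part of the API:

    .. code-block:: python

        import numpy as np

        def dpsgd_step(param, grad, lr=0.001, clip=10.0, batch_size=16.0,
                       sigma=1.0, rng=np.random.default_rng()):
            # clip the gradient so its L2 norm is at most `clip`
            norm = np.linalg.norm(grad)
            grad = grad / max(1.0, norm / clip)
            # add Gaussian noise scaled by the clip bound, then average
            noise = rng.normal(0.0, sigma * clip, size=grad.shape)
            param -= lr * (grad + noise) / batch_size
            return param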
    """

    def __init__(self,
                 learning_rate=0.001,
                 clip=0.9,
                 batch_size=0.999,
                 sigma=1e-8,
                 parameter_list=None):
        assert learning_rate is not None
        assert clip is not None
        assert batch_size is not None
        assert sigma is not None
        super(DpsgdOptimizer, self).__init__(learning_rate=learning_rate,
                                             parameter_list=parameter_list)
        self.type = "dpsgd"
        self._clip = clip
        self._batch_size = batch_size
        self._sigma = sigma
        '''
        Note(wangzhongpu):
        This property is only used for debugging; there is no need to set it.
        The Dpsgd operator uses time(NULL) as the random seed to generate
        random numbers. However, during debugging, we need deterministic
        results, so we set self._seed to a fixed number.
        '''
        self._seed = None

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        # create the dpsgd optimize op
        if self._seed is None:
            self._seed = 0

        if framework._non_static_mode():
            _legacy_C_ops.dpsgd(param_and_grad[0], param_and_grad[1],
                                self._create_param_lr(param_and_grad),
                                param_and_grad[0], "clip", self._clip,
                                "batch_size", self._batch_size, "sigma",
                                self._sigma, "seed", self._seed)
        else:
            dpsgd_op = block.append_op(type=self.type,
                                       inputs={
                                           "Param":
                                           param_and_grad[0],
                                           "Grad":
                                           param_and_grad[1],
                                           "LearningRate":
                                           self._create_param_lr(param_and_grad)
                                       },
                                       outputs={"ParamOut": param_and_grad[0]},
                                       attrs={
                                           "clip": self._clip,
                                           "batch_size": self._batch_size,
                                           "sigma": self._sigma,
                                           "seed": self._seed
                                       },
                                       stop_gradient=True)

            return dpsgd_op


class DecayedAdagradOptimizer(Optimizer):
    r"""
    The Decayed Adagrad optimizer can be seen as an Adagrad algorithm that introduces
    the decay rate to solve the problem of a sharp drop in the learning rate
    during model training when using the AdagradOptimizer.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        moment\_out & = decay * moment + (1 - decay) * grad * grad

        param\_out & = param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon}

    Related paper: `Adaptive Subgradient Methods for Online Learning and Stochastic
    Optimization <http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf>`_.

    The original paper does not have an ``epsilon`` attribute. It is added here for numerical
    stability to avoid the division by zero error.

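    As an informal illustration, one step of the rule above can be sketched in
    NumPy; the helper below is hypothetical and only mirrors the documented
    math, it is not part of the API:

    .. code-block:: python

        import numpy as np

        def decayed_adagrad_step(param, grad, moment, lr=0.2,
                                 decay=0.95, epsilon=1e-6):
            # a decaying average of squared gradients replaces the full sum,
            # so the effective learning rate does not collapse over time
            moment = decay * moment + (1 - decay) * grad * grad
            param -= lr * grad / (np.sqrt(moment) + epsilon)
            return param, moment
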
    Args:
        learning_rate (float|Variable): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type.
        decay (float, optional): The decay rate. The default value is 0.95.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-06.
        parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
            :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has already \
            set a regularizer using :ref:`api_fluid_ParamAttr` , the regularization setting here in the optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    **Notes**:
        **Currently, DecayedAdagradOptimizer doesn't support sparse parameter optimization.**

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[None, 10], dtype='float32')
            trans = fluid.layers.fc(x, 100)
            cost = fluid.layers.reduce_mean(trans)
            optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.2)
            optimizer.minimize(cost)
    """
    _moment_acc_str = "moment"

    def __init__(self,
                 learning_rate,
                 decay=0.95,
                 epsilon=1.0e-6,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        assert decay is not None
        assert epsilon is not None

        super(DecayedAdagradOptimizer,
              self).__init__(learning_rate=learning_rate,
                             parameter_list=parameter_list,
                             regularization=regularization,
                             grad_clip=grad_clip,
                             name=name)
        self.type = "decayed_adagrad"
        self._decay = decay
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])

        if framework._non_static_mode():
            _legacy_C_ops.decayed_adagrad(param_and_grad[0], param_and_grad[1],
                                          moment_acc,
                                          self._create_param_lr(param_and_grad),
                                          param_and_grad[0], moment_acc,
                                          "epsilon", self._epsilon, "decay",
                                          self._decay)
        else:
            # Create the decayed adagrad optimizer op
            decayed_adagrad_op = block.append_op(
                type=self.type,
                inputs={
                    "Param": param_and_grad[0],
                    "Grad": param_and_grad[1],
                    "Moment": moment_acc,
                    "LearningRate": self._create_param_lr(param_and_grad)
                },
                outputs={
                    "ParamOut": param_and_grad[0],
                    "MomentOut": moment_acc
                },
                attrs={
                    "epsilon": self._epsilon,
                    "decay": self._decay
                },
                stop_gradient=True)

            return decayed_adagrad_op


class AdadeltaOptimizer(Optimizer):
    r"""
    **Notes: This API does not support sparse parameter optimization.**

    Adadelta Optimizer. Please refer to this for details:
    `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD <https://arxiv.org/abs/1212.5701>`_.

    The update is done as follows:

    .. math::

        E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2

        learning\_rate &= \sqrt{ ( E(dx_{t-1}^2) + \\epsilon ) / ( E(g_t^2) + \\epsilon ) }

        E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\_rate)^2

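    As an informal illustration, one Adadelta step can be sketched in NumPy;
    the helper below is hypothetical and only mirrors the documented math, it
    is not part of the API:

    .. code-block:: python

        import numpy as np

        def adadelta_step(param, grad, avg_sq_grad, avg_sq_update,
                          rho=0.95, epsilon=1e-6):
            # running averages of squared gradients and squared updates
            avg_sq_grad = rho * avg_sq_grad + (1 - rho) * grad * grad
            update = -np.sqrt((avg_sq_update + epsilon) /
                              (avg_sq_grad + epsilon)) * grad
            avg_sq_update = rho * avg_sq_update + (1 - rho) * update * update
            param += update
            return param, avg_sq_grad, avg_sq_update
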
    Args:
        learning_rate (float|Variable): global learning rate.
        epsilon (float): a small float number for numeric stability. Default 1.0e-6.
        rho (float): a floating point value indicating the decay rate. Default 0.95.
        parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
            :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has already \
            set a regularizer using :ref:`api_fluid_ParamAttr` , the regularization setting here in the optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): The default value is None. Normally there is no need for user
                to set this property. For more information, please refer to
                :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            image = fluid.data(name='image', shape=[None, 28], dtype='float32')
            fc = fluid.layers.fc(image, size=10)
            cost = fluid.layers.reduce_mean(fc)
            optimizer = fluid.optimizer.Adadelta(
                learning_rate=0.0003, epsilon=1.0e-6, rho=0.95)

            # optimizer_ops is a list of optimizer operators to update parameters
            # params_grads is a list of (param, param_grad), where param is each
            # parameter and param_grad is the gradient variable of param.
            optimizer_ops, params_grads = optimizer.minimize(cost)
    """

    _avg_squared_grad_acc_str = "_avg_squared_grad"
    _avg_squared_update_acc_str = "_avg_squared_update"

    def __init__(self,
                 learning_rate,
                 epsilon=1.0e-6,
                 rho=0.95,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        super(AdadeltaOptimizer, self).__init__(learning_rate=learning_rate,
                                                parameter_list=parameter_list,
                                                regularization=regularization,
                                                grad_clip=grad_clip,
                                                name=name)
        self.type = "adadelta"
        self._epsilon = epsilon
        self._rho = rho

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._avg_squared_grad_acc_str, p)
            self._add_accumulator(self._avg_squared_update_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        avg_squared_grad_acc = self._get_accumulator(
            self._avg_squared_grad_acc_str, param_and_grad[0])
        avg_squared_update_acc = self._get_accumulator(
            self._avg_squared_update_acc_str, param_and_grad[0])

        if framework.in_dygraph_mode():
            _C_ops.adadelta_(param_and_grad[0], param_and_grad[1],
                             avg_squared_grad_acc, avg_squared_update_acc,
                             self._rho, self._epsilon)
        elif framework._in_legacy_dygraph():
            _legacy_C_ops.adadelta(param_and_grad[0], param_and_grad[1],
                                   avg_squared_grad_acc, avg_squared_update_acc,
                                   param_and_grad[0], avg_squared_grad_acc,
                                   avg_squared_update_acc, "epsilon",
                                   self._epsilon, "rho", self._rho)
        else:
            # Create the adadelta optimizer op
            adadelta_op = block.append_op(type=self.type,
                                          inputs={
                                              "Param":
                                              param_and_grad[0],
                                              "Grad":
                                              param_and_grad[1],
                                              "AvgSquaredGrad":
                                              avg_squared_grad_acc,
                                              "AvgSquaredUpdate":
                                              avg_squared_update_acc
                                          },
                                          outputs={
                                              "ParamOut":
                                              param_and_grad[0],
                                              "AvgSquaredGradOut":
                                              avg_squared_grad_acc,
                                              "AvgSquaredUpdateOut":
                                              avg_squared_update_acc
                                          },
                                          attrs={
                                              "epsilon": self._epsilon,
                                              "rho": self._rho
                                          },
                                          stop_gradient=True)

            return adadelta_op


class RMSPropOptimizer(Optimizer):
    r"""
    Root Mean Square Propagation (RMSProp) is an unpublished, adaptive learning
    rate method. The original slides proposed RMSProp: Slide 29 of
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .

    The original equation is as follows:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2

        w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w)

    The first equation calculates the moving average of the squared gradient for
    each weight; the gradient is then divided by :math:`\\sqrt{r(w,t)}`.

    In some cases, adding a momentum term :math:`\\beta` is beneficial.
    In our implementation, Nesterov momentum is used:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2

        v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{r(w,t) +
            \\epsilon}} \\nabla Q_{i}(w)

        w & = w - v(w, t)

    if centered is True:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2

        g(w, t) & = \\rho g(w, t-1) + (1 - \\rho)\\nabla Q_{i}(w)

        v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{r(w,t) - (g(w, t))^2 +
            \\epsilon}} \\nabla Q_{i}(w)

        w & = w - v(w, t)

    where, :math:`\\rho` is a hyperparameter and typical values are 0.9, 0.95
    and so on. :math:`\\beta` is the momentum term. :math:`\\epsilon` is a
    smoothing term to avoid division by zero, usually set somewhere in the range
    from 1e-4 to 1e-8.

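    As an informal illustration, one (optionally centered) RMSProp step can be
    sketched in NumPy; the helper below is hypothetical and only mirrors the
    documented math, it is not part of the API:

    .. code-block:: python

        import numpy as np

        def rmsprop_step(w, grad, r, g, v, lr=0.1, rho=0.95,
                         momentum=0.0, epsilon=1e-6, centered=True):
            # moving averages of the squared and (optionally) raw gradient
            r = rho * r + (1 - rho) * grad * grad
            denom = r
            if centered:
                g = rho * g + (1 - rho) * grad
                denom = r - g * g
            v = momentum * v + lr * grad / np.sqrt(denom + epsilon)
            w -= v
            return w, r, g, v
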
    Parameters:
        learning_rate(float): Global learning rate.
        rho(float): rho is :math:`\\rho` in the equations, default is 0.95.
        epsilon(float): :math:`\\epsilon` in the equations is a smoothing term to
            avoid division by zero, default is 1e-6.
        momentum(float): :math:`\\beta` in the equations is the momentum term,
            default is 0.0.
        centered(bool): If True, gradients are normalized by the estimated variance of
            the gradient; if False, by the uncentered second moment. Setting this to
            True may help with training, but is slightly more expensive in terms of
            computation and memory. Defaults to False.
        parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
            :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has already \
            set a regularizer using :ref:`api_fluid_ParamAttr` , the regularization setting here in the optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Raises:
        ValueError: If learning_rate, rho, epsilon, momentum are None.

    Examples:
          .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
                rms_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    """

    _momentum_acc_str = "momentum"
    _mean_square_acc_str = "mean_square"
    _mean_grad_acc_str = "mean_grad"

    def __init__(self,
                 learning_rate,
                 rho=0.95,
                 epsilon=1.0e-6,
                 momentum=0.0,
                 centered=False,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        super(RMSPropOptimizer, self).__init__(learning_rate=learning_rate,
                                               parameter_list=parameter_list,
                                               regularization=regularization,
                                               grad_clip=grad_clip,
                                               name=name)
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if momentum is None:
            raise ValueError("momentum is not set.")

        self.type = "rmsprop"
        self._rho = rho
        self._epsilon = epsilon
        self._momentum = momentum
        self._centered = centered

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._momentum_acc_str, p)
            self._add_accumulator(self._mean_square_acc_str, p)
            self._add_accumulator(self._mean_grad_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        momentum_acc = self._get_accumulator(self._momentum_acc_str,
                                             param_and_grad[0])
        mean_square_acc = self._get_accumulator(self._mean_square_acc_str,
                                                param_and_grad[0])
        mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,
                                              param_and_grad[0])
        if in_dygraph_mode():
            _C_ops.rmsprop_(param_and_grad[0], mean_square_acc,
                            param_and_grad[1], momentum_acc,
                            self._create_param_lr(param_and_grad),
                            mean_grad_acc, self._epsilon, self._rho,
                            self._momentum, self._centered)
            return None
        elif _in_legacy_dygraph():
            _legacy_C_ops.rmsprop(param_and_grad[0], mean_square_acc,
                                  self._create_param_lr(param_and_grad),
                                  param_and_grad[1], momentum_acc,
                                  param_and_grad[0], momentum_acc,
                                  mean_square_acc, mean_grad_acc, "epsilon",
                                  self._epsilon, "decay", self._rho, "momentum",
                                  self._momentum, "centered", self._centered)
            return None
        else:
            rmsprop_op = block.append_op(
                type=self.type,
                inputs={
                    "Param": param_and_grad[0],
                    "Grad": param_and_grad[1],
                    "Moment": momentum_acc,
                    "MeanSquare": mean_square_acc,
                    "MeanGrad": mean_grad_acc,
                    "LearningRate": self._create_param_lr(param_and_grad),
                },
                outputs={
                    "ParamOut": param_and_grad[0],
                    "MomentOut": momentum_acc,
                    "MeanSquareOut": mean_square_acc,
                    "MeanGradOut": mean_grad_acc
                },
                attrs={
                    "epsilon": self._epsilon,
                    "decay": self._rho,
                    "momentum": self._momentum,
                    "centered": self._centered
                },
                stop_gradient=True)

            return rmsprop_op


class FtrlOptimizer(Optimizer):
    r"""
    FTRL (Follow The Regularized Leader) Optimizer.

    The paper that proposed Follow The Regularized Leader (FTRL):
    (https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)

    ..  math::

        &new\_accum = squared\_accum + grad^2

        &if (lr\_power == -0.5):

        &\quad  linear\_accum += grad - \\frac{\\sqrt{new\_accum} - \\sqrt{squared\_accum}}{learning\_rate * param}

        &else:

        &\\quad   linear\_accum += grad - \\frac{new\_accum^{-lr\_power} - squared\_accum^{-lr\_power}}{learning\_rate * param}


        &x = l1 * sign(linear\_accum) - linear\_accum

        &if (lr\_power == -0.5):

        &\quad   y = \\frac{\\sqrt{new\_accum}}{learning\_rate} + (2 * l2)

        &\quad   pre\_shrink = \\frac{x}{y}

        &\quad   param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)

        &else:

        &\quad   y = \\frac{new\_accum^{-lr\_power}}{learning\_rate} + (2 * l2)

        &\quad   pre\_shrink = \\frac{x}{y}

        &\quad   param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)

        &squared\_accum += grad^2

    Parameters:
        learning_rate (float|Variable): Global learning rate.
        l1 (float): L1 regularization strength, default is 0.0.
        l2 (float): L2 regularization strength, default is 0.0.
        lr_power (float): Learning Rate Power, default is -0.5.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Raises:
        ValueError: If learning_rate is None.

    Examples:
          .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                ftrl_optimizer = fluid.optimizer.Ftrl(learning_rate=0.1)
                ftrl_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    NOTE:
       Currently, FtrlOptimizer doesn't support sparse parameter optimization.
    """

    _squared_acc_str = "squared"
    _linear_acc_str = "linear"

    def __init__(self,
                 learning_rate,
                 l1=0.0,
                 l2=0.0,
                 lr_power=-0.5,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        super(FtrlOptimizer, self).__init__(learning_rate=learning_rate,
                                            parameter_list=parameter_list,
                                            regularization=regularization,
                                            grad_clip=grad_clip,
                                            name=name)
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")

        self.type = "ftrl"
        self._l1 = l1
        self._l2 = l2
        self._lr_power = lr_power

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._squared_acc_str, p)
            self._add_accumulator(self._linear_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        squared_acc = self._get_accumulator(self._squared_acc_str,
                                            param_and_grad[0])
        linear_acc = self._get_accumulator(self._linear_acc_str,
                                           param_and_grad[0])
        if framework._non_static_mode():
            _legacy_C_ops.ftrl(param_and_grad[0], squared_acc, linear_acc,
                               param_and_grad[1],
                               self._create_param_lr(param_and_grad),
                               param_and_grad[0], squared_acc, linear_acc, "l1",
                               self._l1, "l2", self._l2, "lr_power",
                               self._lr_power)

        else:
            ftrl_op = block.append_op(type=self.type,
                                      inputs={
                                          "Param":
                                          param_and_grad[0],
                                          "Grad":
                                          param_and_grad[1],
                                          "SquaredAccumulator":
                                          squared_acc,
                                          "LinearAccumulator":
                                          linear_acc,
                                          "LearningRate":
                                          self._create_param_lr(param_and_grad),
                                      },
                                      outputs={
                                          "ParamOut": param_and_grad[0],
                                          "SquaredAccumOut": squared_acc,
                                          "LinearAccumOut": linear_acc
                                      },
                                      attrs={
                                          "l1": self._l1,
                                          "l2": self._l2,
                                          "lr_power": self._lr_power
                                      },
                                      stop_gradient=True)

            return ftrl_op


class LambOptimizer(AdamOptimizer):
    r"""
    LAMB (Layer-wise Adaptive Moments optimizer for Batching training) Optimizer.

    LAMB Optimizer is designed to scale up the batch size of training without losing
    accuracy, which supports adaptive element-wise updating and accurate layer-wise
    correction. For more information, please refer to `Large Batch Optimization for
    Deep Learning: Training BERT in 76 minutes <https://arxiv.org/abs/1904.00962>`_ .

    The updating of parameters follows:

    ..  math::

        m_t &= \\beta_1 m_{t - 1}+ (1 - \\beta_1)g_t

        v_t &= \\beta_2 v_{t - 1}  + (1 - \\beta_2)g_t^2

        m_t &= \\frac{m_t}{1 - \\beta_1^t}

        v_t &= \\frac{v_t}{1 - \\beta_2^t}

        r_t &= \\frac{m_t}{\\sqrt{v_t}+\\epsilon}

        w_t &= w_{t-1} -\\eta_t \\frac{\\left \| w_{t-1}\\right \|}{\\left \| r_t + \\lambda w_{t-1}\\right \|} (r_t + \\lambda w_{t-1})


    where :math:`m` is the 1st moment, :math:`v` the 2nd moment, :math:`\\eta` the
    learning rate, and :math:`\\lambda` the LAMB weight decay rate.
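
    A plain NumPy sketch of one update for a single tensor, following the
    equations above (``lamb_step`` and its argument names are hypothetical,
    for illustration only):

    .. code-block:: python

        import numpy as np

        def lamb_step(w, g, m, v, t, lr=0.001, beta1=0.9, beta2=0.999,
                      eps=1e-6, lamb_weight_decay=0.01):
            # moving averages of the first and second moments
            m = beta1 * m + (1 - beta1) * g
            v = beta2 * v + (1 - beta2) * g ** 2
            # bias correction
            m_hat = m / (1 - beta1 ** t)
            v_hat = v / (1 - beta2 ** t)
            r = m_hat / (np.sqrt(v_hat) + eps)
            update = r + lamb_weight_decay * w
            # layer-wise trust ratio rescales the step for this tensor
            w = w - lr * (np.linalg.norm(w) / np.linalg.norm(update)) * update
            return w, m, v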

    Args:
        learning_rate (float|Variable, optional): the learning rate used to update parameters. \
            Can be a float value or a Variable with data type float32. Default 0.001.
        lamb_weight_decay (float, optional): The LAMB weight decay rate. Default 0.01.
        beta1 (float, optional): The exponential decay rate for the 1st moment estimates.
            Default 0.9.
        beta2 (float, optional): The exponential decay rate for the 2nd moment estimates.
            Default 0.999.
        epsilon (float, optional): A small float value for numerical stability. Default 1e-6.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` , :ref:`api_paddle_fluid_clip_ClipGradByNorm` ,
            :ref:`api_paddle_fluid_clip_ClipGradByValue` ). If you want better convergence, it is recommended
            to use :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` . Default None, meaning there is no gradient clipping.
        exclude_from_weight_decay_fn (function|None): Exclude a parameter from weight
            decay when **exclude_from_weight_decay_fn(parameter)** returns true.
            Default None.
        name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name` . Usually the name does not need to be set and is None by default.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            data = fluid.data(name='x', shape=[-1, 5], dtype='float32')
            hidden = fluid.layers.fc(input=data, size=10)
            cost = fluid.layers.mean(hidden)

            def exclude_fn(param):
                return param.name.endswith('.b_0')

            optimizer = fluid.optimizer.Lamb(learning_rate=0.002,
                                             exclude_from_weight_decay_fn=exclude_fn)
            optimizer.minimize(cost)
    """
    _moment1_acc_str = "moment1"
    _moment2_acc_str = "moment2"
    _beta1_pow_acc_str = "beta1_pow_acc"
    _beta2_pow_acc_str = "beta2_pow_acc"

    def __init__(self,
                 learning_rate=0.001,
                 lamb_weight_decay=0.01,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-6,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 exclude_from_weight_decay_fn=None,
                 name=None):
        assert learning_rate is not None
        assert lamb_weight_decay is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(LambOptimizer, self).__init__(learning_rate=learning_rate,
                                            parameter_list=parameter_list,
                                            regularization=regularization,
                                            grad_clip=grad_clip,
                                            beta1=beta1,
                                            beta2=beta2,
                                            epsilon=epsilon,
                                            name=name)
        self.type = "lamb"
        self._weight_decay = lamb_weight_decay
        self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
        block.program._use_lamb = True

        moment1 = self._get_accumulator(self._moment1_acc_str,
                                        param_and_grad[0])
        moment2 = self._get_accumulator(self._moment2_acc_str,
                                        param_and_grad[0])
        beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                              param_and_grad[0])
        beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
                                              param_and_grad[0])

        if self._exclude_from_weight_decay_fn is not None \
            and self._exclude_from_weight_decay_fn(param_and_grad[0]):
            weight_decay = 0.0
        else:
            weight_decay = self._weight_decay
        lr = self._create_param_lr(param_and_grad)
        master_weight = None
        if framework._non_static_mode():
            _legacy_C_ops.lamb(param_and_grad[0], param_and_grad[1], lr,
                               moment1, moment2, beta1_pow_acc, beta2_pow_acc,
                               master_weight, param_and_grad[0], moment1,
                               moment2, beta1_pow_acc, beta2_pow_acc,
                               master_weight, 'beta1', self._beta1, 'beta2',
                               self._beta2, 'epsilon', self._epsilon,
                               'weight_decay', weight_decay)
            return None

        # create the lamb optimize op
        lamb_op = block.append_op(type=self.type,
                                  inputs={
                                      "Param": param_and_grad[0],
                                      "Grad": param_and_grad[1],
                                      "LearningRate": lr,
                                      "Moment1": moment1,
                                      "Moment2": moment2,
                                      "Beta1Pow": beta1_pow_acc,
                                      "Beta2Pow": beta2_pow_acc
                                  },
                                  outputs={
                                      "ParamOut": param_and_grad[0],
                                      "Moment1Out": moment1,
                                      "Moment2Out": moment2,
                                      "Beta1PowOut": beta1_pow_acc,
                                      "Beta2PowOut": beta2_pow_acc
                                  },
                                  attrs={
                                      "beta1": self._beta1,
                                      "beta2": self._beta2,
                                      "epsilon": self._epsilon,
                                      "weight_decay": weight_decay
                                  },
                                  stop_gradient=True)

        return lamb_op


# We shorten the class names, since users will use the optimizer with the
# package name. The sample code:
#
# import paddle.fluid as fluid
#
# sgd = fluid.optimizer.SGD(...)
#
# There is no need to add an `Optimizer` suffix to the class name.
SGD = SGDOptimizer
Momentum = MomentumOptimizer
Adagrad = AdagradOptimizer
Adam = AdamOptimizer
Adamax = AdamaxOptimizer
Dpsgd = DpsgdOptimizer
DecayedAdagrad = DecayedAdagradOptimizer
Adadelta = AdadeltaOptimizer
RMSProp = RMSPropOptimizer
Ftrl = FtrlOptimizer
LarsMomentum = LarsMomentumOptimizer
Lamb = LambOptimizer


class ModelAverage(Optimizer):
    r"""
    :api_attr: Static Graph

    The ModelAverage optimizer accumulates specific continuous historical parameters
    during training. The accumulated historical range can be controlled by the passed
    ``average_window_rate`` argument. The averaged ``Parameter`` is used in prediction,
    which usually improves prediction accuracy.

    The average of the ``Parameter`` is accumulated in a sliding window, and the result is
    saved in a temporary variable. It can be applied to the current model's ``Parameter``
    by calling the ``apply()`` method, and the current model ``Parameter`` can be restored
    by calling the ``restore()`` method.

    The window size for calculating the average is determined by ``average_window_rate``,
    ``min_average_window``, ``max_average_window`` and the current ``Parameter`` update times (num_updates).

    When the cumulative times (num_accumulates) is greater than the specific window
    threshold (average_window), the accumulated ``Parameter`` temporary variable is set to 0.0.
    The following example will help to understand the role of these arguments:

    ::

        if num_accumulates >= min_average_window and num_accumulates >= min(max_average_window, num_updates * average_window_rate):
            num_accumulates = 0

    In the above conditional judgment statement, ``num_accumulates`` indicates the current
    accumulated number, which can be abstractly understood as the length of the cumulative window.
    The length of the window must be at least the length set by the ``min_average_window`` argument,
    and cannot exceed the length specified by the ``max_average_window`` argument or
    ``num_updates * average_window_rate``, where ``num_updates`` indicates the current ``Parameter``
    update times, ``average_window_rate`` is a coefficient that calculates the length of the window.
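
    Equivalently, the window threshold can be sketched as follows
    (``window_threshold`` is a hypothetical helper, for illustration only):

    ::

        def window_threshold(num_updates, average_window_rate,
                             min_average_window=10000, max_average_window=10000):
            # the window grows with the number of updates, bounded below by
            # min_average_window and above by max_average_window
            return max(min_average_window,
                       min(max_average_window,
                           num_updates * average_window_rate))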

    Args:
        average_window_rate (float): The rate used to calculate the window length relative to the number of ``Parameter`` updates.
        min_average_window (int, optional): The minimum size of the average window length. The default value is 10000.
        max_average_window (int, optional): The maximum size of the average window length. The default value is 10000.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        name (str, optional): Normally there is no need for the user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Examples:

      .. code-block:: python

        import paddle.fluid as fluid
        import numpy

        # First create the Executor.
        place = fluid.CPUPlace()  # fluid.CUDAPlace(0)
        exe = fluid.Executor(place)

        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # build net
            data = fluid.data(name='X', shape=[None, 1], dtype='float32')
            hidden = fluid.layers.fc(input=data, size=10)
            loss = fluid.layers.mean(hidden)
            optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
            optimizer.minimize(loss)

            # build ModelAverage optimizer
            model_average = fluid.optimizer.ModelAverage(0.15,
                                                         min_average_window=10000,
                                                         max_average_window=12500)

            exe.run(startup_program)
            for i in range(12500):
                x = numpy.random.random(size=(10, 1)).astype('float32')
                outs = exe.run(program=train_program,
                               feed={'X': x},
                               fetch_list=[loss.name])

            # apply ModelAverage
            with model_average.apply(exe):
                x = numpy.random.random(size=(10, 1)).astype('float32')
                exe.run(program=train_program,
                        feed={'X': x},
                        fetch_list=[loss.name])
    """

    def __init__(self,
                 average_window_rate,
                 min_average_window=10000,
                 max_average_window=10000,
                 regularization=None,
                 name=None):
        if framework._non_static_mode():
            raise Exception("In dygraph, don't support ModelAverage.")
        super(ModelAverage, self).__init__(0.0,
                                           regularization=regularization,
                                           name=name)
        self.average_window = average_window_rate
        self.min_average_window = min_average_window
        self.max_average_window = max_average_window

        self.params_grads = []
        for param in framework.default_main_program().global_block(
        ).all_parameters():
            if param.do_model_average != False:
                grad = param.block.create_var(
                    name=unique_name.generate_with_ignorable_key(".".join(
                        [param.name, 'tmp'])),
                    dtype=param.dtype,
                    persistable=False,
                    stop_gradient=True)
                self.params_grads.append((param, grad))

        for param, grad in self.params_grads:
            if grad is None:
                continue
            with param.block.program._optimized_guard(
                [param, grad]), name_scope('move_average'):
                self._append_average_accumulate_op(param)

        self.apply_program = Program()
        block = self.apply_program.global_block()
        with program_guard(main_program=self.apply_program):
            for param_grad in self.params_grads:
                self._add_average_apply_op(block, param_grad)

        self.restore_program = Program()
        block = self.restore_program.global_block()
        with program_guard(main_program=self.restore_program):
            for param_grad in self.params_grads:
                self._add_average_restore_op(block, param_grad)

    def _add_average_apply_op(self, block, param_grad):
        param = block._clone_variable(param_grad[0])
        grad = block._clone_variable(param_grad[1])
        sum_1 = block._clone_variable(self._get_accumulator('sum_1', param))
        sum_2 = block._clone_variable(self._get_accumulator('sum_2', param))
        sum_3 = block._clone_variable(self._get_accumulator('sum_3', param))
        num_accumulates = block._clone_variable(
            self._get_accumulator('num_accumulates', param))
        old_num_accumulates = block._clone_variable(
            self._get_accumulator('old_num_accumulates', param))
        num_updates = block._clone_variable(
            self._get_accumulator('num_updates', param))
        # backup param value to grad
        layers.assign(input=param, output=grad)
        # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
        tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
        sum = layers.sum(x=[sum_1, sum_2, sum_3])
        tmp = layers.cast(
            x=tmp, dtype='float32' if self._dtype is None else self._dtype)
        sum = layers.cast(
            x=sum, dtype='float32' if self._dtype is None else self._dtype)
        ops._elementwise_div(x=sum, y=tmp, out=param)

    def _add_average_restore_op(self, block, param_grad):
        param = block._clone_variable(param_grad[0])
        grad = block._clone_variable(param_grad[1])
        layers.assign(input=grad, output=param)

    def _append_average_accumulate_op(self, param):
        self.helper = LayerHelper("average_accumulate")
        sum_1 = self._add_accumulator('sum_1', param)
        sum_2 = self._add_accumulator('sum_2', param)
        sum_3 = self._add_accumulator('sum_3', param)
        num_accumulates = self._add_accumulator('num_accumulates',
                                                param,
                                                dtype='int64',
                                                shape=[1])
        old_num_accumulates = self._add_accumulator('old_num_accumulates',
                                                    param,
                                                    dtype='int64',
                                                    shape=[1])
        num_updates = self._add_accumulator('num_updates',
                                            param,
                                            dtype='int64',
                                            shape=[1])

        self.helper.append_op(type='average_accumulates',
                              inputs={
                                  "param": param,
                                  "in_sum_1": sum_1,
                                  "in_sum_2": sum_2,
                                  "in_sum_3": sum_3,
                                  "in_num_accumulates": num_accumulates,
                                  "in_old_num_accumulates": old_num_accumulates,
                                  "in_num_updates": num_updates
                              },
                              outputs={
                                  "out_sum_1": sum_1,
                                  "out_sum_2": sum_2,
                                  "out_sum_3": sum_3,
                                  "out_num_accumulates": num_accumulates,
                                  "out_old_num_accumulates":
                                  old_num_accumulates,
                                  "out_num_updates": num_updates,
                              },
                              attrs={
                                  "average_window": self.average_window,
                                  "min_average_window": self.min_average_window,
                                  "max_average_window": self.max_average_window,
                              },
                              stop_gradient=True)

    @signature_safe_contextmanager
    def apply(self, executor, need_restore=True):
        """
        Apply the average of the cumulative ``Parameter`` to the parameters of the current model.

        Args:
            executor(fluid.Executor): The current network executor.
            need_restore(bool): Restore flag variable, if set to True, the network will restore
                the parameters of the network to the default value, if set to False,
                it will not be restored. The default value is True.

        Examples:

          .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            # First create the Executor.
            place = fluid.CPUPlace()  # fluid.CUDAPlace(0)
            exe = fluid.Executor(place)

            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
                hidden = fluid.layers.fc(input=data, size=10)
                loss = fluid.layers.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
                optimizer.minimize(loss)

                # build ModelAverage optimizer
                model_average = fluid.optimizer.ModelAverage(0.15,
                                                            min_average_window=10000,
                                                            max_average_window=12500)

                exe.run(startup_program)
                for i in range(12500):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    outs = exe.run(program=train_program,
                                feed={'X': x},
                                fetch_list=[loss.name])

                # apply ModelAverage
                with model_average.apply(exe):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    exe.run(program=train_program,
                            feed={'X': x},
                            fetch_list=[loss.name])
        """
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)

    def restore(self, executor):
        """
        Restore ``Parameter`` values of current model.

        Args:
            executor(fluid.Executor): The current network executor.

        Examples:

          .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            # First create the Executor.
            place = fluid.CPUPlace()  # fluid.CUDAPlace(0)
            exe = fluid.Executor(place)

            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
                hidden = fluid.layers.fc(input=data, size=10)
                loss = fluid.layers.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
                optimizer.minimize(loss)

                # build ModelAverage optimizer
                model_average = fluid.optimizer.ModelAverage(0.15,
                                                            min_average_window=10000,
                                                            max_average_window=12500)

                exe.run(startup_program)
                for i in range(12500):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    outs = exe.run(program=train_program,
                                feed={'X': x},
                                fetch_list=[loss.name])

                # apply ModelAverage
                with model_average.apply(exe, False):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    exe.run(program=train_program,
                            feed={'X': x},
                            fetch_list=[loss.name])

                # restore Parameters
                model_average.restore(exe)
        """
        executor.run(self.restore_program)


class ExponentialMovingAverage(object):
    r"""
    :api_attr: Static Graph

    Compute the moving average of parameters with exponential decay.
    Given a parameter :math:`\\theta`, its exponential moving average (EMA)
    will be

    ..  math::

        \\text{EMA}_0 & = 0

        \\text{EMA}_t & = \\text{decay} * \\text{EMA}_{t-1} + (1 - \\text{decay}) * \\theta_t

    The average results calculated by the **update()** method will be saved in
    temporary variables which are created and maintained by the object, and can
    be applied to parameters of the current model by calling the **apply()** method.
    The **restore()** method is used to restore the parameters.

    **Bias correction**. All EMAs are initialized to :math:`0` and hence they will be
    zero biased, which can be corrected by dividing by a factor
    :math:`(1 - \\text{decay}^t)` , i.e., the actual EMAs applied to parameters
    when calling the **apply()** method would be

    ..  math::

        \\widehat{\\text{EMA}}_t = \\frac{\\text{EMA}_t}{1 - \\text{decay}^t}

    **Decay rate scheduling**. A large decay rate very close to 1 would result
    in the averages moving very slowly. A better strategy is to set a
    relatively smaller decay rate in the very beginning. The argument **thres_steps**
    allows users to pass a Variable to schedule the decay rate, in which case
    the actual decay rate becomes

    ..  math::

        \\min(\\text{decay}, \\frac{1 + \\text{thres_steps}}{10 + \\text{thres_steps}})

    Usually **thres_steps** can be the global training steps.
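
    As a scalar sketch of the tracked value and the bias-corrected value that
    **apply()** writes back (``ema_apply_value`` is a hypothetical helper, for
    illustration only; ``thetas`` is assumed non-empty):

    .. code-block:: python

        def ema_apply_value(thetas, decay=0.999):
            # thetas: the parameter value at each step t = 1 .. T
            ema = 0.0
            for t, theta in enumerate(thetas, start=1):
                ema = decay * ema + (1 - decay) * theta
            # bias correction, as applied by apply()
            return ema / (1 - decay ** t)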


    Args:
        decay (float, optional): The exponential decay rate, usually close to 1, such as 0.999, 0.9999, ... . Default 0.999.
        thres_steps (Variable|None, optional): If not `None`, schedule the decay rate. Default None.
        name (str|None, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually the name does not need to be set and is None by default.


    Examples:

        .. code-block:: python

            import numpy
            import paddle
            import paddle.static as static
            from paddle.static import ExponentialMovingAverage

            paddle.enable_static()

            data = static.data(name='x', shape=[-1, 5], dtype='float32')
            hidden = static.nn.fc(x=data, size=10)
            cost = paddle.mean(hidden)

            test_program = static.default_main_program().clone(for_test=True)
            optimizer = paddle.optimizer.Adam(learning_rate=0.001)
            optimizer.minimize(cost)

            ema = ExponentialMovingAverage(0.999)
            ema.update()

            place = paddle.CPUPlace()
            exe = static.Executor(place)
            exe.run(static.default_startup_program())

            for pass_id in range(3):
                for batch_id in range(6):
                    data = numpy.random.random(size=(10, 5)).astype('float32')
                    exe.run(program=static.default_main_program(),
                    feed={'x': data},
                    fetch_list=[cost.name])

                # usage 1
                with ema.apply(exe):
                    data = numpy.random.random(size=(10, 5)).astype('float32')
                    exe.run(program=test_program,
                        feed={'x': data},
                        fetch_list=[hidden.name])

                # usage 2
                with ema.apply(exe, need_restore=False):
                    data = numpy.random.random(size=(10, 5)).astype('float32')
                    exe.run(program=test_program,
                        feed={'x': data},
                        fetch_list=[hidden.name])
                ema.restore(exe)

    """

    def __init__(self, decay=0.999, thres_steps=None, name=None):
        if framework._non_static_mode():
            raise Exception(
                "In dygraph, don't support ExponentialMovingAverage.")
        self._decay = decay
        self._thres_steps = thres_steps
        self._name = name if name is not None else ''
        self._decay_var = self._get_ema_decay()

        self._step_counter_name = "@EMA_STEP_COUNTER@"
        self._params_tmps = []
        for param in default_main_program().global_block().all_parameters():
            if param.do_model_average != False:
                tmp = param.block.create_var(name=unique_name.generate(".".join(
                    [self._name + param.name, 'ema_tmp'])),
                                             dtype=param.dtype,
                                             persistable=False,
                                             stop_gradient=True)
                self._params_tmps.append((param, tmp))

        self._ema_vars = {}
        for param, tmp in self._params_tmps:
            with param.block.program._optimized_guard(
                [param, tmp]), name_scope('moving_average'):
                self._ema_vars[param.name] = self._create_ema_vars(param)

        self.apply_program = Program()
        block = self.apply_program.global_block()
        with program_guard(main_program=self.apply_program):
            decay_pow, global_step = self._get_decay_pow(block)
            for param, tmp in self._params_tmps:
                param = block._clone_variable(param)
                tmp = block._clone_variable(tmp)
                ema = block._clone_variable(self._ema_vars[param.name])
                layers.assign(input=param, output=tmp)
                # bias correction
                with layers.control_flow.Switch() as switch:
                    with switch.case(global_step > 0):
                        layers.assign(output=param,
                                      input=ema / (1.0 - decay_pow))
                    with switch.default():
                        layers.assign(output=param, input=ema)

        self.restore_program = Program()
        block = self.restore_program.global_block()
        with program_guard(main_program=self.restore_program):
            for param, tmp in self._params_tmps:
                tmp = block._clone_variable(tmp)
                param = block._clone_variable(param)
                layers.assign(input=tmp, output=param)

    def _get_ema_decay(self):
        with default_main_program()._lr_schedule_guard():
            decay_var = layers.tensor.create_global_var(
                shape=[1],
                value=self._decay,
                dtype='float32',
                persistable=True,
                name="scheduled_ema_decay_rate")

            if self._thres_steps is not None:
                decay_t = (self._thres_steps + 1.0) / (self._thres_steps + 10.0)
                with layers.control_flow.Switch() as switch:
                    with switch.case(decay_t < self._decay):
                        layers.tensor.assign(decay_t, decay_var)
                    with switch.default():
                        layers.tensor.assign(
                            np.array([self._decay], dtype=np.float32),
                            decay_var)
        return decay_var

    def _get_decay_pow(self, block):
        global_step = layers.create_global_var(name=self._step_counter_name,
                                               shape=[1],
                                               value=0,
                                               dtype='int64',
                                               persistable=True)
        global_step = layers.cast(global_step, "float32")
        decay_var = block._clone_variable(self._decay_var)
        decay_pow_acc = layers.elementwise_pow(decay_var, global_step)
        return decay_pow_acc, global_step

    def _create_ema_vars(self, param):
        param_ema = layers.create_global_var(
            name=unique_name.generate(self._name + param.name + '_ema'),
            shape=param.shape,
            value=0.0,
            dtype=param.dtype,
            persistable=True)

        return param_ema

    def update(self):
        """
        Update Exponential Moving Average. Should only call this method in
        train program.
        """
        global_step = layers.autoincreased_step_counter(
            counter_name=self._step_counter_name)
        param_master_emas = []
        for param, tmp in self._params_tmps:
            with param.block.program._optimized_guard(
                [param, tmp]), name_scope('moving_average'):
                param_ema = self._ema_vars[param.name]
                if param.name + '.master' in self._ema_vars:
                    master_ema = self._ema_vars[param.name + '.master']
                    param_master_emas.append([param_ema, master_ema])
                else:
                    ema_t = param_ema * self._decay_var + param * (
                        1 - self._decay_var)
                    layers.assign(input=ema_t, output=param_ema)

        # for fp16 params
        for param_ema, master_ema in param_master_emas:
            default_main_program().global_block().append_op(
                type="cast",
                inputs={"X": master_ema},
                outputs={"Out": param_ema},
                attrs={
                    "in_dtype": master_ema.dtype,
                    "out_dtype": param_ema.dtype
                })

    @signature_safe_contextmanager
    def apply(self, executor, need_restore=True):
        """
        Apply moving average to parameters for evaluation.

        Args:
            executor (Executor): The Executor to execute applying.
            need_restore (bool, optional): Whether to restore parameters after
                applying. Default True.
        """
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)

    def restore(self, executor):
        """Restore parameters.

        Args:
            executor (Executor): The Executor to execute restoring.
        """
        executor.run(self.restore_program)


class PipelineOptimizer(object):
    """
    :api_attr: Static Graph

    Pipeline Optimizer: Make a program run as a pipeline, that is, splitting a
    program into multiple sections (sub-programs), where each section runs on a
    device, to enable the training of large-scale models and the use of
    heterogeneous devices. Meanwhile, all sections run in a pipelined fashion.

    Args:
        optimizer (Optimizer): The optimizer to use, such as SGD.
        num_microbatches (int): Number of microbatches. [Optional. Default:1].
        start_cpu_core_id (int): The first cpu core id to use. [Optional. Default:0].
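
    A minimal construction with the documented arguments (the values below are
    illustrative only):

    .. code-block:: python

        import paddle.fluid as fluid

        sgd = fluid.optimizer.SGD(learning_rate=0.5)
        pipeline_opt = fluid.optimizer.PipelineOptimizer(
            sgd, num_microbatches=4, start_cpu_core_id=0)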

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            with fluid.device_guard("gpu:0"):
                x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0)
                y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0)
                data_loader = fluid.io.DataLoader.from_generator(
                    feed_list=[x, y],
                    capacity=64,
                    use_double_buffer=True,
                    iterable=False)

                emb_x = layers.embedding(input=x, param_attr=fluid.ParamAttr(name="embx"), size=[10,2], is_sparse=False)
                emb_y = layers.embedding(input=y, param_attr=fluid.ParamAttr(name="emby",learning_rate=0.9), size=[10,2], is_sparse=False)

            with fluid.device_guard("gpu:1"):
                concat = layers.concat([emb_x, emb_y], axis=1)
                fc = layers.fc(input=concat, name="fc", size=1, num_flatten_dims=1, bias_attr=False)
                loss = layers.reduce_mean(fc)
            optimizer = fluid.optimizer.SGD(learning_rate=0.5)
            optimizer = fluid.optimizer.PipelineOptimizer(optimizer)
            optimizer.minimize(loss)

            def train_reader():
                for _ in range(4):
                    x = np.random.random(size=[1]).astype('int64')
                    y = np.random.random(size=[1]).astype('int64')
                    yield x, y
            data_loader.set_sample_generator(train_reader, batch_size=1)

            place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            batch_size = 1
            data_loader.start()
            exe.train_from_dataset(
                    fluid.default_main_program())
            data_loader.reset()
    """

    def __init__(self, optimizer, num_microbatches=1, start_cpu_core_id=0):
        self._device = 'cpu'
        if core.is_compiled_with_npu():
            self._device = "npu"
        elif core.is_compiled_with_cuda():
            self._device = "gpu"
        if framework._non_static_mode():
            raise Exception("In dygraph, don't support PipelineOptimizer.")
        valid_optimizers = (Optimizer, paddle.optimizer.Optimizer,
                            paddle.fluid.contrib.mixed_precision.decorator.
                            OptimizerWithMixedPrecision)
        if not isinstance(optimizer, valid_optimizers):
            raise ValueError("The 'optimizer' parameter for "
                             "PipelineOptimizer must be an instance of "
                             "{}, but the given type is {}.".format(
                                 valid_optimizers, type(optimizer)))
        self._optimizer = optimizer

        # Get the original optimizer defined by users, such as SGD
        self._origin_optimizer = self._optimizer
        while hasattr(self._origin_optimizer, "inner_opt"):
            self._origin_optimizer = self._origin_optimizer.inner_opt

        assert num_microbatches >= 1, (
            "num_microbatches must be a positive value.")
        self._num_microbatches = num_microbatches
        assert start_cpu_core_id >= 0, (
4448
            "start_cpu_core_id must be a non-negative integer.")
H
hutuxian 已提交
4449
        self._start_cpu_core_id = start_cpu_core_id
4450 4451 4452 4453 4454 4455
        self._place_list = None
        op_maker = core.op_proto_and_checker_maker
        self._op_role = op_maker.OpRole
        self._op_role_key = op_maker.kOpRoleAttrName()
        self._op_role_var_key = op_maker.kOpRoleVarAttrName()
        self._op_device_key = op_maker.kOpDeviceAttrName()
        self._param_device_map = None
        self._pipeline_pair = []
        self._pp_ring_map = dict()
        self.output_var_to_op = None
        self.input_var_to_op = None

    def _insert_allreduce_op(self, op_idx, block):
        """
        Insert allreduce op to sync global information for global
        gradient clip and amp.
        """
        op = block.ops[op_idx]
        out_name = op.desc.output_arg_names()[0]
        out_var = block.var(out_name)
        offset = 0
        if op.type == "reduce_any":
            # cast the bool var to int32 to use allreduce_max op
            temp_var_name = unique_name.generate(out_name + "_cast_int32")
            temp_var = block.create_var(name=temp_var_name,
                                        shape=[1],
                                        dtype="int32")
            block._insert_op(op_idx + 1 + offset,
                             type='cast',
                             inputs={'X': out_var},
                             outputs={'Out': temp_var},
                             attrs={
                                 'in_dtype': out_var.dtype,
                                 'out_dtype': temp_var.dtype,
                                 self._op_role_key: self._op_role.Optimize
                             })
            offset += 1
        block._insert_op(
            op_idx + 1 + offset,
            type='c_allreduce_max'
            if op.type == "reduce_any" else 'c_allreduce_sum',
            inputs={'X': temp_var if op.type == "reduce_any" else out_var},
            outputs={'Out': temp_var if op.type == "reduce_any" else out_var},
            attrs={
                'ring_id': self.global_ring_id,
                self._op_role_key: self._op_role.Optimize,
                'use_calc_stream': True
            })
        offset += 1
        if op.type == "reduce_any":
            block._insert_op(op_idx + 1 + offset,
                             type='cast',
                             inputs={'X': temp_var},
                             outputs={'Out': out_var},
                             attrs={
                                 'in_dtype': temp_var.dtype,
                                 'out_dtype': out_var.dtype,
                                 self._op_role_key: self._op_role.Optimize
                             })
            offset += 1
        return offset
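
    # Illustrative sketch (not executed): for a `reduce_any` op (e.g. AMP's
    # global found-infinite check), the inserted sequence is
    #     cast(bool -> int32) -> c_allreduce_max -> cast(int32 -> bool)
    # while for other ops (e.g. the `sum` of global gradient clip) a single
    # c_allreduce_sum is inserted, so every device sees the same global value.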

    def _create_vars(self, block, ori_block):
        # Create vars for block, copied from ori_block
        used_var_set = set()
        added_op_num = 0
        op_idx = 0
        op_size = block.desc.op_size()
        while op_idx < op_size + added_op_num:
            # Whether to insert allreduce_sum or allreduce_max op.
            # For amp and global gradient clip strategies, we should
            # get the global information, so allreduce op is needed.
            should_insert = False
            op = block.ops[op_idx]
            # For ops that process vars on all devices, remove the
            # input vars that are not in this block.
            reserved_x = []
            if op.type == 'reduce_any' and self._is_optimize_op(op):
                should_insert = True
            elif op.type == 'concat' and self._is_optimize_op(op):
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
            elif op.type == 'update_loss_scaling':
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
                op.desc.set_output('Out', reserved_x)
            elif op.type == 'check_finite_and_unscale':
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
                op.desc.set_output('Out', reserved_x)
                if len(reserved_x) == 0:
                    block._remove_op(op_idx)
                    op_size -= 1
                    continue
            elif op.type == 'sum' and self._is_gradient_clip_op(op):
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
                should_insert = True

            vars = op.desc.input_arg_names() + op.desc.output_arg_names()
            for var in vars:
                # a var whose name contains "blocking_queue"
                # only exists in startup program
                if var in used_var_set or "_blocking_queue" in var:
                    continue
                used_var_set.add(var)
                if block._find_var_recursive(str(var)): continue
                source_var = ori_block._var_recursive(str(var))
                if source_var.type == core.VarDesc.VarType.READER:
                    dest_var = block.create_var(
                        name=var,
                        type=core.VarDesc.VarType.READER,
                        persistable=source_var.persistable)
                elif isinstance(source_var, Parameter):
                    dest_var = block.create_parameter(
                        name=source_var.name,
                        shape=source_var.shape,
                        dtype=source_var.dtype,
                        type=source_var.type,
                        lod_level=source_var.lod_level,
                        stop_gradient=source_var.stop_gradient,
                        trainable=source_var.trainable,
                        optimize_attr=source_var.optimize_attr,
                        regularizer=source_var.regularizer,
                        error_clip=source_var.error_clip)
                else:
                    dest_var = block._clone_variable(source_var, False)
                self._clone_var_attr(dest_var, source_var)
            # When use with sharding, allreduce_sum and allreduce_max
            # used for global gradient clip and amp will be added by sharding.
            op_idx += 1
            if self.use_sharding or not should_insert: continue
            inserted_ops = self._insert_allreduce_op(op_idx - 1, block)
            added_op_num += inserted_ops
            op_idx += inserted_ops
        block._sync_with_cpp()
    def _is_loss_grad_op(self, op):
        assert self._op_role_key in op.attr_names
        op_role = int(op.attr(self._op_role_key))
        return op_role & int(self._op_role.Backward) and op_role & int(
            self._op_role.Loss)

    def _is_forward_op(self, op):
        return self._op_role_key in op.attr_names and (int(
            op.attr(self._op_role_key)) == int(self._op_role.Forward))

    def _is_backward_op(self, op):
        return self._op_role_key in op.attr_names and (
            int(op.attr(self._op_role_key)) & int(self._op_role.Backward))

    def _is_loss_op(self, op):
        assert self._op_role_key in op.attr_names
        return int(op.attr(self._op_role_key)) == int(self._op_role.Loss)

    def _is_optimize_op(self, op):
        return self._op_role_key in op.attr_names and (
            int(op.attr(self._op_role_key)) & int(self._op_role.Optimize))

    def _is_update_op(self, op):
        return 'Param' in op.input_names and 'Grad' in op.input_names and (
            "LearningRate" in op.input_names)

    def _split_program(self, main_program, devices):
        """
        Split a program into sections according to devices that ops run on.
        The op whose op_device attr is "gpu:all" is copied to all sections.

        Args:
            main_program (Program): the main program
            devices: all used devices
        """
        # Map from device to its corresponding section program info
        device_program_map = defaultdict(Program)

        block = main_program.block(0)
        for op in block.ops:
            device = op.attr(self._op_device_key)
            # Copy ops whose op_device set to "gpu:all" to all sections.
            if device == f"{self._device}:all":
                for device in devices:
                    program = device_program_map[device]
                    op_desc = op.desc
                    ap_op = program.global_block().desc.append_op()
                    ap_op.copy_from(op_desc)
                    ap_op._set_attr(self._op_device_key, "")
            else:
                program = device_program_map[device]
                op_desc = op.desc
                ap_op = program.global_block().desc.append_op()
                ap_op.copy_from(op_desc)
                ap_op._set_attr(self._op_device_key, "")

        program_list = []
        for key in devices:
            program = device_program_map[key]
            program._sync_with_cpp()
            program_list.append(program)

        return program_list
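
    # Illustrative sketch (not executed), assuming a program built with two
    # device_guard scopes:
    #
    #     program_list = self._split_program(main_program, ["gpu:0", "gpu:1"])
    #     # program_list[0] holds the ops placed on gpu:0,
    #     # program_list[1] holds the ops placed on gpu:1;
    #     # ops marked "gpu:all" (e.g. lr scheduling) are copied into both.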

    def _get_op_device_for_startup_program(self, var_name):
        """
        For adam optimizer, it will add accumulators and initialize them
        with fill_constant, and force the op device to cpu. Hence, we should
        get the real op_device attribute of the fill_constant as the device
        where the corresponding parameters on.
        """
        assert "beta1_pow_acc" in var_name or "beta2_pow_acc" in var_name, \
            'For accumulators for Adam, the name must contain beta1_pow_acc ' \
            'or beta2_pow_acc.'
        param_name = var_name[0:var_name.index('_beta')]
        device = self._param_device_map[param_name]
        return device
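
    # Illustrative sketch (not executed), assuming a parameter "fc_0.w_0"
    # placed on "gpu:1"; its Adam accumulator "fc_0.w_0_beta1_pow_acc_0"
    # is mapped back to that device:
    #
    #     self._get_op_device_for_startup_program(
    #         "fc_0.w_0_beta1_pow_acc_0")  # -> "gpu:1"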

    def _split_startup_program(self, startup_program, device_id):
        block = startup_program.global_block()
        new_startup_program = Program()
        for op in block.ops:
            device = op.attr(self._op_device_key)
            if device == "cpu":
                assert op.type == "fill_constant", (
                    "For ops in startup program with the op_device attribute "
                    "of cpu, they must be of type fill_constant.")
                output_var = op.output_arg_names[0]
                device = self._get_op_device_for_startup_program(output_var)

            if device:
                device_index = int(device.split(':')[1])
            else:
                # LR related ops
                device = None
            if device and device_index != device_id: continue
            op_desc = op.desc
            ap_op = new_startup_program.global_block().desc.append_op()
            ap_op.copy_from(op_desc)
            ap_op._set_attr(self._op_device_key, "")
        new_startup_program._sync_with_cpp()
        self._create_vars(new_startup_program.global_block(), block)
        return new_startup_program
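
    # Illustrative sketch (not executed): each pipeline rank keeps only its
    # own slice of the startup program, e.g. rank 1 runs
    #
    #     startup = self._split_startup_program(startup_program, device_id=1)
    #
    # which keeps initialization ops whose op_device is "gpu:1" plus the
    # device-less (lr-related) ones.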

    def _find_post_op(self, index, var_name):
        """
        Find the post op that has variable named var_name as input.
        """
        # bugfix for uniform hybrid parallelism
        if '.cast_fp32' in var_name:
            var_name = var_name.replace('.cast_fp32', '')
        if '.cast_fp16' in var_name:
            var_name = var_name.replace('.cast_fp16', '')

        post_ops = self.input_var_to_op[var_name]
        if post_ops is None: return None
        result_op = None
        for post_op, post_idx in reversed(post_ops):
            if post_idx > index:
                result_op = post_op
                break
        return result_op

    def _find_prev_op(self, index, var_name):
        """
        Find the previous op of op with index that outputs
        variable named var_name.
        """
        prev_ops = self.output_var_to_op[var_name]
        if prev_ops is None: return None
        result_op = None
        for prev_op, prev_idx in reversed(prev_ops):
            if prev_idx < index:
                result_op = prev_op
                break
        return result_op
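
    # Illustrative sketch (not executed): given ops [op0: x -> y, op1: y -> z],
    # self.output_var_to_op["y"] == [(op0, 0)] and
    # self.input_var_to_op["y"] == [(op1, 1)], so
    #
    #     self._find_post_op(0, "y")  # -> op1, first op after index 0 reading y
    #     self._find_prev_op(1, "y")  # -> op0, last op before index 1 writing y
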
    def _rename_arg(self, op, old_name, new_name):
        op._rename_input(old_name, new_name)
        op._rename_output(old_name, new_name)

    def _create_var(self, block, ref_var, name, dtype=None):
        """
        Create a new var for block, which has the same type,
        shape and dtype as ref_var, then rename it with the
        name `name`.
        """
        new_var = block.create_var(
            name=name,
            shape=ref_var.shape,
            dtype=ref_var.dtype if dtype is None else dtype,
            type=ref_var.type,
            lod_level=ref_var.lod_level,
            persistable=ref_var.persistable,
            is_data=ref_var.is_data,
            need_check_feed=ref_var.desc.need_check_feed())
        self._clone_var_attr(new_var, ref_var)
        return new_var

    def _clone_var_attr(self, dest, src):
        dest.stop_gradient = src.stop_gradient
        if hasattr(src, 'is_distributed'):
            dest.is_distributed = src.is_distributed

    def _strip_grad_suffix(self, name):
        """
        Strip the grad suffix from the given variable name
        """
        pos = name.find(core.grad_var_suffix())
        return name[:pos] if pos != -1 else name
    def _append_grad_suffix(self, name):
        """
        Append grad suffix to the given variable name
        """
        return name + core.grad_var_suffix()
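
    # Illustrative sketch (not executed): core.grad_var_suffix() is "@GRAD",
    # so for a variable named "fc_0.w_0":
    #     self._append_grad_suffix("fc_0.w_0")      # -> "fc_0.w_0@GRAD"
    #     self._strip_grad_suffix("fc_0.w_0@GRAD")  # -> "fc_0.w_0"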

    def _get_op_device_attr(self, op):
        """
        Get the op_device attribute of an op.
        """
        device = op.attr(self._op_device_key) \
            if op.has_attr(self._op_device_key) else None
        if device:
            assert device[0:3] == 'gpu' or device[0:3] == 'npu', "Now, only gpu and npu devices are " \
                "supported in pipeline parallelism."
        return device

    def _add_op_device_attr_for_op(self, op, idx, block):
        """
        Add the op_device attribute for ops that do not have it set.
        We use "gpu:all" to represent the op should be put on all
        sub-programs, such as lr-related ops. Note that: "gpu:all"
        is only used by pipeline as an indicator.
        """
        lrsched_role = int(self._op_role.LRSched)
        if op.attr(self._op_role_key) == lrsched_role:
            # For LRSched ops, we should put them on all sub-programs to
            # make sure each sub-program update the lr correctly
            op._set_attr(self._op_device_key, f"{self._device}:all")
        # bugfix in hybrid parallelism
        elif op.type == "sum" and self._is_backward_op(op):
            # For sum ops that compute the sum of @RENAMED@ vars
            for name in op.desc.input_arg_names():
                assert '@RENAME@' in name, \
                    "The sum op must accumulate renamed (@RENAME@) vars."
            assert len(op.desc.output_arg_names()) == 1
            out_name = op.desc.output_arg_names()[0]
            post_op = self._find_post_op(idx, out_name)
            assert post_op.has_attr(
                'op_device'), "{} has no op_device attr for var {}".format(
                    post_op.type, out_name)
            device = post_op.attr(self._op_device_key)
            assert device, "The post op must have op_device set."
            op._set_attr(self._op_device_key, device)
        elif (op.type == "cast"
              or op.type == "scale") and self._is_backward_op(op):
            prev_op = self._find_prev_op(idx, op.desc.input("X")[0])
            op._set_attr(self._op_device_key, prev_op.attr(self._op_device_key))
        elif op.type == "memcpy" and not self._is_optimize_op(op):
            # for checkpoint offloading
            assert len(op.input_arg_names) == 1 and len(
                op.output_arg_names) == 1
            input_name = op.input_arg_names[0]
            output_name = op.output_arg_names[0]
            if '@Fetch' in output_name:
                post_op = self._find_post_op(idx, output_name)
                op._set_attr(self._op_device_key,
                             post_op.attr(self._op_device_key))
            else:
                prev_op = self._find_prev_op(idx, op.desc.input("X")[0])
                op._set_attr(self._op_device_key,
                             prev_op.attr(self._op_device_key))
        elif self._is_loss_op(op):
            # For loss * loss_scaling op added by AMP
            offset = 1
            while (not block.ops[idx + offset].has_attr(self._op_device_key)
                   or not block.ops[idx + offset].attr(self._op_device_key)):
                offset += 1
            device = block.ops[idx + offset].attr(self._op_device_key)
            assert device, "Please put you program within device_guard scope."
            for i in range(offset):
                block.ops[idx + i]._set_attr(self._op_device_key, device)
        elif self._is_optimize_op(op) and op.type == "cast":
            # For fp16-->fp32 cast added by AMP
            grad_name = op.output('Out')
            assert len(grad_name) == 1
            param_name = self._strip_grad_suffix(grad_name[0])
            device = self._param_device_map[param_name]
            op._set_attr(self._op_device_key, device)
        elif self._is_gradient_clip_op(op) or self._is_regularization_op(op):
            # For gradient clip and regularization ops, we set their op_device
            # attribute to the device where their corresponding parameters on.
            assert self._op_role_var_key in op.attr_names, "gradient_clip " \
                "and regularization ops must have op_role_var attribute."
            op_role_var = op.attr(self._op_role_var_key)
            assert len(op_role_var) == 2, "op_role_var for gradient_clip " \
                "regularization ops must have two elements."
            param_name = op_role_var[0]
            device = self._param_device_map[param_name]
            # For sum op added by global gradient clip, it must be
            # put on all devices
            if (op.type == 'sum' or op.type == 'sqrt'
                    or op.type == 'fill_constant'
                    or op.type == 'elementwise_max'
                    or op.type == 'elementwise_div'):
                device = f"{self._device}:all"
            op._set_attr(self._op_device_key, device)
        elif op.type == "alloc_float_status" or op.type == "clear_float_status":
            op._set_attr(self._op_device_key, f"{self._device}:all")
            # NOTE(wangxi): NPU should only clear the float status
            # once at each batch step
            op._set_attr(self._op_role_key, self._op_role.LRSched)

            float_status_name = op.output_arg_names[0]
            float_status_var = block.var(float_status_name)
            # FIXME(wangxi): pipeline lr schedule will exec on sub_scope(0)
            # while update will exec on sub_scope(last_micro_step), should
            # set persistable to use global scope
            float_status_var.persistable = True
        else:
            other_known_ops = [
                'update_loss_scaling', 'reduce_any', 'concat', 'sum',
                'check_finite_and_unscale', 'memcpy'
            ]
            assert op.type in other_known_ops, "For other ops without " \
                "op_device set, they must be one of {}, but it " \
                "is {}".format(other_known_ops, op.type)
            assert self._is_optimize_op(op)
            op._set_attr(self._op_device_key, f"{self._device}:all")

    def _add_op_device_attr(self, block):
        """
        Add the op_device attribute for ops in block that do not
        have it set.
        """
        for idx, op in enumerate(list(block.ops)):
            if (op.type == "create_py_reader" or op.type == "read"
                    or op.type == "create_double_buffer_reader"):
                # Copy read related ops to all sections to make them exit
                # after each epoch.
                # We use "gpu:all" to represent the op should be put on all
                # sub-programs, such as lr-related ops. Note that: "gpu:all"
                # is only used by pipeline as an indicator.
                op._set_attr(self._op_device_key, f"{self._device}:all")
                continue
            # op_device attribute has been set
            if self._get_op_device_attr(op): continue
            self._add_op_device_attr_for_op(op, idx, block)

    def _check_validation(self, block):
        """
        Check whether ops in a block have both the op_device and the
        op_role attributes set.
        Then, return all devices in order.
        """
        device_list = []
        # Section worker only supports the following op_role
        valid_op_role_value = [
            int(self._op_role.LRSched),
            int(self._op_role.Forward),
            int(self._op_role.Backward),
            int(self._op_role.Loss),
            int(self._op_role.Optimize),
            int(self._op_role.Backward) | int(self._op_role.Loss),
        ]
        for op in block.ops:
            if not op._has_kernel(op.type):
                assert op.type == "conditional_block" and (op.attr(
                    self._op_role_key) == int(self._op_role.LRSched)), (
                        "Now, the only supported op without kernel is "
                        "conditional_block, and its op role must be LRSched.")
            assert op.has_attr(
                self._op_role_key), ("op ({}) has no {} attribute.".format(
                    op.type, self._op_role_key))
            op_role = op.attr(self._op_role_key)
            assert int(op_role) in valid_op_role_value, \
                "op_role {} for op {} must be one of {}".format(
                    op_role,
                    op.type,
                    valid_op_role_value)
            assert op.has_attr(
                self._op_device_key), ("op ({}) has no {} attribute.".format(
                    op.type, self._op_device_key))

            device = op.attr(self._op_device_key)
            assert device, ("op_device attribute for op "
                            "{} has not been set.".format(op.type))
            if device == f"{self._device}:all": continue

            dev_type = device.split(':')[0]
            assert dev_type == "gpu" or dev_type == 'npu', (
                "Now only gpu and npu devices are supported "
                "for pipeline parallelism.")

            if device not in device_list:
                device_list.append(device)

        return device_list
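
    # Illustrative sketch (not executed): for a 2-stage pipeline whose ops
    # carry op_device "gpu:0" and "gpu:1" (plus some "gpu:all" ops, which are
    # skipped), the returned list is ["gpu:0", "gpu:1"], in program order.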
    def _insert_sendrecv_ops_for_boundaries(self, block):
        """
        Insert a pair of send and recv ops for every two
        consecutive ops on different devices.
        """
        # A map from var to device where op takes it as input,
        # avoiding multiple send and recv ops.
        input_var_to_device = dict()
        # bugfix hybrid parallelism
        first_optimize_index = None
        for index, op in enumerate(list(block.ops)):
            if self._is_optimize_op(op):
                first_optimize_index = index
                break
        extra_index_info = {
            'index': 0,
            'first_optimize_index': first_optimize_index
        }
        for index, op in enumerate(list(block.ops)):
            cur_device = op.attr(self._op_device_key)
            if cur_device == f"{self._device}:all": continue
            for var_name in op.input_arg_names:
                var = block.var(var_name)
                # skip data var
                if var.is_data: continue
                prev_device = None

                prev_op = self._find_prev_op(index, var_name)
                if prev_op is None:
                    if var_name not in self._param_device_map:
                        continue
                    prev_device = self._param_device_map[var_name]

                if not prev_device:
                    prev_device = prev_op.attr(self._op_device_key) \
                        if prev_op else None

                if prev_device is None or prev_device == f"{self._device}:all":
                    continue

                if prev_device == cur_device: continue

                if var_name not in input_var_to_device:
                    input_var_to_device[var_name] = []
                if (cur_device, prev_device) in input_var_to_device[var_name]:
                    continue

                device_type = cur_device.split(':')[0] + ':'

                def _check_stage(cur_id, prev_id):
                    # check send/recv stage valid
                    is_forward = self._is_forward_op(op)
                    is_backward = self._is_backward_op(op)
                    assert is_forward or is_backward, \
                        'send/recv in pipeline should only be inserted in forward or backward,' \
                        'please check the op_role of op={}'.format(op)

                    if is_forward:
                        assert prev_id < cur_id, \
                            "In forward, send/recv can only be passed forward, but now " \
                            "prev_stage={} is greater than cur_stage={}, please check op_device of op={}".format(
                                prev_id, cur_id, op)
                    elif is_backward:
                        assert prev_id > cur_id, \
                            "In backward, send/recv can only be passed backward, but now " \
                            "prev_stage={} is less than cur_stage={}, please check op_device of op={}".format(
                                prev_id, cur_id, op)

                def _insert_send_recv(cur_id, prev_id):
                    cur_dev = device_type + str(cur_id)
                    prev_dev = device_type + str(prev_id)
                    if (cur_dev, prev_dev) in input_var_to_device[var_name]:
                        return

                    if cur_id - prev_id > 1:
                        _insert_send_recv(cur_id - 1, prev_id)
                        _insert_send_recv(cur_id, cur_id - 1)
                        input_var_to_device[var_name].append(
                            (cur_dev, prev_dev))
                        return
                    elif cur_id - prev_id < -1:
                        _insert_send_recv(cur_id + 1, prev_id)
                        _insert_send_recv(cur_id, cur_id + 1)
                        input_var_to_device[var_name].append(
                            (cur_dev, prev_dev))
                        return

                    assert abs(cur_id - prev_id) == 1
                    input_var_to_device[var_name].append((cur_dev, prev_dev))

                    op_role = op.attr(self._op_role_key)
                    var = block.vars[var_name]
                    pair = (prev_id, cur_id)
                    # 1000 is just a magic number
                    pair_key = prev_id * 1000 + cur_id
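                    # e.g. stages (0, 1) -> pair_key 1; stages (1, 2) -> pair_key 1002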
                    if pair not in self._pipeline_pair:
                        self._pipeline_pair.append(pair)
                        self._pp_ring_map[pair_key] = self.ring_id
                        ring_id = self.ring_id
                        self.ring_id += 1
                    else:
                        ring_id = self._pp_ring_map[pair_key]
                    if self.schedule_mode == 'F-then-B':  # F-then-B
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='send_v2',
                            inputs={'X': var},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': True,
                                'peer': 1,
                                'ring_id': ring_id
                            })
                        extra_index_info['index'] += 1
                        var_shape = list(var.shape)
                        var_shape[0] = self.micro_batch_size if var_shape[
                            0] < 0 else var_shape[0]
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='recv_v2',
                            outputs={'Out': [var]},
                            attrs={
                                'out_shape': var_shape,
                                'dtype': var.dtype,
                                self._op_device_key: cur_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': True,
                                'peer': 0,
                                'ring_id': ring_id
                            })
                        extra_index_info['index'] += 1
                    elif self.schedule_mode == '1F1B':  # 1F1B
                        var_shape = list(var.shape)
                        var_shape[0] = self.micro_batch_size if var_shape[
                            0] < 0 else var_shape[0]

                        numel = np.prod(var_shape)
                        use_mp = (self.mp_degree > 1) and (numel %
                                                           self.mp_degree == 0)

                        if 'subprog' in var.name:
                            # For recompute, if the checkpoints var is layer_norm_6.tmp_2
                            # this var will be sent twice, layer_norm_6.tmp_2 for forward pass,
                            # layer_norm_6.tmp_2.subprog_* for recompute pass.
                            # We can store the first sent var and copy the value to the
                            # second one to reduce one send/recv op.
                            # The origin_ckpt_name is layer_norm_6.tmp_2, which will be used
                            # to find the stored var for the forward pass.
                            origin_name = var.name.split('subprog')[0][0:-1]
                            associate_var = block.var(origin_name)
                            block._insert_op_without_sync(
                                index=index + extra_index_info['index'],
                                type='assign',
                                inputs={'X': [associate_var]},
                                outputs={'Out': [var]},
                                attrs={
                                    'out_shape': var_shape,
                                    'dtype': var.dtype,
                                    self._op_device_key: cur_dev,
                                    self._op_role_key: op_role,
                                    'use_calc_stream': True,
                                })
                            extra_index_info['index'] += 1
                            return

                        _check_stage(cur_id, prev_id)

                        block._insert_op_without_sync(index=index +
                                                      extra_index_info['index'],
                                                      type='c_sync_calc_stream',
                                                      inputs={'X': [var]},
                                                      outputs={'Out': [var]},
                                                      attrs={
                                                          self._op_device_key:
                                                          prev_dev,
                                                          self._op_role_key:
                                                          op_role,
                                                      })
                        extra_index_info['index'] += 1
                        prefix_name = var.name.split('@')[0]
                        prefix_var = block.var(prefix_name)
                        is_param = True if isinstance(prefix_var,
                                                      Parameter) else False
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='send_v2'
                            if not use_mp or is_param else 'partial_send',
                            inputs={'X': var},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': False,
                                'ring_id': ring_id,
                                'peer': 1,
                                # if send_v2, num&id attr is not in op_attrs, will not insert
                                'num': self.mp_degree,
                                'id': self.mp_rank,
                            })
                        extra_index_info['index'] += 1
                        insert_index = None
                        if int(op_role) == int(self._op_role.Backward):
                            insert_index = extra_index_info[
                                'first_optimize_index']
                            new_op_role = self._op_role.Optimize
                        else:
                            insert_index = index
                            new_op_role = self._op_role.Backward
                        sync_comm_op = block._insert_op_without_sync(
                            index=insert_index + extra_index_info['index'],
                            type='c_sync_comm_stream',
                            inputs={'X': [var]},
                            outputs={'Out': [var]},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: new_op_role,
                                'ring_id': ring_id,
                            })
                        if int(op_role) == int(self._op_role.Forward):
                            sync_comm_op._set_attr('pipeline_flag', '')
                            extra_index_info['index'] += 1
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='recv_v2'
                            if not use_mp or is_param else 'partial_recv',
                            outputs={'Out': [var]},
                            attrs={
                                'out_shape': var_shape,
                                'dtype': var.dtype,
                                self._op_device_key: cur_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': True,
                                'peer': 0,
                                'ring_id': ring_id,
                                # if recv_v2, num&id attr is not in op_attrs, will not insert
                                'num': self.mp_degree,
                                'id': self.mp_rank,
                            })
                        extra_index_info['index'] += 1
                        if use_mp and not is_param:
                            block._insert_op_without_sync(
                                index=index + extra_index_info['index'],
                                type='partial_allgather',
                                inputs={'X': [var]},
                                outputs={'Out': [var]},
                                attrs={
                                    self._op_device_key: cur_dev,
                                    self._op_role_key: op_role,
                                    'use_calc_stream': True,
                                    'ring_id': 0,
                                    # if recv_v2, num&id attr is not in op_attrs, will not insert
                                    'nranks': self.mp_degree,
                                    'rank': self.mp_rank,
                                })
                            extra_index_info['index'] += 1
                    else:
                        raise ValueError(
                            "Now only 'F-then-B' and '1F1B' are supported. "
                            "The given value is {}.".format(self.schedule_mode))

                _insert_send_recv(int(cur_device.split(':')[1]),
                                  int(prev_device.split(':')[1]))
        block._sync_with_cpp()

    def _insert_loss_scale(self, block):
        """
        Scale the loss corresponding to number of micro-batches.
        """
        if self._num_microbatches == 1: return
        for index, op in reversed(tuple(enumerate(list(block.ops)))):
            if self._is_loss_grad_op(op):
                assert op.type == 'fill_constant', \
                    "loss_grad_op must be fill_constant op, " \
                    "but this op is {}".format(op.type)
                assert op.has_attr('value')
                loss_scale = float(op.attr('value'))
                loss_scale = loss_scale / self._num_microbatches
                op._set_attr('value', loss_scale)
                break
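
    # Illustrative arithmetic (not executed): with num_microbatches=4, the
    # fill_constant loss-grad value 1.0 becomes 1.0 / 4 = 0.25, so gradients
    # accumulated over 4 micro-batches match the single-batch scale.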

    def _rename_gradient_var_name(self, block):
        for index, op in enumerate(block.ops):
            if not self._is_optimize_op(op): continue
            input_names = op.input_arg_names
            output_names = op.output_arg_names
            in_out_names = input_names + output_names
            if op.type == 'cast' or op.type == "c_sync_comm_stream": continue
            # append "MERGED" to the names of parameter gradients,
            # and mofify the op_role_var attribute (by rename_arg func).
            for name in in_out_names:
                if not core.grad_var_suffix() in name: continue
                param_name = self._strip_grad_suffix(name)
                new_grad_name = name + "@MERGED"
                self._rename_arg(op, name, new_grad_name)

    def _accumulate_gradients(self,
                              block,
                              pp_allreduce_in_optimize=False,
                              strategy=None,
                              shard=None):
        """
        Create a new merged gradient for each parameter and accumulate the
        corresponding gradient to it.
        """
        fp16_allreduce = strategy.fp16_allreduce if strategy else False
        if strategy and strategy.fuse_grad_merge:
            fused_gradient_names = self._accumulate_gradients_with_fuse(
                block, fp16_allreduce, strategy.fuse_grad_size_in_MB, shard)
            return fused_gradient_names

        merged_gradient_names = []
        first_opt_op_idx = None

        merged_suffix = '@MERGED@FP16' if fp16_allreduce else '@MERGED'
        dtype = paddle.float16 if fp16_allreduce else None

        for index, op in reversed(tuple(enumerate(list(block.ops)))):
            # remove the cast op of fp16 grad to fp32 grad
            if self._is_optimize_op(op) and op.type == 'cast':
                in_name = op.input_arg_names[0]
                out_name = op.output_arg_names[0]
                if self._strip_grad_suffix(out_name) in self._param_device_map:
                    assert in_name.replace('.cast_fp16', '') == out_name
                    block._remove_op(index)
                    continue

            if self._is_backward_op(op) and first_opt_op_idx is None:
                first_opt_op_idx = index + 1
                # maybe have no optimize
                # if first_opt_op_idx == len(block.ops): return

            if self._is_backward_op(op) and (self._op_role_var_key
                                             in op.attr_names):
                op_role_var = op.attr(self._op_role_var_key)
                if len(op_role_var) == 0: continue
                assert len(op_role_var) % 2 == 0
                for i in range(0, len(op_role_var), 2):
                    offset = 0
                    param_name = op_role_var[i]
                    if not block.has_var(param_name): continue
                    if '@BroadCast' in param_name: continue
                    param_grad_name = param_name + core.grad_var_suffix()
                    merged_param_grad_name = param_grad_name + merged_suffix
                    if not block.has_var(merged_param_grad_name):
                        self._create_var(block, block.vars[param_name],
                                         merged_param_grad_name, dtype)
                    assert block.has_var(merged_param_grad_name)
                    param_grad_var = block.var(param_grad_name)
                    merged_param_grad_var = block.var(merged_param_grad_name)
                    merged_param_grad_var.persistable = True
                    block._insert_op(
                        index=first_opt_op_idx + offset,
                        type='fill_constant',
                        inputs={},
                        outputs={'Out': [merged_param_grad_var]},
                        attrs={
                            'shape':
                            merged_param_grad_var.shape,
                            'dtype':
                            merged_param_grad_var.dtype,
                            'value':
                            float(0),
                            # a trick to run this op once per mini-batch
                            self._op_role_key:
                            self._op_role.Optimize.LRSched,
                        })
                    offset += 1
                    grad_name = op_role_var[i + 1]
                    grad_var = block.vars[grad_name]

                    is_fp16_grad = 'cast_fp16' in grad_name
                    need_cast = (is_fp16_grad is not fp16_allreduce)

                    if need_cast:
                        # if fp16_allreduce:
                        #     cast grad to fp16 to accumulate to merged gradient
                        # else:
                        #     cast grad to fp32 to accumulate to merged gradient
                        cast_grad_var_name = param_grad_name + '@TMP'
                        cast_grad_var = self._create_var(
                            block, param_grad_var, cast_grad_var_name, dtype)
                        cast_grad_var.persistable = False
                        block._insert_op(index=first_opt_op_idx + offset,
                                         type='cast',
                                         inputs={'X': grad_var},
                                         outputs={'Out': cast_grad_var},
                                         attrs={
                                             'in_dtype':
                                             grad_var.dtype,
                                             'out_dtype':
                                             cast_grad_var.dtype,
                                             self._op_role_key:
                                             self._op_role.Backward,
                                         })
                        offset += 1
                        grad_var = cast_grad_var

                    block._insert_op(
                        index=first_opt_op_idx + offset,
                        type='sum',
                        inputs={'X': [merged_param_grad_var, grad_var]},
                        outputs={'Out': merged_param_grad_var},
                        attrs={
                            self._op_role_key: self._op_role.Backward,
                        })
                    offset += 1
                    merged_gradient_names.append(merged_param_grad_name)

        if not fp16_allreduce: return merged_gradient_names

        first_opt_op_idx = None
        for index, op in reversed(tuple(enumerate(list(block.ops)))):
            if self._is_backward_op(op) and first_opt_op_idx is None:
                first_opt_op_idx = index + 1
                break
        assert first_opt_op_idx is not None

        # insert cast op from fp16->fp32
        # FIXME(wangxi): maybe put in sharding is better, for some grad
        #                is not in sharding device.
        for fp16_grad_name in merged_gradient_names:
            grad_name = fp16_grad_name.replace('@FP16', '')
            param_name = fp16_grad_name.replace('@GRAD@MERGED@FP16', '')

            if not block.has_var(grad_name):
                self._create_var(block, block.vars[param_name], grad_name)
            assert block.has_var(grad_name)

            fp16_grad_var = block.var(fp16_grad_name)
            grad_var = block.var(grad_name)
            grad_var.persistable = False

            block._insert_op(index=first_opt_op_idx,
                             type='cast',
                             inputs={'X': fp16_grad_var},
                             outputs={'Out': grad_var},
                             attrs={
                                 'in_dtype': fp16_grad_var.dtype,
                                 'out_dtype': grad_var.dtype,
                                 self._op_role_key: self._op_role.Optimize,
                             })
        return merged_gradient_names
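
    # Illustrative sketch (not executed), assuming a parameter "fc_0.w_0":
    # its per-micro-batch gradient "fc_0.w_0@GRAD" is summed into the
    # persistable var "fc_0.w_0@GRAD@MERGED" (or "fc_0.w_0@GRAD@MERGED@FP16"
    # when fp16_allreduce is on, with a final cast back to fp32 for the
    # optimizer).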

    def _insert_accumulate_gradients_with_fuse(self, main_block, fp16,
                                               fused_size, grad_param_pairs,
                                               first_opt_op_idx):
        grad_param_pairs = self._sort_grad_param_by_dtype(
            main_block, grad_param_pairs)

        grad_param_segments = []
        merged_suffix = '@MERGED@FP16' if fp16 else '@MERGED'
        dtype = paddle.float16 if fp16 else paddle.float32
        cur_size = 0.
        last_dtype = None
        # split the grad based on dtype and fused size
        for grad, param in grad_param_pairs:
            real_grad = main_block.var(grad)
            # create the gradient merged var for each grad
            merged_grad_var = main_block.create_var(
                name=param + core.grad_var_suffix() + merged_suffix,
                dtype=dtype,
                shape=real_grad.shape,
                persistable=True,
                stop_gradient=False)
            real_param = main_block.var(param)
            if hasattr(real_param, 'is_distributed'):
                merged_grad_var.is_distributed = real_param.is_distributed
            tmp_size = self._get_var_size(real_grad)
            # two strategies for splitting the grad
            # 1. the current segment's size reaches the user defined grad_size_in_MB
            # 2. the upcoming grad holds different dtype compared with grads in current segment
            if len(grad_param_segments) == 0 \
                    or cur_size + tmp_size > fused_size \
                    or real_grad.dtype != last_dtype:
                grad_param_segments.append(
                    ([real_grad], [real_param], [merged_grad_var]))
                last_dtype = real_grad.dtype
                cur_size = 0.
            else:
                grad_param_segments[-1][0].append(real_grad)
                grad_param_segments[-1][1].append(real_param)
                grad_param_segments[-1][2].append(merged_grad_var)
                cur_size += tmp_size

        fused_gradients = []
        fused_merged_gradients = []
        # create fused vars for grad and param
        for grad_param_segment in grad_param_segments:
            grad_segment = grad_param_segment[0]
            merged_grad_segment = grad_param_segment[2]
            fused_grad = main_block.create_var(name='FusedGrad_{}'.format(
                grad_segment[0].name),
                                               dtype=grad_segment[0].dtype,
                                               persistable=False,
                                               stop_gradient=False)
            # keep the '.cast_fp16' info in the fuse var name
            fused_merged_grad_name_prefix = 'FusedMergedGrad.cast_fp16.' if \
                merged_grad_segment[0].dtype == paddle.float16 else 'FusedMergedGrad'
            fused_merged_grad_name = fused_merged_grad_name_prefix + '_{}'.format(
                merged_grad_segment[0].name)
            fused_merged_grad = main_block.create_var(
                name=fused_merged_grad_name,
                dtype=merged_grad_segment[0].dtype,
                persistable=True,
                stop_gradient=False)
            fused_gradients.append(fused_grad)
            fused_merged_gradients.append(fused_merged_grad)

        assert len(fused_gradients) == len(grad_param_segments)
        assert len(fused_merged_gradients) == len(grad_param_segments)

        # insert coalesce op at the start of the backward pass
        # use param as the coalesce input to make sure the two Fused vars are in same shape
        first_back_op_idx = None
        for index, op in enumerate(main_block.ops):
            if self._is_backward_op(op) and first_back_op_idx is None:
                first_back_op_idx = index
                break
        assert first_back_op_idx is not None
        offset = 0
        for i in range(len(grad_param_segments)):
            fused_grad = fused_gradients[i]
            fused_merged_grad = fused_merged_gradients[i]
            grads = grad_param_segments[i][0]
            params = grad_param_segments[i][1]
            merged_grads = grad_param_segments[i][2]
            main_block._insert_op_without_sync(
                first_back_op_idx + offset,
                type="coalesce_tensor",
                inputs={"Input": params},
                outputs={
                    "Output": grads,
                    "FusedOutput": fused_grad
                },
                attrs={
                    # Explanation of user_defined_size_of_dtype:
                    # In coalesce op, the align size is 256 bytes
                    # the float takes 4 bytes while fp16 takes 2 bytes.
                    # To meet the requirement, 128 fp16 or 64 float will be aligned
                    # Suppose the total shape of the input tensors is [64]:
                    # if the dtype is float, then the shape of the fuse var is [64],
                    # however if the dtype is fp16, the shape of the fuse var is [128],
                    # which will cause the fused vars' shapes to vary between each other.
                    # To make sure the shapes of the fused vars are identical,
                    # we set the dtype size of both float and fp16 to 2.
                    # In this way, the fused vars' shape for float and fp16 are all [128]
                    "user_defined_size_of_dtype": 2,
                    "copy_data": False,
                    "use_align": True,
                    "dtype": grads[0].dtype,
                    self._op_role_key: self._op_role.Backward,
                    # On npu, the nan/inf check logic is different from gpu.
                    # If there are some not initialized sections in the fused var,
                    # and the value in those sections are nan/inf, it will trigger the nan/inf check.
                    # To avoid these problematic triggers, set constant is needed for npu
                    "set_constant": core.is_compiled_with_npu(),
                    "constant": float(0.0),
                })
            offset += 1
            # For the gradient_merged_fused_var, give an init value during the coalesce op;
            # this removes a problematic fill_constant op. The op role of this coalesce
            # (with init) is set to LRSched so that it runs only once.
            main_block._insert_op_without_sync(
                first_back_op_idx + offset,
                type="coalesce_tensor",
                inputs={"Input": params},
                outputs={
                    "Output": merged_grads,
                    "FusedOutput": fused_merged_grad
                },
                attrs={
                    "user_defined_size_of_dtype": 2,
                    "set_constant": True,
                    "constant": float(0.0),
                    "copy_data": False,
                    "use_align": True,
                    "dtype": merged_grads[0].dtype,
                    self._op_role_key: self._op_role.Optimize.LRSched
                })
            offset += 1

        # insert gradient merge related ops
        first_opt_op_idx += offset
        offset = 0
        for i in range(len(fused_gradients)):
            fused_grad = fused_gradients[i]
            fused_merged_grad = fused_merged_gradients[i]
            is_fp16_grad = 'cast_fp16' in fused_grad.name
            need_cast = (is_fp16_grad is not fp16)
            if need_cast:
                # for fp16 allreduce, cast fp32 grad to fp16
                # for fp32 allreduce, cast fp16 grad to fp32
                cast_grad_var_name = fused_grad.name + '@TMP'
                cast_grad_var = main_block.create_var(name=cast_grad_var_name,
                                                      dtype=dtype,
                                                      persistable=False,
                                                      stop_gradient=False)
                main_block._insert_op(index=first_opt_op_idx + offset,
                                      type='cast',
                                      inputs={'X': fused_grad},
                                      outputs={'Out': cast_grad_var},
                                      attrs={
                                          'in_dtype': fused_grad.dtype,
                                          'out_dtype': cast_grad_var.dtype,
                                          self._op_role_key:
                                          self._op_role.Backward,
                                      })
                offset += 1
                fused_grad = cast_grad_var
            main_block._insert_op(
                index=first_opt_op_idx + offset,
                type='sum',
                inputs={'X': [fused_merged_grad, fused_grad]},
                outputs={'Out': fused_merged_grad},
                attrs={self._op_role_key: self._op_role.Backward})
            offset += 1

        if fp16:
            # if using fp16 allreduce, the optimizer needs fp32 grads, cast them back to fp32
            for grad, param in grad_param_pairs:
                real_grad = main_block.var(grad)
                fp16_grad_name = param + core.grad_var_suffix() + '@MERGED@FP16'
                assert main_block.has_var(fp16_grad_name)
                fp16_grad = main_block.var(fp16_grad_name)
                fp32_grad_name = param + core.grad_var_suffix() + '@MERGED'
                fp32_grad = main_block.create_var(name=fp32_grad_name,
                                                  dtype=paddle.float32,
                                                  shape=real_grad.shape,
                                                  persistable=False,
                                                  stop_gradient=False)
                main_block._insert_op(index=first_opt_op_idx + offset,
                                      type='cast',
                                      inputs={'X': fp16_grad},
                                      outputs={'Out': fp32_grad},
                                      attrs={
                                          'in_dtype': paddle.float16,
                                          'out_dtype': paddle.float32,
                                          self._op_role_key:
                                          self._op_role.Optimize,
                                      })
                offset += 1

        # replace each var with its name, which will be used for inserting allreduce
        for i in range(len(fused_merged_gradients)):
            fused_merged_gradients[i] = fused_merged_gradients[i].name

        return fused_merged_gradients, first_opt_op_idx

    def _accumulate_gradients_with_fuse(self,
                                        main_block,
                                        fp16,
                                        fused_size,
                                        shard=None):
        first_opt_op_idx = None
        grad_param_pairs = []
        # obtain all param/grad pairs that need to be fused
        for index, op in reversed(tuple(enumerate(list(main_block.ops)))):
            # remove the cast op of fp16 grad to fp32 grad
            if self._is_optimize_op(op) and op.type == 'cast':
                in_name = op.input_arg_names[0]
                out_name = op.output_arg_names[0]
                if out_name.strip('@GRAD') in self._param_device_map:
                    assert in_name.replace('.cast_fp16', '') == out_name
                    main_block._remove_op(index)
                    continue

            if self._is_backward_op(op) and first_opt_op_idx is None:
                first_opt_op_idx = index + 1
                # no optimize phase
                if first_opt_op_idx == len(main_block.ops):
                    return

            if self._is_backward_op(op) and (self._op_role_var_key
                                             in op.attr_names):
                op_role_var = op.attr(self._op_role_var_key)
                if len(op_role_var) == 0:
                    continue
                assert len(op_role_var) % 2 == 0
                for i in range(0, len(op_role_var), 2):
                    param_name = op_role_var[i]
                    if not main_block.has_var(param_name):
                        continue
                    if '@BroadCast' in param_name:
                        continue
                    grad_param_pairs.append(
                        (op_role_var[i + 1], op_role_var[i]))

        if len(grad_param_pairs) == 0:
            return

        nranks = shard.worker_num if shard else 1
        device_to_pairs = [[] for _ in range(nranks)]
        for pair in grad_param_pairs:
            root_id = shard.device(pair[1]) if shard else 0
            assert 0 <= root_id < nranks
            device_to_pairs[root_id].append(pair)

        all_fused_merged_gradients = []
        for pairs in device_to_pairs:
            fused_merged_gradients, first_opt_op_idx = \
                self._insert_accumulate_gradients_with_fuse(
                    main_block, fp16, fused_size, pairs, first_opt_op_idx)
            all_fused_merged_gradients += fused_merged_gradients

        main_block._sync_with_cpp()
        return all_fused_merged_gradients

    def _sort_grad_param_by_dtype(self, main_block, grad_param_pairs):
        # sort the grad/param pairs by dtype
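        # e.g. pairs whose grads have dtypes [fp32, fp16, int32] come out
        # ordered as [fp16, fp32, int32]: fp16 first, then fp32, then others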
        fp16_pairs = []
        fp32_pairs = []
        other_pairs = []
        for pairs in grad_param_pairs:
            dtype = main_block.var(pairs[0]).dtype
            if dtype == paddle.float32:
                fp32_pairs.append(pairs)
            elif dtype == paddle.float16:
                fp16_pairs.append(pairs)
            else:
                other_pairs.append(pairs)
        sorted_pairs = fp16_pairs
        sorted_pairs.extend(fp32_pairs)
        sorted_pairs.extend(other_pairs)
        return sorted_pairs

    def _get_var_size(self, var):
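        # returns the size of var in MB, e.g. a [1024, 1024] fp32 var:
        #   1024 * 1024 * 4 / 1024.0 / 1024.0 = 4.0 MB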
        dtype_to_size = {
            core.VarDesc.VarType.FP16: 2,
            core.VarDesc.VarType.FP32: 4,
            core.VarDesc.VarType.FP64: 8,
            core.VarDesc.VarType.INT16: 2,
            core.VarDesc.VarType.INT32: 4,
            core.VarDesc.VarType.INT64: 8,
            core.VarDesc.VarType.BOOL: 1,
            core.VarDesc.VarType.UINT8: 1,
        }
        assert -1 not in var.shape
        return reduce(lambda x, y: x * y,
                      var.shape) * dtype_to_size[var.dtype] / 1024.0 / 1024.0

    def _add_sub_blocks(self, main_block, program_list):
        main_program = main_block.program
        for prog in program_list:
            for op in prog.block(0).ops:
                if not op.has_attr('sub_block'):
                    continue
                origin_sub_block_id = op.attr('sub_block').id
                origin_sub_block = main_program.block(origin_sub_block_id)
                new_sub_block = prog._create_block(parent_idx=0)
                for sub_op in origin_sub_block.ops:
                    op_desc = sub_op.desc
                    ap_op = new_sub_block.desc.append_op()
                    ap_op.copy_from(op_desc)
                new_sub_block._sync_with_cpp()
                self._create_vars(new_sub_block, origin_sub_block)
                op._set_attr('sub_block', new_sub_block)

    def _get_device_info(self, block):
        for op in block.ops:
            if not op._has_kernel(op.type): continue
            op_device = op.attr(self._op_device_key)
            return op_device

    def _process_persistable_vars_in_multi_sections(self, main_program,
                                                    startup_prog, program_list):
        """
        Special Case: process persistable vars that exist in
        multiple sections, e.g., shared weight
        """
        # var_info = {var_name: [program1, program2...]},
        # persistable var only
        var_info = dict()
        for prog in program_list:
            block = prog.block(0)
            for var_name in block.vars:
                if var_name == "double_buffer_0": continue
                var = block.var(var_name)
                if not var.persistable: continue
                if var_name not in var_info:
                    var_info[var_name] = []
                if prog not in var_info[var_name]:
                    var_info[var_name].append(prog)
        for var_name in list(var_info.keys()):
            if len(var_info[var_name]) == 1:
                var_info.pop(var_name)

        # write_info = {var_name: program}, where program is the only program
        # in which the var named var_name is written.
        write_info = dict()
        for var_name in var_info.keys():
            for prog in var_info[var_name]:
                block = prog.block(0)
                for op in block.ops:
                    if op.type == "recv_v2" or op.type == "create_py_reader" or \
                        op.type == "read" or op.type == "update_loss_scaling":
                        continue
                    # We have processed lr related vars
                    if op.attr(self._op_role_key) == int(
                            self._op_role.Optimize.LRSched):
                        continue
                    if var_name in op.desc.output_arg_names():
                        assert var_name not in write_info, (
                            "two sections write the same var({}): second "
                            "op {}.".format(var_name, op))
                        write_info[var_name] = prog
                        break

        for var_name in var_info.keys():
            # Case 1: read only variables, no special process
            if var_name not in write_info: continue

            # Case 2: one write multiple reads
            write_prog = write_info[var_name]
            write_block = write_prog.block(0)
            write_device = self._get_device_info(write_block)
            write_dev_index = int(write_device.split(':')[1])
            all_progs = var_info[var_name]
            for prog in all_progs:
                if prog == write_prog: continue
                read_block = prog.block(0)
                read_device = self._get_device_info(read_block)
                read_dev_index = int(read_device.split(':')[1])
                pair = (write_dev_index, read_dev_index)
                pair_key = write_dev_index * 1000 + read_dev_index
                if pair not in self._pipeline_pair:
                    self._pipeline_pair.append(pair)
                    self._pp_ring_map[pair_key] = self.ring_id
                    ring_id = self.ring_id
                    self.ring_id += 1
                else:
                    ring_id = self._pp_ring_map[pair_key]

                write_block._insert_op(
                    index=0,
                    type='send_v2',
                    inputs={
                        'X': write_block.var(var_name),
                    },
                    attrs={
                        self._op_device_key:
                        write_device,
                        'use_calc_stream':
                        False,
                        # A trick: use the LRSched role to avoid copying
                        # every microbatch
                        self._op_role_key:
                        self._op_role.LRSched,
                        'peer':
                        read_dev_index,
                        'ring_id':
                        ring_id
                    })
                read_block._insert_op(
                    index=0,
                    type='recv_v2',
                    outputs={'Out': [read_block.var(var_name)]},
                    attrs={
                        'out_shape':
                        read_block.var(var_name).shape,
                        'dtype':
                        read_block.var(var_name).dtype,
                        self._op_device_key:
                        read_device,
                        'use_calc_stream':
                        False,
                        # A trick: use the LRSched role to avoid copying
                        # every microbatch
                        self._op_role_key:
                        self._op_role.LRSched,
                        'peer':
                        write_dev_index,
                        'ring_id':
                        ring_id
                    })
                read_block._insert_op(
                    index=1,
                    type='c_sync_comm_stream',
                    inputs={'X': [read_block.var(var_name)]},
                    outputs={'Out': [read_block.var(var_name)]},
                    attrs={
                        self._op_device_key:
                        read_device,
                        # A trick: use the LRSched role to avoid copying
                        # every microbatch
                        self._op_role_key:
                        self._op_role.LRSched,
                        'ring_id':
                        ring_id
                    })

    def _is_gradient_clip_op(self, op):
        return op.desc.has_attr("op_namescope") \
            and op.desc.attr("op_namescope").startswith("/gradient_clip")

    def _is_regularization_op(self, op):
        return op.desc.has_attr("op_namescope") \
            and op.desc.attr("op_namescope").startswith("/regularization")

    def _is_weight_decay_op(self, op):
        # in AdamW namescope is /optimizer_*/weight decay/
        return op.desc.has_attr("op_namescope") \
            and 'weight decay' in op.desc.attr("op_namescope")

    def _get_input_output_info(self, block):
        '''
        Get info of op input and output.
        '''
        # A map from output var to op which generate it.
        output_var_to_op = defaultdict(list)
        # A map from var to op which takes it as input.
        input_var_to_op = defaultdict(list)
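        # e.g. for block ops [mul(X=x, Out=y), relu(X=y, Out=z)] this returns
        #   output_var_to_op: {'y': [[mul_op, 0]], 'z': [[relu_op, 1]]}
        #   input_var_to_op:  {'x': [[mul_op, 0]], 'y': [[relu_op, 1]]}
        # (op names illustrative)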

        for index, op in enumerate(block.ops):
            for var_name in op.input_arg_names:
                input_var_to_op[var_name].append([op, index])
            for var_name in op.output_arg_names:
                output_var_to_op[var_name].append([op, index])

        return output_var_to_op, input_var_to_op

    def _optimize_forward_send_sync(self, program):
        """
        optimize forward send's sync_comm_stream schedule
        """
        if self.schedule_mode != '1F1B': return

        block = program.block(0)

        recv_type = 'recv_v2' if self.mp_degree == 1 else 'partial_recv'
        backward_recv_index = None
        for index, op in enumerate(block.ops):
            if op.type == recv_type and self._is_backward_op(op):
                backward_recv_index = index
                break

        # last pipeline stage
        if backward_recv_index is None: return

        offset = 0
        for index, op in enumerate(list(block.ops)):
            if index >= backward_recv_index: break
            if op.type == 'c_sync_comm_stream' and op.has_attr('pipeline_flag'):
                var_name = op.input_arg_names[0]
                var = block.var(var_name)
                block._remove_op(index + offset, sync=False)
                offset -= 1
                # NOTE:
                # 1. When the backward recv is completed, it indicates
                # that the forward send is completed too. So we only need
                # to use the NOP op to prevent memory release.
                # 2. Because we removed sync_comm_op,
                # we will insert NOP after recv_op.
                block._insert_op_without_sync(
                    index=backward_recv_index,
                    type='nop',
                    inputs={'X': [var]},
                    outputs={'Out': [var]},
                    attrs={self._op_role_key: self._op_role.Backward})
        block._sync_with_cpp()

    def _mv_head_recv(self, program):
        """
        A pass to move the recv op to the beginning of
        the forward/backward phase
        """
        forward_insert_index = 0
        backward_insert_index = None
        block = program.global_block()
        num_ops = len(program.global_block().ops)
        for i in range(num_ops):
            insert_index = None
            op = program.global_block().ops[i]
            op_role = int(op.attr(self._op_role_key))
            if op_role == int(
                    self._op_role.Backward) and backward_insert_index is None:
                backward_insert_index = i
            if op.type != "partial_recv" and op.type != "partial_allgather" and op.type != "nop" and op.type != "recv_v2":
                continue
            if op_role == int(self._op_role.Forward):
                if i == forward_insert_index:
                    forward_insert_index += 1
                    continue
                insert_index = forward_insert_index
            elif op_role == int(self._op_role.Backward):
                if i == backward_insert_index:
                    backward_insert_index += 1
                    continue
                insert_index = backward_insert_index
            else:
                raise ValueError("Unknown op_role: {}".format(op_role))
            op_inputs = dict()
            for name in op.input_names:
                op_inputs[name] = op.input(name)
            op_outputs = dict()
            for name in op.output_names:
                op_outputs[name] = op.output(name)
5959 5960 5961 5962 5963
            block._insert_op_without_sync(index=insert_index,
                                          type=op.type,
                                          inputs=op_inputs,
                                          outputs=op_outputs,
                                          attrs=op.all_attrs())
            block._remove_op(i + 1)
            if op_role == int(self._op_role.Forward):
                forward_insert_index += 1
            elif op_role == int(self._op_role.Backward):
                backward_insert_index += 1
        block._sync_with_cpp()

    def _check_pipeline_persist_var(self, program):
        """
        Pipeline may need multiple forward passes before the corresponding
        backward pass, so persistable vars that are changed in forward and
        used in backward need to be checked.
        """
        block = program.global_block()

        persist_output = set()
        used_in_backward = set()
        for op in block.ops:
            if self._is_forward_op(op):
                for var_name in op.output_arg_names:
                    var = block.vars[var_name]
                    if var.persistable:
                        persist_output.add(var_name)
            elif self._is_backward_op(op):
                for var_name in op.input_arg_names:
                    if var_name in persist_output:
                        used_in_backward.add(var_name)
        if len(used_in_backward) == 0:
            return
        warnings.warn(
            "The pipeline requires multiple forward calculations before backward, "
            "so when a persistable var is changed in the forward pass, it may cause "
            "errors in the backward calculations that use this persistable var. "
            "However, some backward ops don't need this var (NoNeedBufferVars), "
            "in which case there will be no error.\n"
            "So please check these persistable vars which are changed in "
            "forward and used in backward:\n{}".format(used_in_backward))

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
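        # Pipeline entry point (used with fleet): run the inner optimizer's
        # minimize, then split the main program into per-device section
        # programs connected by send/recv ops between pipeline stages.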
        main_block = loss.block
        self.origin_main_block = main_block
        main_program = main_block.program
        if startup_program is None:
            startup_program = default_startup_program()

        pipeline_opt = main_program._pipeline_opt
        assert pipeline_opt, 'Please use pipeline with fleet.'
        required_keys = [
            'local_rank',
            'schedule_mode',
            'micro_batch_size',
            'ring_id',
            'global_ring_id',
            'use_sharding',
            'mp_degree',
            'mp_rank',
        ]
        for key in required_keys:
            assert key in pipeline_opt, \
                'Please use pipeline with fleet to use {}.'.format(key)
        self.local_rank = pipeline_opt['local_rank']
        self.schedule_mode = pipeline_opt['schedule_mode']
        self.micro_batch_size = pipeline_opt['micro_batch_size']
        self.use_sharding = pipeline_opt['use_sharding']
        self.ring_id = pipeline_opt['ring_id']
        self.global_ring_id = pipeline_opt['global_ring_id']
        self.mp_degree = pipeline_opt['mp_degree']
        self.mp_rank = pipeline_opt['mp_rank']
        self.scale_gradient = pipeline_opt.get('scale_gradient', False)
        assert self.mp_degree >= 1
        assert 0 <= self.mp_rank < self.mp_degree

        optimize_ops, params_grads = self._optimizer.minimize(
            loss, startup_program, parameter_list, no_grad_set)
        self._param_device_map = self._origin_optimizer._param_device_map

        self.output_var_to_op, self.input_var_to_op = \
            self._get_input_output_info(main_block)
        # Step1: add default op_device attribute for ops.
        self._add_op_device_attr(main_block)
        device_list = self._check_validation(main_block)

        def device_cmp(device1, device2):
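            # compare "<device>:<id>" strings by their numeric id,
            # e.g. device_cmp('gpu:0', 'gpu:2') == -1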
            dev1_id = int(device1.split(':')[1])
            dev2_id = int(device2.split(':')[1])
            if dev1_id < dev2_id:
                return -1
            elif dev1_id > dev2_id:
                return 1
            else:
                return 0

        sorted_device_list = sorted(device_list, key=cmp_to_key(device_cmp))
        assert sorted_device_list == device_list, (
            "With pipeline parallelism, you must use gpu devices one after "
            "another in the order of their ids.")
        # Step2: add send and recv ops between section boundaries
        self._insert_sendrecv_ops_for_boundaries(main_block)

        # Step3: split program into sections and add pairs of
        # send and recv ops for data var.
        main_program = main_block.program
        program_list = self._split_program(main_program, device_list)
        for p in program_list:
            self._create_vars(p.global_block(), main_block)

        if os.getenv("PADDLE_MANUAL_PIPELINE_STAGE", None):
            self.local_rank = int(os.getenv("PADDLE_MANUAL_PIPELINE_STAGE"))
            assert self.local_rank < len(device_list), (
                "Manually specified "
                "pipeline stage must be less than total number of pipeline "
                "stages.")
        else:
            self.local_rank %= len(device_list)
        # Step3.5: optimize forward send sync_comm to overlap send and recv
        self._optimize_forward_send_sync(program_list[self.local_rank])

        # Step4: Special Case: process persistable vars that exist in
        # multiple sections
        # FIXME
        # self._process_persistable_vars_in_multi_sections(
        #     main_program, startup_program, program_list)

        # Step5: Add sub blocks for section programs
        self._add_sub_blocks(main_block, program_list)

        place_list = []
        for dev in device_list:
            dev_index = int(dev.split(":")[1])
            if core.is_compiled_with_cuda():
                place_list.append(core.CUDAPlace(dev_index % 1))
            elif core.is_compiled_with_npu():
                place_list.append(core.NPUPlace(dev_index % 1))

        # Step6: Split startup program
        new_startup_program = self._split_startup_program(
            startup_program, self.local_rank)

        startup_program._pipeline_opt = {
            "startup_program": new_startup_program,
        }
        real_block = program_list[self.local_rank].global_block()
        if not self.scale_gradient:
            self._insert_loss_scale(real_block)
        if not self.use_sharding:
            # Step7: clear gradients before each mini-batch and
            # accumulate gradients during backward
            self._rename_gradient_var_name(real_block)
            real_block._sync_with_cpp()
            self._accumulate_gradients(real_block)
            real_block._sync_with_cpp()

        if core.is_compiled_with_cuda():
            place_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        elif core.is_compiled_with_npu():
            place_id = int(os.getenv("FLAGS_selected_npus", "0"))
        # A pass to move the recv op to the beginning of
        # the forward/backward phase
        self._mv_head_recv(program_list[self.local_rank])

        # A pass to check pipeline persist var which changed in
        # forward and used in backward
        self._check_pipeline_persist_var(program_list[self.local_rank])

        main_program._pipeline_opt = {
            "trainer": "PipelineTrainer",
            "device_worker": "Section",
            "pipeline_stage": self.local_rank,
            "num_pipeline_stages": len(device_list),
            "schedule_mode": self.schedule_mode,
            "inner_parallelism": len(device_list),
            "section_program": program_list[self.local_rank],
            "place": place_list[self.local_rank],
            "place_id": place_id,
            "sync_steps": -1,
            "num_microbatches": self._num_microbatches,
            "start_cpu_core_id": self._start_cpu_core_id,
        }
        return optimize_ops, params_grads, program_list, self._pipeline_pair, self._pp_ring_map


class RecomputeOptimizer(Optimizer):
    """
	:api_attr: Static Graph

    Recompute Optimizer Wrapper

    Normally, a training step contains three sub-steps: first, run forward
    Operators to calculate the loss; second, run backward Operators to
    calculate the gradients of the parameters; third, apply the optimization
    method to update the values of the parameters.

    In the forward computation process, all variables that are needed by
    the backward computation process will be kept in memory, which occupies
    a great amount of memory when the network becomes very deep.

    Recompute splits the network into k segments. In each segment, it will
    recompute the forward Operators before running the backward Operators,
    which is very helpful for saving memory.

    The Variables that separate a network into segments are called
    checkpoints, and users should set them manually. The usage is very simple:

    Args:
        optimizer (Optimizer): The optimizer that is applied to parameters.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {"x": np.random.random(size=(32, 32)).astype('float32'),
                "y": np.random.randint(2, size=(32, 1)).astype('int64')}
            def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                print(input_x)
                fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                sum_cost = fluid.layers.reduce_mean(cost)
                return sum_cost, fc_1, prediction
            input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
            input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
            cost, fc_1, pred = mlp(input_x, input_y)

            sgd = fluid.optimizer.Adam(learning_rate=0.01)
            sgd = fluid.optimizer.RecomputeOptimizer(sgd)
            sgd._set_checkpoints([fc_1, pred])
            sgd.minimize(cost)

            print("Finished optimize")
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            step = 10

            for i in range(step):
                cost_val = exe.run(feed=gen_data(),
                       program=fluid.default_main_program(),
                       fetch_list=[cost.name])
                print("step=%d cost=%f" % (i, cost_val[0]))

    """

    def __init__(self, optimizer):
        if framework._non_static_mode():
            raise Exception("In dygraph, don't support RecomputeOptimizer.")
        self._optimizer = optimizer
        self._checkpoints = None
        self._learning_rate = self._optimizer._learning_rate
        self._learning_rate_map = self._optimizer._learning_rate_map
        self.enable_offload = False

    def _set_checkpoints(self, checkpoints):
        """
        Args:
            checkpoints (list): List of Variable or string
        """
        assert isinstance(
            checkpoints, list
        ), "_checkpoints should be a list of Variable or a list of String"
        for ckpt in checkpoints:
            assert (
                isinstance(ckpt, str) or isinstance(ckpt, Variable)
            ), "_checkpoints should be a list of Variable or a list of String"
        self._checkpoints = checkpoints

    # should enable offload before calling backward
    def _enable_offload(self):
        self.enable_offload = True

    @framework.deprecate_stat_dict
    def load(self, state_dict):
        """
	    :api_attr: Static Graph

        load function is not supported by Recompute Optimizer for now.
        :return: None

        Args:
            state_dict: the dict loaded by the load_persistable method

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import paddle.compat as cpt

                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction

                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")

                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                try:
                    state_dict = {}
                    sgd.load(state_dict)
                except NotImplementedError as e:
                    print(cpt.get_exception_message(e))
        """
        raise NotImplementedError(
            "load function is not supported by Recompute Optimizer for now")

    def apply_gradients(self, params_grads):
        """
        call apply_gradients function of self._optimizer.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import paddle.fluid.framework as framework

                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction


                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")

                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                params_grads = sgd.backward(
                    cost,
                    startup_program=None,
                    parameter_list=None,
                    no_grad_set=None)

                program = cost.block.program
                with framework.program_guard(program, None):
                    optimize_ops = sgd.apply_gradients(params_grads)

                print("Finished apply gradients")
        """

        return self._optimizer.apply_gradients(params_grads=params_grads)

    def _creat_vars(self, varname):
        pinned_var_name = unique_name.generate(varname + "@Pinned")
        fetched_var_name = unique_name.generate(varname + "@Fetch")
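        # e.g. for varname 'x' this generates names like 'x@Pinned_0' and
        # 'x@Fetch_0' (unique_name appends a numeric suffix)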

        pinned_var = self._main_program.global_block().create_var(
            name=pinned_var_name,
            shape=self.checkpoint_shape,
            dtype=self._main_program.global_block().var(varname).dtype,
            persistable=False,
            stop_gradient=True)

        fetch_var = self._main_program.global_block().create_var(
            name=fetched_var_name,
            shape=self.checkpoint_shape,
            dtype=self._main_program.global_block().var(varname).dtype,
            persistable=False,
            stop_gradient=False)

        return pinned_var_name, fetched_var_name

    def _append_fill_constant_ops(self, startup_program):
        """
        add fill_constant_ops to the end of the prog

        we should fill the pinned vars before running the main_prog
        to instantiate their tensor holder_, which could tell us whether
        the host memory could hold all the checkpoints from all the
        GPU devices in this node.
        """
        op_role = 0
        block = startup_program.global_block()
        fill_constant_vars = self.checkpoint_name2pinned_name.values()
        OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
        for varname in fill_constant_vars:
            var = self._main_program.global_block().var(varname)
            # NOTE (JZ-LIANG) to pre-allocate the CUDAPinned MEM
            pinned_var = block.create_var(
                name=varname,
                shape=self.checkpoint_shape,
                dtype=self._main_program.global_block().var(var.name).dtype,
                persistable=False,
                stop_gradient=True)
            block.append_op(type='fill_constant',
                            outputs={'Out': varname},
                            attrs={
                                "shape": var.shape,
                                "dtype": var.dtype,
                                "value": 0.0,
                                "place_type": 2,
                                OP_ROLE_KEY: op_role,
                            })

        return

    def _insert_async_memcpy_op(self, insert_idx, src_varname, dst_varname,
                                op_role, dst_place_type):
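        # dst_place_type follows the memcpy op's integer convention here:
        #   1 -> copy to CUDAPlace (used when fetching a checkpoint back),
        #   2 -> copy to CUDAPinnedPlace (used when offloading a checkpoint)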
        OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
        self.block._insert_op_without_sync(
            insert_idx,
            type='memcpy',
            inputs={'X': [self._main_program.global_block().var(src_varname)]},
            outputs={
                'Out': [self._main_program.global_block().var(dst_varname)]
            },
            attrs={
                "dst_place_type": int(dst_place_type),
                OP_ROLE_KEY: op_role
            })

    def _insert_fetch_op(self, idx, varname):
        assert varname in self.checkpoint_name2pinned_name, "Try to fetch {} from Pinned Memory, but it is NOT a checkpoint".format(
            varname)

        pinned_varname = self.checkpoint_name2pinned_name[varname]
        fetch_varname = self.checkpoint_name2fetch_name[varname]
        self._insert_async_memcpy_op(idx, pinned_varname, fetch_varname, 1, 1)

    def _insert_offload_op(self, idx, varname):
        assert varname in self.checkpoint_name2pinned_name, "Try to offload {} to Pinned Memory, but it is NOT a checkpoint".format(
            varname)
        pinned_varname = self.checkpoint_name2pinned_name[varname]
        self._insert_async_memcpy_op(idx, varname, pinned_varname, 0, 2)

    def _insert_sync_op(self, op_idx, checkpoint_name):
        # single-stream offload needs no sync
        pass

    def _record_fetch_op(self, idx):
        assert len(self.un_fetch_checkpoint_names
                   ) > 0, "Could NOT found checkpoint to fetch"
        checkpoint_name = self.un_fetch_checkpoint_names.pop(-1)
        logging.debug("Record fetch [{}]".format(checkpoint_name))
        self.idx2insertions[idx] = ("fetch", checkpoint_name)

        return checkpoint_name

    def _record_offload_op(self, idx, checkpoint_name):
        expected_checkpoint_name = self.un_offload_checkpoint_names.pop(0)
        assert checkpoint_name == expected_checkpoint_name, "expected to offload [{}] but got [{}]".format(
            expected_checkpoint_name, checkpoint_name)
        logging.debug("Record offload [{}]".format(checkpoint_name))
        self.idx2insertions[idx] = ("offload", checkpoint_name)

    def _record_sync_op(self, idx, checkpoint_name):
        assert checkpoint_name not in self.synced_checkpoints, "Try to sync the checkpoint [{}] twice".format(
            checkpoint_name)
        self.synced_checkpoints.add(checkpoint_name)
        logging.debug("Record offload sync [{}]".format(checkpoint_name))
        self.idx2insertions[idx] = ("sync", checkpoint_name)

    def _parse_backward(self):

        self.idx2insertions = {}
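        # op index -> ("fetch" | "sync", checkpoint_name); the insertions
        # recorded here are materialized later by _update_backward,
        # e.g. {12: ("fetch", "relu_0.tmp_0")} (values illustrative)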
        # don't offload the last checkpoints, to favor throughput
        self.un_fetch_checkpoint_names = self.sorted_checkpoint_names[:]
        self.un_fetch_checkpoint_names.pop(-1)
        need_fetch_checkpoint_names = self.un_fetch_checkpoint_names[:]
        self.checkpoint_usage_count = {}
        for checkpoint_name in self.un_fetch_checkpoint_names:
            self.checkpoint_usage_count[checkpoint_name] = 0

        self.bw_strart_op_idx = len(self.block.ops)
        for idx, op in enumerate(self.block.ops):
            if int(op.desc.attr("op_role")) == 1:
                self.bw_strart_op_idx = idx
                break

        assert self.bw_strart_op_idx < len(
            self.block.ops), "Could NOT found backword op in prog"

        # fetch second to last checkpoint at the beginning of BW
        fetched_checkpoint_varname = self._record_fetch_op(
            self.bw_strart_op_idx)
        last_last_fetch_checkpoint = None

        for i, op in enumerate(self.block.ops[self.bw_strart_op_idx:]):
            idx = self.bw_strart_op_idx + i
            input_vars = op.desc.input_arg_names()

            for input_var in input_vars:
                if input_var in need_fetch_checkpoint_names:
                    if input_var not in self.un_fetch_checkpoint_names:
                        # fetch the offloaded checkpoint at the first usage of its previous one
                        if self.checkpoint_usage_count[input_var] == 0:
                            # TODO (JZ-LIANG) sync memcpy_stream if extra stream for memcpy
                            second_to_last_fetch_checkpoint = fetched_checkpoint_varname
                            # there is NO fetch ahead of the first checkpoint
                            if input_var != self.sorted_checkpoint_names[0]:
                                fetched_checkpoint_varname = self._record_fetch_op(
                                    idx)

                        # should check that the currently used checkpoint is the last fetched one
                        assert second_to_last_fetch_checkpoint == input_var, "Current recompute segment should use [{}] BUT got [{}]".format(
                            second_to_last_fetch_checkpoint, input_var)
                        # rename
                        self.block.ops[idx]._rename_input(
                            input_var,
                            self.checkpoint_name2fetch_name[input_var])
                        self.checkpoint_usage_count[input_var] += 1
                    else:
                        raise ValueError(
                            "use checkpoint [{}] before fetch in BW".format(
                                input_var))

        assert len(self.un_fetch_checkpoint_names
                   ) == 0, "{} checkpoints have NOT been Recorded".format(
                       self.un_fetch_checkpoint_names)

    def _update_backward(self):
        if len(self.idx2insertions) == 0:
            return
        total_op = len(self.block.ops)
        for op_idx in reversed(range(self.bw_strart_op_idx, total_op)):
            if op_idx in self.idx2insertions:
                operation, checkpoint_name = self.idx2insertions[op_idx]
                if operation == "fetch":
                    self._insert_fetch_op(op_idx, checkpoint_name)
                    logging.debug(
                        "Insert [{}] fetch op.".format(checkpoint_name))
                    del self.idx2insertions[op_idx]
                elif operation == "sync":
                    self._insert_sync_op(op_idx, checkpoint_name)
                    logging.debug("Sync [{}] fetch op.".format(checkpoint_name))
        self.block._sync_with_cpp()
        assert len(
            self.idx2insertions) == 0, "{} checkpoints left un-Fetched".format(
                [ele[1] for ele in self.idx2insertions.values()])

    def _parse_forward(self):

        self.idx2insertions = {}
        # don't offload the last checkpoint: faster, at the cost of less memory saving
        self.un_offload_checkpoint_names = self.sorted_checkpoint_names[:]
        last_checkpoint = self.un_offload_checkpoint_names.pop(-1)
        need_offload_checkpoint_names = self.un_offload_checkpoint_names[:]
        self.checkpoint_usage_count_and_idx = {}
        for checkpoint_name in self.un_offload_checkpoint_names:
            self.checkpoint_usage_count_and_idx[checkpoint_name] = {
                'count': 0,
                'idx': -1
            }
        self.synced_checkpoints = set()
        self.fw_strart_op_idx = len(self.block.ops)
        for idx, op in enumerate(self.block.ops):
            if int(op.desc.attr("op_role")) == 0:
                self.fw_strart_op_idx = idx
                break

        assert self.fw_strart_op_idx < len(
            self.block.ops), "Could NOT found Forward op in prog"
        last_offload_checkpoint = None

        for i, op in enumerate(
                self.block.ops[self.fw_strart_op_idx:self.bw_strart_op_idx]):

            idx = self.fw_strart_op_idx + i
            output_vars = op.desc.output_arg_names()
            input_vars = op.desc.input_arg_names()

            for output_var in output_vars:
                if output_var in need_offload_checkpoint_names:
                    assert len(
                        output_vars
                    ) == 1, "checkpoint should be the only output of a certain op, but [{}] is from [{}]".format(
                        output_var, op)

                    if output_var in self.un_offload_checkpoint_names:
                        # insert sync op if the last checkpoint has not been synced
                        if last_offload_checkpoint is not None:
                            if self.checkpoint_usage_count_and_idx[
                                    last_offload_checkpoint]['count'] == 0:
                                self._record_sync_op(idx,
                                                     last_offload_checkpoint)
                            else:
                                last_usage_idx = self.checkpoint_usage_count_and_idx[
                                    last_offload_checkpoint]['idx']
                                assert last_usage_idx > 0, "last_usage_idx of checkpoint [{}] should be larger than 0".format(
                                    last_offload_checkpoint)
                                self._record_sync_op(last_usage_idx + 1,
                                                     last_offload_checkpoint)
                        # insert offload op after the checkpoint's generation op
                        self._record_offload_op(idx + 1, output_var)
                        last_offload_checkpoint = output_var
                    else:
                        raise ValueError(
                            "There should be just ONE op that output checkpoint [{}]"
                            .format(output_var))
                # need to sync the last to-be-offloaded checkpoint before the op that outputs the last checkpoint
                if output_var == last_checkpoint:
                    assert len(
                        output_vars
                    ) == 1, "checkpoint should be the only output of a certain op, but [{}] is from [{}]".format(
                        output_var, op)
                    assert last_offload_checkpoint == self.sorted_checkpoint_names[
                        -2], "the last offload checkpoint before [{}] is supposed to be [{}], but got [{}]".format(
                            last_checkpoint, self.sorted_checkpoint_names[-2],
                            last_offload_checkpoint)
                    # sync if the last checkpoint has not been synced
                    if self.checkpoint_usage_count_and_idx[
                            last_offload_checkpoint]['idx'] == 0:
                        self._record_sync_op(idx, last_offload_checkpoint)
                    else:
                        last_usage_idx = self.checkpoint_usage_count_and_idx[
                            last_offload_checkpoint]['idx']
                        assert last_usage_idx > 0, "last_usage_idx of checkpoint [{}] should be larger than 0".format(
                            last_offload_checkpoint)
                        self._record_sync_op(last_usage_idx + 1,
                                             last_offload_checkpoint)
            # record checkpoint usage
            for input_var in input_vars:
                if input_var in need_offload_checkpoint_names:
                    assert input_var not in self.synced_checkpoints, "checkpoint [{}] used after sync".format(
                        input_var)
                    self.checkpoint_usage_count_and_idx[input_var]['count'] += 1
                    self.checkpoint_usage_count_and_idx[input_var]['idx'] = idx

        assert len(self.un_offload_checkpoint_names
                   ) == 0, "{} checkpoints have NOT been Recorded".format(
                       self.un_offload_checkpoint_names)
        assert len(self.synced_checkpoints) == len(
            need_offload_checkpoint_names
        ), "{} checkpoints have NOT been Recorded".format(
            set(need_offload_checkpoint_names) - set(self.synced_checkpoints))

    def _update_forward(self):
        if len(self.idx2insertions) == 0:
            return
        for op_idx in reversed(
                range(self.fw_strart_op_idx, self.bw_strart_op_idx)):
            if op_idx in self.idx2insertions:
                operation, checkpoint_name = self.idx2insertions[op_idx]
                if operation == "offload":
                    self._insert_offload_op(op_idx, checkpoint_name)
6618 6619
                    logging.debug(
                        "Insert [{}] offload op.".format(checkpoint_name))
                    del self.idx2insertions[op_idx]
                elif operation == "sync":
                    self._insert_sync_op(op_idx, checkpoint_name)
                    logging.debug(
                        "Insert [{}] offload_sync op.".format(checkpoint_name))
                    del self.idx2insertions[op_idx]

        self.block._sync_with_cpp()
        assert len(self.idx2insertions
                   ) == 0, "{} checkpoints left un-Offloaded".format(
                       [ele[1] for ele in self.idx2insertions.values()])

    def _check_offload_fetch(self):
        # TODO(JZ-LIANG) the single stream offload need no sync
        pass

    def _offload(self, loss, startup_program=None):
        """
        core steps for recompute offload
        1. create pinned vars and temp vars
        2. parse & update Forward pass: offload, sync
        3. parse & update Backward pass: rename, fetch, sync
        4. verify the correctness
        """
        self._main_program = loss.block.program
        self.block = loss.block
        if startup_program is None:
            startup_program = paddle.static.default_startup_program()
J
JZ-LIANG 已提交
6648 6649 6650

        with program_guard(self._main_program, startup_program):
            assert len(self.checkpoint_shape) > 0, (
                "checkpoint shape {} should be a non-empty list like: [12, 512, 1024]"
                .format(self.checkpoint_shape))
            assert all([ele > 0 for ele in self.checkpoint_shape]), (
                "every element in checkpoint shape {} should be a positive integer"
                .format(self.checkpoint_shape))
            self.checkpoint_name2pinned_name = dict()
            self.checkpoint_name2fetch_name = dict()
            for checkpoint_varname in self.sorted_checkpoint_names:
                pinned_var_name, fetch_var_name = self._creat_vars(
                    checkpoint_varname)
                self.checkpoint_name2pinned_name[
                    checkpoint_varname] = pinned_var_name
                self.checkpoint_name2fetch_name[
                    checkpoint_varname] = fetch_var_name
            self._append_fill_constant_ops(startup_program)
            # TODO (JZ-LIANG) to provide two offload strategies in the future
            # step 2. parse & update BW: rename, fetch, sync
            self._parse_backward()
            self._update_backward()
            # step 3. parse & update FW: offload, sync
            self._parse_forward()
            self._update_forward()
            # step 4. verify the correctness
            self._check_offload_fetch()

        return

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        call append_backward with checkpoints.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables or Variable names to update.
            no_grad_set (set|None): set of Variables or Variable names that should be ignored.
            callbacks (list|None): list of callables to run when appending backward
                operator for one parameter.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction


                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")

                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                params_grads = sgd.backward(
                    cost,
                    startup_program=None,
                    parameter_list=None,
                    no_grad_set=None)
                print("Finished backward")
        """
        assert (self._checkpoints
                is not None), "You should call _set_checkpoints first"

        if framework._non_static_mode():
            raise NotImplementedError(
                "DyGraph does not currently support recompute")

        self._dtype = loss.dtype
        program = loss.block.program
        with program_guard(program, startup_program):
            checkpoint_vars = []
            for ckpt in self._checkpoints:
                if isinstance(ckpt, Variable):
                    checkpoint_vars.append(ckpt)
                else:
                    checkpoint_vars.append(loss.block.var(ckpt))

            # fall back to the ordinary backward pass when checkpoints is empty
            if len(checkpoint_vars) > 0:
                params_grads, sorted_checkpoint_names = append_backward(
                    loss,
                    parameter_list,
                    no_grad_set,
                    checkpoints=checkpoint_vars)
            else:
                params_grads = append_backward(loss,
                                               parameter_list,
                                               no_grad_set,
                                               checkpoints=checkpoint_vars)

        if self.enable_offload:
            self.sorted_checkpoint_names = sorted_checkpoint_names
            self._offload(loss, startup_program=startup_program)

        return params_grads

    def apply_optimize(self, loss, startup_program, params_grads):
        """
        call the apply_optimize function of self._optimizer
        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction

                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")

                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                params_grads = sgd.backward(
                    cost,
                    startup_program=None,
                    parameter_list=None,
                    no_grad_set=None)

                optimize_ops = sgd.apply_optimize(
                    cost, startup_program=None, params_grads=params_grads)

                print("Finished apply_optimize")
        """

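        # The wrapped optimizer may expose either the public `apply_optimize`
        # or only the private `_apply_optimize`; use whichever is available.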
        func = self._optimizer.apply_optimize if hasattr(
            self._optimizer,
            'apply_optimize') else self._optimizer._apply_optimize
        return func(loss,
                    startup_program=startup_program,
                    params_grads=params_grads)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        assert isinstance(loss, Variable), "The loss should be a Variable."
        assert (self._checkpoints
                is not None), "You should call _set_checkpoints first"
        if framework._non_static_mode():
            raise NotImplementedError(
                "DyGraph does not currently support recompute")
        params_grads = self.backward(loss,
                                     startup_program=startup_program,
                                     parameter_list=parameter_list,
                                     no_grad_set=no_grad_set)

        optimize_ops = self.apply_optimize(loss,
                                           startup_program=startup_program,
                                           params_grads=params_grads)

        return optimize_ops, params_grads


class LookaheadOptimizer(object):
    r"""
    :api_attr: Static Graph

    This implements the Lookahead optimizer of the
    paper: https://arxiv.org/abs/1907.08610.

    Lookahead keeps two sets of params: the fast_params and
    the slow_params. The inner_optimizer updates the fast_params at every
    training step. Lookahead updates the slow_params and fast_params
    every k training steps as follows:

    .. math::

        slow\_param_t &= slow\_param_{t-1} + \alpha * (fast\_param_{t-1} - slow\_param_{t-1})

        fast\_param_t &= slow\_param_t

    Args:
        inner_optimizer (Optimizer): The optimizer that updates the fast params step by step.
        alpha (float): The learning rate of Lookahead.
        k (int): The slow params are updated every k steps.
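
    A minimal numpy sketch of the update rule (illustrative only; the
    constant decrement stands in for the inner optimizer step):

    .. code-block:: python

        import numpy as np

        alpha, k = 0.5, 5
        fast = np.array([1.0, 2.0])
        slow = fast.copy()                     # step == 1: slow starts at fast
        for step in range(1, 11):
            fast -= 0.01                       # inner optimizer updates fast_params
            if step % k == 0:
                slow += alpha * (fast - slow)  # slow update
                fast = slow.copy()             # fast_params are reset to slow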

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np
            import numpy.random as random

            paddle.enable_static()

            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
            y = fluid.layers.fc(input=[x], size=2, act="softmax")
            loss = fluid.layers.cross_entropy(input=y, label=label)
            loss = paddle.mean(x=loss)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
                                                alpha=0.5,
                                                k=5)
            optimizer.minimize(loss)
            main_program = fluid.default_main_program()
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            def train_reader(limit=5):
                for i in range(limit):
                    yield random.random([2]).astype('float32'), random.random([1]).astype('int64')

            feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
            reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1)

            for batch_data in reader():
                exe.run(fluid.default_main_program(),
                feed=feeder.feed(batch_data))

    """

    def __init__(self, inner_optimizer, alpha=0.5, k=5):

        if framework._non_static_mode():
            raise Exception(
                "LookaheadOptimizer is not supported in dygraph mode.")
        assert (inner_optimizer is not None), "inner optimizer cannot be None"
        assert (
            0.0 <= alpha <= 1.0
        ), "alpha should be in the range [0.0, 1.0]"
        assert (isinstance(k, int) and k > 0), "k should be a positive integer"

        self.inner_optimizer = inner_optimizer
        self.alpha = alpha
        self.k = k
        self.type = "lookahead"

    def minimize(self, loss, startup_program=None):

        # Apply inner optimizer to the main_program
        mini_out = self.inner_optimizer.minimize(
            loss, startup_program=startup_program)

        # Get startup_program and main_program
        if startup_program is None:
            startup_program = default_startup_program()
        main_block = loss.block

        # add some vars to the main_program
        params = [param.name for param in main_block.all_parameters()]
        param_to_slow = {}
        for param in params:
            fast_var = main_block.var(param)
            assert (fast_var is not None)
            slow_var = main_block.create_var(name=param + "@SLOW",
                                             shape=fast_var.shape,
                                             dtype=fast_var.dtype,
                                             persistable=True)
            param_to_slow[param] = slow_var

        # add some vars to the startup_program
        startup_block = startup_program.global_block()
        for param in params:
            fast_var = startup_block.var(param)
            assert (fast_var is not None)
            slow_var = startup_block.create_var(name=param + "@SLOW",
                                                shape=fast_var.shape,
                                                dtype=fast_var.dtype,
                                                persistable=True)

            startup_block.append_op(type="assign",
                                    inputs={"X": fast_var},
                                    outputs={"Out": slow_var})

        with framework.program_guard(main_block.program, startup_program):
            # Add Var k to main prog and startup prog
            k = layers.create_global_var(name="lookahead_k",
                                         shape=[1],
                                         value=int(self.k),
                                         dtype='int32',
                                         persistable=True)

            # Add Var alpha to main prog and startup prog
            alpha = layers.create_global_var(name="lookahead_alpha",
                                             shape=[1],
                                             value=float(self.alpha),
                                             dtype='float32',
                                             persistable=True)

            # Add Var step
            step = layers.create_global_var(name="lookahead_step",
                                            shape=[1],
                                            value=int(0),
                                            dtype='int32',
                                            persistable=True)
            layers.increment(x=step, value=1.0, in_place=True)

            # lookahead
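            # step == 1:     initialize the slow params from the fast params
            # step % k == 0: slow += alpha * (fast - slow), then fast = slow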
            zero_var = layers.fill_constant(shape=[1],
                                            dtype='float32',
                                            value=0.0)

            one_var = layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=1.0)

            mod = layers.elementwise_mod(step, k)
            with layers.control_flow.Switch() as switch:
                with switch.case(step == one_var):
                    for param_name in params:
                        fast_var = main_block.var(param_name)
                        slow_var = param_to_slow[param_name]
                        layers.assign(input=fast_var, output=slow_var)
                with switch.case(mod == zero_var):
                    for param_name in params:
                        fast_var = main_block.var(param_name)
                        slow_var = param_to_slow[param_name]
                        tmp_var = layers.elementwise_add(
                            layers.elementwise_mul(fast_var, alpha),
                            layers.elementwise_mul(
                                slow_var,
                                layers.elementwise_sub(one_var, alpha)))
                        layers.assign(input=tmp_var, output=slow_var)
                        layers.assign(input=tmp_var, output=fast_var)
                with switch.default():
                    pass
        return mini_out


class GradientMergeOptimizer(object):
    """
    Gradient Merge, also called Gradient Accumulation,
    is a training strategy for larger batches. With this strategy,
    the parameters are not updated until a specified number of steps have run.

    For each step, the forward network and the backward network
    will run to calculate the gradient of the parameters.

    For every k steps, the optimization network will run,
    applying a specific optimization method (such as SGD, Adam)
    to the parameters.

    Args:
        inner_optimizer (Optimizer): The specific optimization (such as SGD, Adam)
            which updates the parameters
        k_steps (int): the update period of the parameters
        avg (bool): whether to average the gradients of each mini-batch,
            the default value is `True`
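
    The accumulate-then-update behavior for `k_steps=4` with `avg=True`,
    sketched in plain numpy (illustrative only):

    .. code-block:: python

        import numpy as np

        k_steps, avg = 4, True
        merged = np.zeros(3)                   # the @GRAD@GradientMerge buffer
        for step in range(1, 9):
            grad = np.full(3, float(step))     # gradient of this mini-batch
            merged += grad                     # grad_merge += grad
            if step % k_steps == 0:
                if avg:
                    merged /= k_steps          # average over the k mini-batches
                # ... the inner optimizer consumes `merged` here ...
                merged[:] = 0.0                # clear the accumulator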

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data(batch_size):
                return {"x": np.random.random(size=(batch_size, 32)).astype('float32'),
                        "y": np.random.random(size=(batch_size, 1)).astype('int64')}

            def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                sum_cost = fluid.layers.reduce_mean(cost)
                return sum_cost, fc_1, prediction

            input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
            input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
            cost, fc_1, pred = mlp(input_x, input_y)
            sgd = fluid.optimizer.Adam(learning_rate=0.01)
            sgd = fluid.optimizer.GradientMergeOptimizer(sgd, k_steps=4, avg=True)
            sgd.minimize(cost)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            for i in range(10):
                cost_val = exe.run(feed=gen_data(32),
                           program=fluid.default_main_program(),
                           fetch_list=[cost.name])
                print("step=%d, cost=%f" % (i, cost_val[0]))
    """

    GRAD_MERGE_COND_NAME = "grad_merge_cond_name"

    def __init__(self, inner_optimizer, k_steps=1, avg=True):
        if framework._non_static_mode():
            raise Exception(
                "In dygraph, we don't support GradientMergeOptimizer. "
                "You can do gradient merge by yourself with k-times forward + backward, "
                "and one-time optimizer.minimize()")

        assert (inner_optimizer is not None), "inner optimizer cannot be None"
        assert (isinstance(k_steps, int)
                and k_steps > 0), "k_steps should be a positive integer"

        self.inner_optimizer = inner_optimizer
        self.k_steps = k_steps
        self.type = "gradient_merge"
        self.avg = avg
        self._optimize_ops = None

    def _set_k_steps(self, k_steps):
        self.k_steps = k_steps

    def _set_avg(self, avg):
        self.avg = avg

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        assert isinstance(loss, Variable), "The loss should be a Variable."
        assert (
            parameter_list is None
        ), "The parameter_list should be None when using GradientMergeOptimizer"
        assert (
            no_grad_set is None
        ), "The no_grad_set should be None when using GradientMergeOptimizer"

        params_grads = self.inner_optimizer.backward(
            loss, startup_program=startup_program)
        return params_grads

    def apply_optimize(self, loss, startup_program, params_grads):
        program = loss.block.program
        with program_guard(program, startup_program):
            optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _is_the_backward_op(self, op):
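        # An op is treated as "the backward op" of a grad only if it carries
        # the op_role_var attribute and is tagged with the Backward op role.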
        op_maker = core.op_proto_and_checker_maker
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):
            return True
        return False

    def _remove_op_role_var(self, param, grad):
        op_maker = core.op_proto_and_checker_maker
        op = grad.op
        assert self._is_the_backward_op(op), \
            'grad.op={} is not the backward op which produces the grad={}' \
            .format(op, grad.name)

        block = grad.block
        var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
        assert param.name in var_attr, \
            'when using GradientMergeOptimizer, param={} must be in var_attr={}' \
            .format(param.name, var_attr)
        assert grad.name in var_attr, \
            'when using GradientMergeOptimizer, grad={} must be in var_attr={}' \
            .format(grad.name, var_attr)

        # remove (param, grad) from op_role_var
        var_attr.remove(param.name)
        var_attr.remove(grad.name)
        if len(var_attr) > 1:
            op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
        else:
            op._remove_attr(op_maker.kOpRoleVarAttrName())

    def _add_gm_op_role_var(self, op, param, grad, cond):
        grad.op = op
        op_maker = core.op_proto_and_checker_maker
        backward = op_maker.OpRole.Backward

        # NOTE(wangxi). When distributed, we will insert grad_merge_all_reduce_op_handle
        # in multi_devices_graph_pass, which will allreduce(grad) if cond is True, else
        # do nothing.
        # In this way, the gradients can be merged first and then communicated
        # when the condition is met, reducing the number of communications and
        # thus increasing speed.
        op._set_attr(self.GRAD_MERGE_COND_NAME, cond.name)
        op._set_attr(op_maker.kOpRoleAttrName(), backward)
        op._set_attr(op_maker.kOpRoleVarAttrName(), [param.name, grad.name])

    def _get_gm_cond_var(self, main_block):
        # Add const var
        k_step_var = layers.create_global_var(name="gradient_merge_k",
                                              shape=[1],
                                              value=int(self.k_steps),
                                              dtype='int32',
                                              persistable=True,
                                              force_cpu=True)

        zero_var = layers.create_global_var(name="gradient_merge_zero",
                                            shape=[1],
                                            value=int(0),
                                            dtype='int32',
                                            persistable=True,
                                            force_cpu=True)

        # Add step var & cond var
        step_var = layers.create_global_var(name="gradient_merge_step",
                                            shape=[1],
                                            value=int(0),
                                            dtype='int32',
                                            persistable=True,
                                            force_cpu=True)

        cond_var = main_block.create_var(name="gradient_merge_cond",
                                         shape=[1],
                                         dtype='bool')

        with device_guard("cpu"):
            # step_var = (step_var + 1) % k_step
            layers.increment(x=step_var, value=1.0, in_place=True)
            main_block.append_op(type='elementwise_mod',
                                 inputs={
                                     'X': step_var,
                                     'Y': k_step_var
                                 },
                                 outputs={'Out': step_var},
                                 attrs={
                                     'axis': -1,
                                     'use_mkldnn': False
                                 })

            # cond_var = (step_var == 0)
            main_block.append_op(type='equal',
                                 inputs={
                                     'X': step_var,
                                     'Y': zero_var
                                 },
                                 outputs={'Out': cond_var})

        return cond_var

    def apply_gradients(self, params_grads):
        main_program = default_main_program()
        startup_program = default_startup_program()
        main_block = main_program.global_block()
        startup_block = startup_program.global_block()

        cond = self._get_gm_cond_var(main_block)

        # TODO(mapingshuo) support sparse embedding
        # step1: remove grad.op's op_role_var
        for param, grad in params_grads:
            assert (
                param.type != core.VarDesc.VarType.SELECTED_ROWS
            ), "SELECTED_ROWS is not supported in GradientMergeOptimizer for now"

            self._remove_op_role_var(param, grad)

        param_to_grad = {k.name: v for (k, v) in params_grads}
        param_names = param_to_grad.keys()
        param_to_gradient_merge = {}

        new_params_grads = []
        # step2: create gradient_merge var and init with 0
        # and update op_role_var
        for param, grad in params_grads:
            param_name = param.name
            param_var = main_block.var(param_name)
            assert (param_var is not None)
            gradient_merge_var = main_block.create_var(name=param_name +
                                                       "@GRAD@GradientMerge",
                                                       shape=param_var.shape,
                                                       dtype=param_var.dtype,
                                                       persistable=True)
            param_to_gradient_merge[param_name] = gradient_merge_var

            startup_gradient_merge_var = startup_block.create_var(
                name=param_name + "@GRAD@GradientMerge",
                shape=param_var.shape,
                dtype=param_var.dtype,
                persistable=True)
            startup_block.append_op(type="fill_constant",
                                    outputs={"Out": startup_gradient_merge_var},
                                    attrs={
                                        "shape": param_var.shape,
                                        "dtype": param_var.dtype,
                                        "value": float(0),
                                    })

            # grad_merge += grad
            new_grad_op = main_block.append_op(
                type="elementwise_add",
                inputs={
                    'X': grad,
                    'Y': gradient_merge_var
                },
                outputs={'Out': gradient_merge_var},
                attrs={
                    'axis': -1,
                    'use_mkldnn': False
                })
            self._add_gm_op_role_var(new_grad_op, param, gradient_merge_var,
                                     cond)
            new_params_grads.append([param, gradient_merge_var])

        def true_apply_gradient():
            cur_block_idx = main_program.current_block_idx
            cur_block = main_program.current_block()

            # cur_block's forward_block & backward_block is itself
            cur_block._set_forward_block_idx(cur_block_idx)
            op_maker = core.op_proto_and_checker_maker

            if self.avg:
                for param, new_grad in new_params_grads:
                    # grad /= k_steps
                    cur_block.append_op(type='scale',
                                        inputs={'X': new_grad},
                                        outputs={'Out': new_grad},
                                        attrs={
                                            'scale': 1.0 / self.k_steps,
                                            'bias': 0.0,
                                            'bias_after_scale': False
                                        })
                    new_grad.op._set_attr(op_maker.kOpRoleAttrName(),
                                          op_maker.OpRole.Backward)

            for param, new_grad in new_params_grads:
                # NOTE. regularization will append ops to grad.block,
                # while new_grad's real block is global_block,
                # but we want to append regularization ops to cur_block,
                # so we set new_grad.block = cur_block
                new_grad.block = cur_block

            self._optimize_ops = self.inner_optimizer.apply_gradients(
                new_params_grads)

            # clear gradient_merge_vars
            for param, new_grad in new_params_grads:
                layers.fill_constant(shape=new_grad.shape,
                                     dtype=new_grad.dtype,
                                     value=0.0,
                                     out=new_grad)
                new_grad.op._set_attr(op_maker.kOpRoleAttrName(),
                                      op_maker.OpRole.Optimize)

        # step3. apply gradient
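        # `true_apply_gradient` runs only on steps where step % k_steps == 0;
        # on every other step the gradients are merely accumulated.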
        layers.cond(cond, true_fn=true_apply_gradient, false_fn=None)

        return self._optimize_ops

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        assert isinstance(loss, Variable), "The loss should be a Variable."

        params_grads = self.backward(loss,
                                     startup_program=startup_program,
                                     parameter_list=parameter_list,
                                     no_grad_set=no_grad_set)

        optimize_ops = self.apply_optimize(loss,
                                           startup_program=startup_program,
                                           params_grads=params_grads)

        return optimize_ops, params_grads