optimizer.py 278.3 KB
Newer Older
1
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 15

from __future__ import print_function
16

17
import numpy as np
18
import six
19
import os
20
import logging
21
from collections import defaultdict
22

23
import paddle
Q
Qiao Longfei 已提交
24
from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
25
from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard
26

27 28
from . import framework
from . import layers
29
from . import unique_name
30
from .backward import append_backward, _some_in_set_, _append_grad_suffix_, _get_no_grad_set_name
31
from .clip import GradientClipBase, GradientClipByNorm, error_clip_callback, append_gradient_clip_ops, ClipGradByGlobalNorm
32 33 34
from .framework import program_guard
from .initializer import Constant
from .layer_helper import LayerHelper
S
sneaxiy 已提交
35
from .layers import ops
36
from .dygraph import base as imperative_base
37
from .dygraph import no_grad
38
from .dygraph.learning_rate_scheduler import LearningRateDecay, _LearningRateEpochDecay
39 40 41
from paddle.fluid import core
from paddle.fluid.layers import tensor
from functools import reduce
42
from functools import cmp_to_key
43
from .wrapped_decorator import signature_safe_contextmanager
M
mapingshuo 已提交
44
from .. import compat as cpt
45
import warnings
W
wanghuancoder 已提交
46
from paddle import _C_ops
47

48
__all__ = [
49 50 51 52
    'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'Dpsgd', 'DecayedAdagrad',
    'Ftrl', 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer',
    'AdamOptimizer', 'AdamaxOptimizer', 'DpsgdOptimizer',
    'DecayedAdagradOptimizer', 'RMSPropOptimizer', 'FtrlOptimizer', 'Adadelta',
Z
Zeng Jinle 已提交
53
    'AdadeltaOptimizer', 'ModelAverage', 'LarsMomentum',
54 55
    'LarsMomentumOptimizer', 'LambOptimizer', 'ExponentialMovingAverage',
    'PipelineOptimizer', 'LookaheadOptimizer', 'RecomputeOptimizer'
56
]
Q
Qiao Longfei 已提交
57 58 59 60 61 62


class Optimizer(object):
    """Optimizer Base class.

    Define the common interface of an optimizer.
63 64
    User should not use this class directly,
    but need to use one of it's implementation.
Q
Qiao Longfei 已提交
65 66
    """

67
    @imperative_base.no_grad
68 69 70 71
    def __init__(self,
                 learning_rate,
                 parameter_list=None,
                 regularization=None,
72
                 grad_clip=None,
73 74
                 flatten_param_grads=False,
                 align_size=-1,
75
                 name=None):
76 77 78 79 80 81
        """
        Args:
            flatten_param_grads (bool, optional): Whether to flatten all the parameters and grads. 
                If true, the parameters and gradients will be coalesce to contiguous mempry, 
                and the grad_clip ops / optimizer ops will be fuse to one operator.
        """
82
        # Because of the loop import, so place it in the function body
83
        from paddle.optimizer.lr import LRScheduler
H
hong 已提交
84 85
        self._parameter_list = list(
            parameter_list) if parameter_list is not None else None
86
        self._name = name
L
lujun 已提交
87
        if framework.in_dygraph_mode():
88
            if not isinstance(learning_rate,
89
                              (float, LearningRateDecay, LRScheduler)):
M
minqiyang 已提交
90
                raise TypeError(
91
                    "learning rate should be float or LRScheduler, got %s here"
M
minqiyang 已提交
92
                    % type(learning_rate))
93
            if self._parameter_list is None:
94 95 96
                raise AttributeError(
                    "parameter_list argument given to the Optimizer should not be None in dygraph mode."
                )
97 98 99 100 101 102 103 104
            if regularization is not None:
                for param in self._parameter_list:
                    if param.regularizer is not None:
                        logging.info(
                            "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. "
                            "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                            % regularization.__str__())
                        break
M
minqiyang 已提交
105
        else:
106
            if not isinstance(learning_rate,
107
                              (float, framework.Variable, LRScheduler)):
M
minqiyang 已提交
108
                raise TypeError(
109
                    "learning rate should be float or LRScheduler, got %s here"
110
                    % type(learning_rate))
M
minqiyang 已提交
111

112 113 114 115 116
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
D
dzhwinter 已提交
117
        self.regularization = regularization
118
        self._grad_clip = grad_clip
119
        self._learning_rate = learning_rate
120 121
        self._flatten_param_grads = flatten_param_grads
        self._align_size = align_size
L
Leo Chen 已提交
122

D
dzhwinter 已提交
123
        self._dtype = None
L
Leo Chen 已提交
124 125 126 127
        # Infer the dtype form parameter
        if self._parameter_list:
            self._dtype = self._parameter_list[0].dtype

128
        # each program should have a independent learning rate
129
        # program -> Variable(learning_rate)
Q
qiaolongfei 已提交
130
        self._learning_rate_map = dict()
131
        if isinstance(self._learning_rate, framework.Variable):
132 133
            self._learning_rate_map[framework.default_main_program(
            )] = self._learning_rate
134 135 136 137 138
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra variables associated with the parameters
        # to train. These variables are called accumulators.
        # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
139 140
        # global_accumulator dict, {accum_name : acc_variable, ...}
        self._global_accumulators = {}
141
        self.helper = LayerHelper(self.__class__.__name__)
142
        self._opti_name_list = []
H
hong 已提交
143
        self._accumulators_holder = {}
144
        self._param_device_map = dict()
H
hong 已提交
145 146 147 148

    @framework.dygraph_only
    def state_dict(self):
        '''
T
tianshuo78520a 已提交
149 150
        Get state dict information from optimizer. It contain all the variable used by optimizer. For Adam optimizer, contains beta1, beta2, momentum etc. If LearningRateDecay have been used, global_step will be include in state dict.
        If the optimizer never be called(minimize function), the state_dict is empty.
H
hong 已提交
151 152 153

        Args: None
        Return:
T
tianshuo78520a 已提交
154
            state_dict(dict) : dict contains all the variable used by optimizer
H
hong 已提交
155 156 157 158 159
        
        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
160 161 162 163 164 165

                with fluid.dygraph.guard():
                    emb = fluid.dygraph.Embedding([10, 10])

                    adam = fluid.optimizer.Adam(0.001, parameter_list=emb.parameters())
                    state_dict = adam.state_dict()
H
hong 已提交
166 167

        '''
168
        from paddle.optimizer.lr import LRScheduler
H
hong 已提交
169 170 171 172
        state_dict = {}
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                state_dict[var_tmp.name] = var_tmp
173 174
        for k, v in self._global_accumulators.items():
            state_dict[v.name] = v
H
hong 已提交
175
        # global step if use lr decay
176
        if isinstance(self._learning_rate, LRScheduler):
177 178
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
            return state_dict
H
hong 已提交
179
        if isinstance(self._learning_rate, LearningRateDecay):
180 181 182 183
            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()

            if not isinstance(self._learning_rate, _LearningRateEpochDecay):
                var_tmp = None
184 185 186
                var_temp = framework._varbase_creator(
                    None, name='global_step', dtype='int32')

187 188
                tensor.fill_constant(
                    [1], "int32", self._learning_rate.step_num, out=var_temp)
H
hong 已提交
189

190
                state_dict['global_step'] = var_temp
H
hong 已提交
191 192 193
        return state_dict

    @framework.dygraph_only
194
    def set_state_dict(self, state_dict):
H
hong 已提交
195
        '''
T
tianshuo78520a 已提交
196
        Load optimizer state dict. For Adam optimizer, contains beta1, beta2, momentum etc. If LearningRateDecay have been used, global_step will be changed.
H
hong 已提交
197 198 199 200 201 202 203 204

        Args: 
            state_dict(dict) : Dict contains all the Variable needed by optimizer
        Return:
            None
        
        Examples:
            .. code-block:: python
205

206 207
                import paddle
                import paddle.fluid as fluid
208 209 210

                paddle.disable_static()

211
                emb = paddle.nn.Embedding(10, 10)
212

213
                state_dict = emb.state_dict()
214
                fluid.save_dygraph(state_dict, "paddle_dy")
215

216
                scheduler = paddle.optimizer.lr.NoamDecay(	
217 218 219 220
                    d_model=0.01, warmup_steps=100, verbose=True)
                adam = paddle.optimizer.Adam(
                    learning_rate=scheduler,
                    parameters=emb.parameters())
221
                state_dict = adam.state_dict()
222
                fluid.save_dygraph(state_dict, "paddle_dy")
223

224
                para_state_dict, opti_state_dict = fluid.load_dygraph("paddle_dy")
H
hong 已提交
225
        '''
226 227
        from paddle.optimizer.lr import LRScheduler
        if isinstance(self._learning_rate, LRScheduler):
228
            self._learning_rate.set_dict(state_dict["LR_Scheduler"])
H
hong 已提交
229 230

        if isinstance(self._learning_rate, LearningRateDecay):
231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252
            self._learning_rate.set_dict(state_dict["LR_Scheduler"])

            if not isinstance(self._learning_rate, _LearningRateEpochDecay):
                assert 'global_step' in state_dict, \
                        'Global step not in state dict, Dygraph use LearningRateDecay, global_step must in state_dict'
                global_step = state_dict['global_step']

                if isinstance(global_step, Variable):
                    step_np = global_step
                    step_np = np.array(step_np.value().get_tensor())
                    assert step_np.shape == (1,),  \
                            "global step shape is (1,), the shape is {}".format( step_np.shape )

                    self._learning_rate.step_num = int(step_np[0])
                elif isinstance(global_step, np.ndarray):
                    assert global_step.shape == (1,),  \
                            "global step shape is (1,), the shape is {}".format( global_step.shape )
                    self._learning_rate.step_num = global_step[0]
                else:
                    raise RuntimeError(
                        "Type not supprt, value in state dict must be [VarBase, Variable, numpy], the type is ",
                        type(global_step))
H
hong 已提交
253

254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270
        def _load_state_para(state_dict, param):
            var = param.value()
            tensor = var.get_tensor()
            model_np = np.array(tensor)
            load_para = state_dict[param.name]
            if isinstance(load_para, Variable):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, core.VarBase):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, np.ndarray):
                load_para_np = load_para
            else:
                raise RuntimeError("State dict type {} not supprt".format(
                    str(type(load_para))))

            assert model_np.shape == load_para_np.shape,  \
                                        "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
271
                                                param.name, model_np.shape, load_para_np.shape)
272 273 274

            assert model_np.dtype == load_para_np.dtype, \
                                        "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
275
                                            param.name, model_np.dtype, load_para_np.dtype)
276 277 278

            tensor.set(load_para_np, framework._current_expected_place())

H
hong 已提交
279 280 281 282 283
        self._accumulators_holder = state_dict
        for k, v in self._accumulators.items():
            for para_name, var_tmp in v.items():
                assert var_tmp.name in state_dict, \
                        "optimizer variable {} not found".format( var_tmp.name )
284
                _load_state_para(state_dict, var_tmp)
H
hong 已提交
285

286 287 288 289
        for k, v in self._global_accumulators.items():
            assert v.name in state_dict, \
                        "optimizer variable {} not found".format( v.name )
            _load_state_para(state_dict, v)
290

291 292 293
    # [aliases] Compatible with old method names
    set_dict = set_state_dict

294 295
    def get_opti_var_name_list(self):
        return self._opti_name_list
Q
Qiao Longfei 已提交
296

Q
Qiao Longfei 已提交
297
    def _create_global_learning_rate(self):
298 299
        from paddle.optimizer.lr import LRScheduler
        if isinstance(self._learning_rate, LRScheduler):
300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[1],
                    persistable=True,
                    stop_gradient=True,
                    dtype='float32' if self._dtype is None else self._dtype)
                main_prog = framework.default_main_program()
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var
                self._learning_rate_map[framework.default_main_program(
                )] = lr_var

            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value))
            return

322 323 324
        if imperative_base.enabled():
            # create learning rate Variable
            if isinstance(self._learning_rate, float):
M
minqiyang 已提交
325 326 327 328 329 330 331 332 333 334 335 336
                lr = self._global_learning_rate()

                if isinstance(lr, framework.Variable):
                    return
                else:
                    self._learning_rate_map[framework.default_main_program(
                    )] = layers.create_global_var(
                        name=unique_name.generate("learning_rate"),
                        shape=[1],
                        value=float(self._learning_rate),
                        dtype='float32' if self._dtype is None else self._dtype,
                        persistable=True)
337
            # get learning rate Variable from LearningRateDecay
M
minqiyang 已提交
338
            elif isinstance(self._learning_rate, LearningRateDecay):
339 340 341
                self._learning_rate_map[framework.default_main_program(
                )] = self._learning_rate()
            else:
Q
qiaolongfei 已提交
342
                raise TypeError(
343 344
                    "optimizer's learning rate must be float or LearningRateDecay"
                )
345
        else:
346 347 348 349
            lr = self._global_learning_rate()

            if isinstance(lr, framework.Variable):
                return
M
minqiyang 已提交
350 351 352 353 354 355
            else:
                if not isinstance(self._learning_rate, float):
                    raise TypeError(
                        "learning rate variable is create outside optimizer,"
                        "can not create new learning rate variable for new program"
                    )
Q
Qiao Longfei 已提交
356

357 358 359 360 361 362 363 364
            # create learning rate in the current main program
            self._learning_rate_map[framework.default_main_program(
            )] = layers.create_global_var(
                name=unique_name.generate("learning_rate"),
                shape=[1],
                value=float(self._learning_rate),
                dtype='float32' if self._dtype is None else self._dtype,
                persistable=True)
365

366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442
    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative
        
        Set the value of the learning rate manually in the optimizer. If the optimizer use LearningRateDecay,
        this API cannot be invoked, because it will lead to conflict.

        Args:
            value (float|Variable): the value of learning rate

        Returns:
            None
          
        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                        
                with fluid.dygraph.guard():
                    linear = fluid.dygraph.nn.Linear(10, 10)

                    adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

                    # set learning rate manually by python float value
                    lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                    for i in range(5):
                        adam.set_lr(lr_list[i])
                        lr = adam.current_step_lr()
                        print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.2
                    #    current lr is 0.3
                    #    current lr is 0.4
                    #    current lr is 0.5
                    #    current lr is 0.6


                    # set learning rate manually by framework Variable
                    lr_var = fluid.layers.create_global_var(
                        shape=[1], value=0.7, dtype='float32')
                    adam.set_lr(lr_var)
                    lr = adam.current_step_lr()
                    print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.7



        """
        if not isinstance(value, (framework.Variable, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be (float, Variable), but received %s."
                % (type(value)))
        if isinstance(self._learning_rate, LearningRateDecay):
            raise RuntimeError(
                "optimizer's learning rate can't be LearningRateDecay when invoke this API, because this will lead to conflict."
            )
        if isinstance(value, float):
            self._learning_rate = value
            current_lr = self._global_learning_rate()
            if current_lr is not None:
                global_block = framework.default_main_program().global_block()
                global_block.append_op(
                    type='fill_constant',
                    outputs={'Out': [current_lr]},
                    attrs={
                        'dtype': current_lr.dtype,
                        'shape': list(current_lr.shape),
                        'value': float(value)
                    },
                    stop_gradient=True)
        else:
            assert len(value.shape) == 1 and value.shape[
                0] == 1, "optimizer's learning rate must be 1-D Tensor with shape[1]"
            self._learning_rate_map[framework.default_main_program()] = value

443 444 445
    @framework.dygraph_only
    def current_step_lr(self):
        """
446
        :api_attr: imperative
447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
        
        Get current step learning rate. The return value is all the same When LearningRateDecay is not used,
        otherwise return the step learning rate.

        Returns:
            float: The learning rate of the current step.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                # example1: LearningRateDecay is not used, return value is all the same
                with fluid.dygraph.guard():
                    emb = fluid.dygraph.Embedding([10, 10])
                    adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters())
                    lr = adam.current_step_lr()
                    print(lr) # 0.001

                # example2: PiecewiseDecay is used, return the step learning rate
                with fluid.dygraph.guard():
                    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
                    linear = fluid.dygraph.nn.Linear(10, 10)
                    inp = fluid.dygraph.to_variable(inp)
                    out = linear(inp)
                    loss = fluid.layers.reduce_mean(out)
                    
                    bd = [2, 4, 6, 8]
                    value = [0.2, 0.4, 0.6, 0.8, 1.0]
                    adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
                                           parameter_list=linear.parameters())

                    # first step: learning rate is 0.2
                    np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True

                    # learning rate for different steps
                    ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
                    for i in range(12):
                        adam.minimize(loss)
                        lr = adam.current_step_lr()
                        np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True

        """
        current_lr = self._global_learning_rate()
492
        if isinstance(current_lr, framework.Variable):
493 494 495 496
            return self._global_learning_rate().numpy()[0]

        if isinstance(self._learning_rate, float):
            return self._learning_rate
497 498 499
        elif isinstance(self._learning_rate, _LearningRateEpochDecay):
            step_lr = self._learning_rate()
            return step_lr.numpy()[0]
500 501 502 503 504 505 506
        else:
            step_lr = self._learning_rate.step()
            if isinstance(step_lr, (float, int)):
                return step_lr
            else:
                return step_lr.numpy()[0]

Y
yuyang18 已提交
507
    def _global_learning_rate(self, program=None):
Q
Qiao Longfei 已提交
508 509 510 511
        """
        get global decayed learning rate
        :return:
        """
512 513
        if program is None:
            program = framework.default_main_program()
Q
qiaolongfei 已提交
514
        return self._learning_rate_map.get(program, None)
Q
Qiao Longfei 已提交
515

Q
Qiao Longfei 已提交
516 517 518 519 520
    def _append_optimize_op(self, block, param_and_grad):
        """ append optimize operator to block and return all the added optimize_op
        """
        raise NotImplementedError()

521 522 523 524
    def _create_param_lr(self, param_and_grad):
        # create learning rate variable for every parameter
        param = param_and_grad[0]
        param_lr = param.optimize_attr['learning_rate']
W
Wu Yi 已提交
525 526
        if type(param_lr) == Variable:
            return param_lr
Q
qiaolongfei 已提交
527
        else:
W
Wu Yi 已提交
528
            if param_lr == 1.0:
Y
yuyang18 已提交
529
                return self._global_learning_rate()
W
Wu Yi 已提交
530
            else:
X
Xin Pan 已提交
531 532 533
                with default_main_program()._lr_schedule_guard(
                        is_with_opt=True), framework.name_scope(
                            'scale_with_param_lr'):
534
                    return self._global_learning_rate() * param_lr
535 536 537 538 539 540 541

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss variable is present
            parameters: list of parameter variables for the optimizer
Q
Qiao Longfei 已提交
542
        """
543 544
        pass

545
    def _finish_update(self, block, parameters_and_grads):
546 547 548 549 550 551 552 553
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss variable is present
            parameters: list of parameter variables for the optimizer

        Returns:
Q
qiaolongfei 已提交
554
            None
555 556 557
        """
        pass

558 559 560 561 562
    def _add_accumulator(self,
                         name,
                         param,
                         dtype=None,
                         fill_value=0.0,
563
                         shape=None,
564
                         type=None,
565
                         device=None):
566 567 568 569 570 571 572 573 574
        """Utility function to add an accumulator for a parameter

        Args:
            block: the block in which the loss variable is present
            name: name of the accumulator
            param: parameter variable for which accumulator is to be added
            dtype: data type of the accumulator variable
            fill_value: value to initialize the accumulator variable
        """
W
whs 已提交
575 576
        if self._name is not None:
            name = self._name + "_" + name
577 578
        if (name in self._accumulators and
                param.name in self._accumulators[name]):
L
lujun 已提交
579
            if framework.in_dygraph_mode():
X
polish  
Xin Pan 已提交
580
                return self._accumulators[name][param.name]
581
            raise Exception("Accumulator {} already exists for parameter {}".
582
                            format(name, param.name))
583 584
        if shape == None:
            shape = param.shape
Q
Qiao Longfei 已提交
585
        assert isinstance(self.helper, LayerHelper)
586 587 588 589 590

        var_name = param.name + "_" + name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

Q
Qiao Longfei 已提交
591
        var = self.helper.create_global_variable(
592
            name=var_name,
Q
Qiao Longfei 已提交
593
            persistable=True,
F
fengjiayi 已提交
594
            dtype=dtype or param.dtype,
595
            type=param.type if type is None else type,
H
hong 已提交
596 597
            shape=shape,
            belong_to_optimizer=True)
598 599 600 601 602
        if device is None:
            device = self._get_device_for_param(param.name)
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value)))
H
hong 已提交
603 604 605 606 607 608 609

        if framework.in_dygraph_mode():
            if len(self._accumulators_holder) > 0:
                assert var_name in self._accumulators_holder, \
                        "Optimizer set error, {} should in state dict".format( var_name )
                var.set_value(self._accumulators_holder[var_name])

Q
Qiao Longfei 已提交
610
        self._accumulators[name][param.name] = var
611
        return var
612

613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666
    def _add_global_accumulator(self,
                                name,
                                dtype=None,
                                fill_value=0.0,
                                shape=None,
                                type=None,
                                device=None):
        """Utility function to add a global accumulator for all parameters in the model

        Args:
            block: the block in which the loss variable is present
            name: name of the accumulator
            dtype: data type of the accumulator variable
            fill_value: value to initialize the accumulator variable
            shape: the shape of the accumulator
            type: the variable type of the accumulator
            device: the target place of the accumulator
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name in self._global_accumulators):
            if framework.in_dygraph_mode():
                return self._global_accumulators[name]
            raise Exception("Global accumulator {} already exists".format(name))
        if shape == None:
            shape = [1]  # most case, global accumulator is of shape [1]
        assert isinstance(self.helper, LayerHelper)

        var_name = name
        var_name = unique_name.generate(var_name)
        self._opti_name_list.append(var_name)

        var = self.helper.create_global_variable(
            name=var_name,
            persistable=True,
            dtype=dtype if dtype else self._dtype,
            type=type,
            shape=shape,
            belong_to_optimizer=True)
        if device is None:
            device = 'cpu'
        with device_guard(device):
            self.helper.set_variable_initializer(
                var, initializer=Constant(value=float(fill_value)))

        if framework.in_dygraph_mode():
            if len(self._accumulators_holder) > 0:
                assert var_name in self._accumulators_holder, \
                        "Optimizer set error, {} should in state dict".format( var_name )
                var.set_value(self._accumulators_holder[var_name])

        self._global_accumulators[name] = var
        return var

667 668 669 670 671 672 673 674
    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched

        Returns:
675
            accumulator variable
676
        """
W
whs 已提交
677 678
        if self._name is not None:
            name = self._name + "_" + name
679 680 681 682 683 684
        if (name not in self._accumulators or
                param.name not in self._accumulators[name]):
            raise Exception("Accumulator {} does not exist for parameter {}".
                            format(name, param.name))
        return self._accumulators[name][param.name]

685 686 687 688 689 690 691 692 693 694 695 696 697 698 699
    def _get_global_accumulator(self, name):
        """Utility function to fetch a global accumulator

        Args:
            name: name of the accumulator

        Returns:
            accumulator variable
        """
        if self._name is not None:
            name = self._name + "_" + name
        if (name not in self._global_accumulators):
            raise Exception("Global accumulator {} does not exist".format(name))
        return self._global_accumulators[name]

700 701 702 703 704 705 706 707 708 709 710 711
    def _update_param_device_map(self, parameters_and_grads, target_block):
        for param_and_grad in parameters_and_grads:
            if param_and_grad[0].trainable is True:
                param_name = param_and_grad[0].name
                ops = target_block.ops
                device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
                )
                for op in ops:
                    input_arg_names = op.input_arg_names
                    if param_name in input_arg_names:
                        self._param_device_map[param_name] = op.attr(
                            device_attr_name)
712
                        break
713 714 715 716 717 718 719

    def _get_device_for_param(self, param_name):
        device = None
        if param_name in self._param_device_map:
            device = self._param_device_map[param_name]
        return device

720
    def _create_optimization_pass(self, parameters_and_grads):
Q
Qiao Longfei 已提交
721 722 723
        """Add optimization operators to update gradients to variables.

        Args:
Q
qiaolongfei 已提交
724
          parameters_and_grads(list(tuple(Variable, Variable))):
725
            a list of (variable, gradient) pair to update.
Q
Qiao Longfei 已提交
726 727

        Returns:
728
          return_op_list: a list of operators that will complete one step of
729 730 731
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
Q
Qiao Longfei 已提交
732
        """
733 734 735 736 737
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
738
        # for parameters and extend _finish_update method to add custom ops.
739

740
        # Allways called under program_guard use global block as loss block
741 742 743
        # But if current block is in control flow, append optimize op in the
        # grad block of current block

744
        global_block = framework.default_main_program().global_block()
745 746 747 748 749 750 751 752 753
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert current_block.backward_block_idx != -1, \
                "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx]

        start = len(target_block.ops)
754

755
        self._update_param_device_map(parameters_and_grads, target_block)
C
chengduo 已提交
756
        self._create_accumulators(
757
            target_block,
C
chengduo 已提交
758
            [p[0] for p in parameters_and_grads if p[0].trainable])
759 760
        self._create_global_learning_rate()

M
minqiyang 已提交
761
        if framework.in_dygraph_mode():
762 763 764
            for param_and_grad in parameters_and_grads:
                if param_and_grad[1] is None:
                    continue
765 766
                if param_and_grad[0].trainable is True:
                    self._append_optimize_op(target_block, param_and_grad)
767 768 769 770 771 772 773
        else:
            for param_and_grad in parameters_and_grads:
                if param_and_grad[1] is None:
                    continue
                with param_and_grad[0].block.program._optimized_guard(
                        param_and_grad), name_scope("optimizer"):
                    if param_and_grad[0].trainable is True:
774 775 776 777 778
                        device = self._get_device_for_param(param_and_grad[0]
                                                            .name)
                        with device_guard(device):
                            optimize_op = self._append_optimize_op(
                                target_block, param_and_grad)
779 780 781

        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
782
        self._finish_update(target_block, parameters_and_grads)
783

784 785
        end = len(target_block.ops)
        return target_block._slice_ops(start, end)
786 787

    def _process_distribute_lookuptable(self, param_grads):
Q
Qiao Longfei 已提交
788 789 790 791 792 793 794 795 796
        """
        Because distribute lookup table only support SGD optimizer for now, not support
        other optimizer and regularization, so we should find the table parameter out,
        and avoid to add regularization and other op for it, and add sgd optimize op
        for it independently.
        :param param_grads(list((Var, Var))): list of (param, grad) pair.
        :param loss: the loss variable.
        :param startup_program: the startup program
        """
797 798
        program = framework.default_main_program()
        global_block = framework.default_main_program().global_block()
Q
Qiao Longfei 已提交
799 800 801 802 803 804 805 806 807 808 809 810 811 812 813
        table_name = find_distributed_lookup_table(program)
        table_param = None
        table_grad = None
        new_param_grads = []
        for p, g in param_grads:
            if p.name == table_name:
                if table_param is not None:
                    raise RuntimeError(
                        "multi dist table var found, only support one now!")
                table_param = p
                table_grad = g
            else:
                new_param_grads.append((p, g))
        sgd_op = None
        if table_param is not None:
814 815 816 817 818 819 820 821 822 823 824 825 826
            param_and_grad = [table_param, table_grad]
            with table_param.block.program._optimized_guard(param_and_grad), \
                    framework.name_scope("optimizer"):
                self._create_global_learning_rate()
                # create the optimize op
                sgd_op = global_block.append_op(
                    type='sgd',
                    inputs={
                        "Param": table_param,
                        "Grad": table_grad,
                        "LearningRate": self._create_param_lr(param_and_grad)
                    },
                    outputs={"ParamOut": param_and_grad[0]})
Q
Qiao Longfei 已提交
827 828
        return new_param_grads, (table_param, table_grad), sgd_op

829 830 831 832 833 834 835
    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
836
        The first part of ``minimize``, do auto-diff to append backward operations for
837 838 839
        the current program.

        Args:
840 841 842 843
            loss (Variable): ``loss`` variable to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
H
hong 已提交
844
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
845 846
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
847
            no_grad_set (set, optional): Set of ``Variable``  or ``Variable.name`` that don't need
848 849 850
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.
M
minqiyang 已提交
851

852
        Return:
853 854
            list: list of (param, grad) variable pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.
M
minqiyang 已提交
855

856
        Examples:
857
            See examples in ``apply_gradients``.
858
        """
859
        act_no_grad_set = None
L
Leo Chen 已提交
860
        if framework.in_dygraph_mode():
861
            pass
L
Leo Chen 已提交
862 863
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)
G
gongweibao 已提交
864

L
Leo Chen 已提交
865 866 867 868
        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype

L
lujun 已提交
869
        if framework.in_dygraph_mode():
870 871 872
            parameter_list = parameter_list if parameter_list \
                else self._parameter_list

C
chengduo 已提交
873
            params_grads = []
874
            for param in parameter_list:
C
chengduo 已提交
875 876
                if not param.trainable:
                    continue
877
                if param._grad_ivar() is not None:
C
chengduo 已提交
878
                    # create gradient variable
879
                    grad_var = param._grad_ivar()
C
chengduo 已提交
880
                    params_grads.append((param, grad_var))
881
        else:
C
chengduo 已提交
882 883 884 885 886
            if callbacks is None:
                callbacks = [error_clip_callback]
            else:
                assert (isinstance(callbacks, list))
            program = loss.block.program
C
chengduo 已提交
887 888 889 890
            assert len(loss.shape) == 1 and loss.shape[0] == 1, \
                "The loss.shape should be (1L,), but the current loss.shape is {}. " \
                "Maybe that you should call fluid.layers.mean to process the current loss.".format(
                    loss.shape)
891 892
            parameter_list = parameter_list if parameter_list \
                else self._parameter_list
C
chengduo 已提交
893 894
            with program_guard(program, startup_program):
                params_grads = append_backward(loss, parameter_list,
895
                                               act_no_grad_set, callbacks)
C
chengduo 已提交
896
        return params_grads
897

898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917
    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """ Create and add backward regularization Operators
    
        Function helper of append_regularization_ops.
        """
        # If no gradient or no regularization is specified,  then we don't need to do anything
        if grad is None or ((not hasattr(param, 'regularizer') or
                             (hasattr(param, 'regularizer') and
                              param.regularizer is None)) and
                            regularization is None):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        assert regularization_term is not None

918
        if framework.in_dygraph_mode():
W
wanghuancoder 已提交
919
            return _C_ops.sum([grad, regularization_term])
920

921 922 923 924 925 926 927 928 929 930 931 932 933 934 935
        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR)

        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
936
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972

        return new_grad

    def append_regularization_ops(self,
                                  parameters_and_grads,
                                  regularization=None):
        r"""Create and add backward regularization Operators
    
        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.
    
        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. If the parameter is not
                            set. It will be applied with regularizer.
    
        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient
    
        Raises:
            Exception: Unknown regularization type
        """
        params_and_grads = []
        if framework.in_dygraph_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(param, grad,
                                                               regularization)
                params_and_grads.append((param, new_grad))
        else:
            repeate_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
973 974 975
                    if not repeate_regularizer and getattr(
                            param, 'regularizer',
                            None) is not None and regularization is not None:
976 977 978 979 980 981 982 983 984 985 986
                        repeate_regularizer = True
                        logging.info(
                            "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. "
                            "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                            % regularization.__str__())
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization)
                        params_and_grads.append((param, new_grad))
        return params_and_grads

987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
    def flatten_param_grads(self, params_grads):
        need_flatten_params = []
        need_flatten_grads = []
        for p, g in params_grads:
            if g is None:
                continue
            g.persistable = True
            if getattr(p, 'need_clip', True) is False or getattr(
                    p, 'regularizer', None) is not None:
                warnings.warn(
                    "flatten_param_grads=True will be discarded since paramter '{}''s need_clip is False or "
                    "the regularizer is set".format(p.name))
                self._flatten_param_grads = False
                return params_grads

            need_flatten_params.append(p)
            need_flatten_grads.append(g)

        shape = [np.prod(p.shape) for p in need_flatten_params]
        block = need_flatten_params[0].block

        flatten_param = self.helper.create_global_variable(
            name='flatten_param',
            persistable=True,
            dtype=need_flatten_params[0].dtype,
            shape=[np.sum(shape)],
            belong_to_optimizer=True)

        flatten_param.trainable = True
        flatten_param.optimize_attr = need_flatten_params[0].optimize_attr
        flatten_param.regularizer = need_flatten_params[0].regularizer

        flatten_grad = self.helper.create_global_variable(
            name='flatten_grad',
            persistable=True,
            dtype=need_flatten_grads[0].dtype,
            shape=[np.sum(shape)],
            belong_to_optimizer=True)

        with program_guard(default_main_program()):
            block.append_op(
                type="coalesce_tensor",
                inputs={"Input": need_flatten_params},
                outputs={
                    "Output": need_flatten_params,
                    "FusedOutput": flatten_param
                },
                attrs={
                    "copy_data": True,
                    "use_align": True,
                    "align_size": self._align_size,
                    "dtype": need_flatten_params[0].dtype
                })

            block.append_op(
                type="coalesce_tensor",
                inputs={"Input": need_flatten_grads},
                outputs={
                    "Output": need_flatten_grads,
                    "FusedOutput": flatten_grad
                },
                attrs={
                    "copy_data": True,
                    "use_align": True,
                    "align_size": self._align_size,
                    "dtype": need_flatten_grads[0].dtype
                })

        #NOTE(zhiqiu): the initializer should be set after coalesce_tensor op,
        # so the shape of flatten_param and flatten_grad will be inferred.
        self.helper.set_variable_initializer(
            flatten_param, initializer=Constant(0.0))
        self.helper.set_variable_initializer(
            flatten_grad, initializer=Constant(0.0))

        return [(flatten_param, flatten_grad)]

1064 1065 1066 1067 1068 1069 1070
    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.
M
minqiyang 已提交
1071

1072 1073
        Returns:
            list: A list of operators appended to the current program.
M
minqiyang 已提交
1074

1075 1076 1077
        Examples:
            .. code-block:: python

1078
                import paddle.fluid as fluid
1079 1080 1081 1082 1083 1084 1085 1086 1087
                loss = network()
                optimizer = fluid.optimizer.SGD(learning_rate=0.1)
                params_grads = optimizer.backward(loss)
                # you may append operations for params_grads here
                # ...
                optimizer.apply_gradients(params_grads)
        """
        params_grads = sorted(params_grads, key=lambda x: x[0].name)

1088 1089 1090 1091 1092 1093
        # NOTE(zhiqiu): currently, only support ClipGradByGlobalNorm and without regularization.
        if self._flatten_param_grads and self.regularization is None:
            if self._grad_clip == None or isinstance(self._grad_clip,
                                                     ClipGradByGlobalNorm):
                params_grads = self.flatten_param_grads(params_grads)

1094
        # 'optimizer(grad_clip)' or 'set_gradient_clip'
1095 1096 1097 1098
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:
            params_grads = append_gradient_clip_ops(params_grads)
1099 1100

        # Add regularization if any
1101 1102
        params_grads = self.append_regularization_ops(params_grads,
                                                      self.regularization)
1103 1104 1105 1106

        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops

C
chengduo 已提交
1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
    def apply_optimize(self, loss, startup_program, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
L
lujun 已提交
1119
        if framework.in_dygraph_mode():
C
chengduo 已提交
1120 1121
            with program_guard(framework.default_main_program(),
                               framework.default_startup_program()):
1122 1123
                if self._grad_clip is not None:
                    params_grads = self._grad_clip(params_grads)
1124 1125
                params_grads = self.append_regularization_ops(
                    params_grads, self.regularization)
C
chengduo 已提交
1126 1127 1128 1129 1130 1131 1132
                optimize_ops = self._create_optimization_pass(params_grads)
        else:
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

G
gongweibao 已提交
1133
    def _get_no_grad_set(self, loss, no_grad_set=None):
1134
        no_grad_set = _get_no_grad_set_name(no_grad_set)
G
gongweibao 已提交
1135 1136 1137 1138 1139 1140 1141 1142
        parameters = loss.block.program.global_block().all_parameters()
        param_no_trainable = set(
            [param.name for param in parameters if param.trainable is False])
        # If the parameter is no trainable, it should not have a gradient.
        no_grad_set.update(param_no_trainable)

        return no_grad_set

1143 1144 1145 1146
    @framework.dygraph_only
    def clear_gradients(self):
        """
        Clear the gradients of all optimized parameters for model.
1147 1148

        If not, new gradient will accumulat on previous gradient.
1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175
        
        Returns:
            None
        
        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                with fluid.dygraph.guard():
                    value = np.arange(26).reshape(2, 13).astype("float32")
                    a = fluid.dygraph.to_variable(value)
                    linear = fluid.Linear(13, 5, dtype="float32")
                    # This can be any optimizer supported by dygraph.
                    adam = fluid.optimizer.Adam(learning_rate = 0.01, 
                                                parameter_list = linear.parameters())
                    out = linear(a)
                    out.backward()
                    adam.minimize(out)
                    adam.clear_gradients()

        """
        for p in self._parameter_list:
            if p.trainable:
                p.clear_gradient()

    @imperative_base.no_grad
    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add operations to minimize ``loss`` by updating ``parameter_list``.

        Args:
            loss (Variable): A ``Variable`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Variable``  or ``Variable.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) variable pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to 
            indicate program pruning. If so, the program will be pruned by ``feed`` and 
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:
            Please refer to the example of the current Optimizer.
        """
        assert isinstance(loss, Variable), "The loss should be a Variable."

        parameter_list = parameter_list if parameter_list \
            else self._parameter_list

        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
            no_grad_set=no_grad_set)

        optimize_ops = self.apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads)
        return optimize_ops, params_grads


class SGDOptimizer(Optimizer):
    r"""
    Optimizer of the stochastic gradient descent algorithm.

    .. math::

        param\_out = param - learning\_rate * grad

    Parameters:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as data element.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of 
            some derived class of ``GradientClipBase`` . There are three clipping strategies 
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
                sgd_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    """

    def __init__(self,
                 learning_rate,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        super(SGDOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "sgd"

    @no_grad
    def _append_optimize_op(self, block, param_and_grad):
        lr = self._create_param_lr(param_and_grad)
        if framework.in_dygraph_mode():
            _C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
                       param_and_grad[0])
            return None

        assert isinstance(block, framework.Block)
        # create the optimize op
        sgd_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": lr
            },
            outputs={"ParamOut": param_and_grad[0]},
            stop_gradient=True)

        return sgd_op


class MomentumOptimizer(Optimizer):
    r"""

    Simple Momentum optimizer with velocity state

    This optimizer has a flag for Nesterov Momentum.

    The update equations are as follows:

    .. math::

        & velocity = mu * velocity + gradient

        & if (use\_nesterov):

        &\quad   param = param - (gradient + mu * velocity) * learning\_rate

        & else:

        &\quad   param = param - learning\_rate * velocity

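    For illustration only, one update step following the equations above can be
    written in NumPy as (a sketch, not the fused ``momentum`` operator):

    .. code-block:: python

        import numpy as np

        def momentum_step(param, grad, velocity, lr, mu=0.9, use_nesterov=False):
            velocity = mu * velocity + grad
            if use_nesterov:
                param = param - (grad + mu * velocity) * lr
            else:
                param = param - lr * velocity
            return param, velocity
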
    Parameters:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as data element.
        momentum (float): Momentum factor
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        use_nesterov (bool, optional): Enables Nesterov momentum, default is false.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of 
            some derived class of ``GradientClipBase`` . There are three clipping strategies 
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                moment_optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
                moment_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    """
    _velocity_acc_str = "velocity"

    def __init__(self,
                 learning_rate,
                 momentum,
                 parameter_list=None,
                 use_nesterov=False,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        assert momentum is not None
        super(MomentumOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "momentum"
        self._momentum = momentum
        self._use_nesterov = bool(use_nesterov)

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._velocity_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        velocity_acc = self._get_accumulator(self._velocity_acc_str,
                                             param_and_grad[0])
        lr = self._create_param_lr(param_and_grad)

        if framework.in_dygraph_mode():
            _, _ = _C_ops.momentum(param_and_grad[0], param_and_grad[1],
                                   velocity_acc, lr, param_and_grad[0],
                                   velocity_acc, 'mu', self._momentum,
                                   'use_nesterov', self._use_nesterov)
            return None

        attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov}
        inputs = {
            "Param": [param_and_grad[0]],
            "Grad": [param_and_grad[1]],
            "Velocity": [velocity_acc],
            "LearningRate": [lr]
        }

        outputs = {
            "ParamOut": [param_and_grad[0]],
            "VelocityOut": [velocity_acc]
        }
        # create the momentum optimize op
        momentum_op = block.append_op(
            type=self.type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=True)

        return momentum_op


class DGCMomentumOptimizer(Optimizer):
    r"""
	:api_attr: Static Graph

    DGC (Deep Gradient Compression) Momentum Optimizer. Original paper is https://arxiv.org/abs/1712.01887

    DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\
        only gradients larger than a threshold are transmitted.

    To avoid losing information, DGC accumulates the rest of the gradients locally.

    Eventually, these gradients become large enough to be transmitted.

    Thus, DGC sends the large gradients immediately but eventually sends all of the gradients over time.

    To ensure no loss of accuracy, DGC employs momentum correction and local gradient clipping on top of the gradient sparsification to maintain model performance.

    DGC also uses momentum factor masking and warmup training to overcome the staleness problem caused by reduced communication.

    This optimizer will do two things:

        1. Compress the gradient by taking the top-k important values from the tensor \
            and using them for allreduce to reduce network bandwidth.

        2. Call momentum to optimize the cost.

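    A minimal NumPy sketch of this top-k sparsification with local accumulation
    (illustrative only; the function name and layout are assumptions, not the
    actual fused kernel):

    .. code-block:: python

        import numpy as np

        def dgc_sparsify(grad, residual, sparsity):
            # Accumulate the local residual, keep only the largest
            # (1 - sparsity) fraction of the entries, and carry the rest
            # over to the next step.
            acc = grad + residual
            k = max(1, int(acc.size * (1.0 - sparsity)))
            threshold = np.partition(np.abs(acc).ravel(), -k)[-k]
            mask = np.abs(acc) >= threshold
            sent = np.where(mask, acc, 0.0)          # transmitted part
            new_residual = np.where(mask, 0.0, acc)  # kept locally
            return sent, new_residual
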
    Args:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            It can be a float value or a Variable with one float value as a data element.
        momentum (float): Momentum factor.
        rampup_begin_step (int): The beginning step from which gradient compression is implemented.
        rampup_step (int): Time steps used in sparsity warm-up periods. Default is 1.
            For example, if the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 100, \
                it will use 0.75 at 0~19 steps, and 0.9375 at 20~39 steps, and so on. \
                And when it reaches the end of the sparsity array, it will use 0.999 thereafter.
        sparsity (list[float]): Keeps the top important elements of the gradient tensor; the kept ratio is (1 - current sparsity). \
            Default is [0.999]. For example, if the sparsity is [0.99, 0.999], \
                the top [1%, 0.1%] important elements will be transmitted.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        use_nesterov (bool): Enables Nesterov momentum. True means use Nesterov. Default is False.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipByNorm, optional): Gradient clipping strategy. ``DGCMomentumOptimizer`` only supports 
            :ref:`api_fluid_clip_GradientClipByNorm` , and if not, it will raise TypeError. Default None, 
            meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            optimizer = fluid.optimizer.DGCMomentumOptimizer(
                        learning_rate=0.0001,
                        momentum=0.9,
                        rampup_step=1000,
                        rampup_begin_step=1252,
                        sparsity=[0.999, 0.999])

    """
    _u_velocity_acc_str = "_dgc_u_"
    _v_velocity_acc_str = "_dgc_v_"

    def __init__(self,
                 learning_rate,
                 momentum,
                 rampup_begin_step,
                 rampup_step=1,
                 sparsity=[0.999],
                 parameter_list=None,
                 use_nesterov=False,
                 num_trainers=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        if framework.in_dygraph_mode():
            raise Exception("In dygraph, don't support DGCMomentumOptimizer.")

        assert core.is_compiled_with_cuda(), \
            "Paddle is not compiled with CUDA. DGC only supports GPU for now."

        assert learning_rate is not None
        assert momentum is not None
        super(DGCMomentumOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "dgc_momentum"
        self._momentum = momentum
        self._use_nesterov = bool(use_nesterov)

        assert rampup_begin_step >= 0, "rampup_begin_step must >= 0"
        self._rampup_begin_step = rampup_begin_step
        self._rampup_step = rampup_step
        self._sparsity = sparsity

        self._rampup_begin_step_var = None
        self._global_step_var = None

        self._dgc_clip_norm = None
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipByNorm):
                raise TypeError(
                    "The type of grad_clip should be 'GradientClipByNorm', because DGCMomentumOptimizer only support GradientClipByNorm"
                )
            assert isinstance(
                num_trainers, int
            ), "The type of num_trainers should be 'int', but received %s" % type(
                num_trainers)
            assert num_trainers > 0, "The value of num_trainers should be greater than 0!"

            self._num_trainers = num_trainers
            self._dgc_clip_norm = grad_clip.clip_norm * (num_trainers**-0.5)

        self.regular_type, self.regular_coeff = self._get_regularization_param(
            self.regularization)

    def _get_regularization_param(self, regularization):
        regular_type = 0
        regular_coeff = 0.0

        if regularization is not None:
            regular_coeff = regularization._regularization_coeff
            from .regularizer import L1Decay, L2Decay
            if isinstance(regularization, L1Decay):
                regular_type = 1
            elif isinstance(regularization, L2Decay):
                regular_type = 2
            else:
                assert False, 'regularization must be None|L1Decay|L2Decay'
        return regular_type, regular_coeff

    def _is_use_dgc(self, param_var, grad_var):
        var_numel = abs(reduce(lambda x, y: x * y, param_var.shape))
        if var_numel < 16384 or \
           param_var.type == core.VarDesc.VarType.SELECTED_ROWS  or \
           grad_var.type == core.VarDesc.VarType.SELECTED_ROWS  or  \
               param_var.dtype != core.VarDesc.VarType.FP32 :
            return False
        return True

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
        velocity_acc = self._get_accumulator(self._u_velocity_acc_str,
                                             param_and_grad[0])
        assert velocity_acc is not None

        inputs = {
            "Param": param_and_grad[0],
            "Grad": param_and_grad[1],
            "Velocity": velocity_acc,
            "LearningRate": self._create_param_lr(param_and_grad),
        }
        outputs = {
            "ParamOut": param_and_grad[0],
            "VelocityOut": velocity_acc,
        }
        attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov}

        if not self._is_use_dgc(param_and_grad[0], param_and_grad[1]):
            type = "momentum"
        else:
            type = "dgc_momentum"
            inputs.update({
                "current_step": self._global_step_var,
                "nranks": self._nranks_var
            })
            outputs.update({'Grad_out': param_and_grad[1]})
            attrs.update({"rampup_begin_step": float(self._rampup_begin_step)})

        # create the dgc momentum optimize op
        dgc_momentum_op = block.append_op(
            type=type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=True)
        return dgc_momentum_op

    def _add_auto_increment_var(self, counter_name, begin, step=1):
        helper = LayerHelper('global_step_counter')
        counter, is_new_var = helper.create_or_get_global_variable(
            name=counter_name, dtype='float32', shape=[1], persistable=True)
        if is_new_var:
            helper.set_variable_initializer(
                counter,
                initializer=Constant(
                    value=float(begin - 1), force_cpu=True))
            helper.main_program.global_block()._prepend_op(
                type='increment',
                inputs={'X': [counter]},
                outputs={'Out': [counter]},
                attrs={'step': float(step)},
                stop_gradient=True)
            counter.stop_gradient = True

        return counter

    def _add_nranks_var(self, name, value=-1):
        helper = LayerHelper('global_step_counter')
        counter, is_new_var = helper.create_or_get_global_variable(
            name=name, dtype='float32', shape=[1], persistable=True)
        if is_new_var:
            helper.set_variable_initializer(
                counter,
                initializer=Constant(
                    value=float(value), force_cpu=True))
            counter.stop_gradient = True

        return counter

    def _append_dgc_ops(self, param_and_grads):
        main_program = default_main_program()
        main_program._enable_dgc = True

        # step counter
        self._global_step_var = self._add_auto_increment_var(
            counter_name=core.dgc.kDGCCounterName(), begin=0)

        self._nranks_var = self._add_nranks_var(
            name=core.dgc.kDGCNRanksName(), value=-1)

        # rampup begin step var for all_reduce_op_handle
        self._rampup_begin_step_var = tensor.create_global_var(
            shape=[1],
            dtype=core.VarDesc.VarType.FP32,
            persistable=True,
            name=core.dgc.kDGCRampUpBeginStepName(),
            value=self._rampup_begin_step * 1.0,
            force_cpu=True)

        self.helper = LayerHelper(self.__class__.__name__)

        for param_var, grad_var in param_and_grads:
            # reuse velocity in dgc_op and dgc_momentum_op
            u_var = self._add_accumulator(self._u_velocity_acc_str, param_var)

            if not self._is_use_dgc(param_var, grad_var):
                continue

            v_var = self._add_accumulator(self._v_velocity_acc_str, param_var)

            k_var = tensor.create_global_var(
                shape=[1],
                dtype=param_var.dtype,
                persistable=True,
                name=param_var.name + core.dgc.kDGCKName(),
                value=0.0,
                force_cpu=True)

            encoded_var = tensor.create_global_var(
                shape=[1],
                dtype=param_var.dtype,
                persistable=True,
                name=param_var.name + core.dgc.kDGCEncodedName(),
                value=0.0,
                force_cpu=False)

            gather_var = tensor.create_global_var(
                shape=[1],
                dtype=param_var.dtype,
                persistable=True,
                name=param_var.name + core.dgc.kDGCGatherName(),
                value=0.0,
                force_cpu=False)

            # remove this param/grad pair from the backward op's op_role_var attribute
            op_maker = core.op_proto_and_checker_maker
            backward = core.op_proto_and_checker_maker.OpRole.Backward
            for op in main_program.global_block().ops:
                if not self._is_the_backward_op(op):
                    continue

                var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
                if param_var.name not in var_attr:
                    continue

                var_attr.remove(param_var.name)
                var_attr.remove(grad_var.name)
                if len(var_attr) > 1:
                    op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
                else:
                    op._remove_attr(op_maker.kOpRoleVarAttrName())

            clip_var = grad_var
            if self._dgc_clip_norm is not None:
                clip_var = self._append_clip_norm(grad_var, self._dgc_clip_norm)
            self._dgc_op(param_var, clip_var, grad_var, u_var, v_var, k_var,
                         encoded_var, gather_var)

    def _is_the_backward_op(self, op):
        op_maker = core.op_proto_and_checker_maker
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):
            return True
        return False

    def _clip_by_norm(self, x, max_norm, name=None):
        args = {'x': x, 'max_norm': max_norm, 'name': name}

        helper = LayerHelper("dgc_clip_by_norm_op", **args)

        if name is None:
            name = unique_name.generate_with_ignorable_key(".".join(
                [helper.name, 'tmp']))

        out = helper.create_variable(
            type=x.type, name=name, dtype=x.dtype, persistable=False)

        helper.append_op(
            type="dgc_clip_by_norm",
            inputs={"X": x,
                    "current_step": self._global_step_var},
            attrs={
                "max_norm": max_norm,
                "rampup_begin_step": float(self._rampup_begin_step)
            },
            outputs={"Out": out})
        return out

    def _append_clip_norm(self, grad_var, clip_norm):
        with grad_var.block.program._backward_role_guard():
            return self._clip_by_norm(
                x=grad_var, max_norm=clip_norm, name=grad_var.name)

    def _dgc_op(self, param_var, clip_var, grad_var, u_var, v_var, k_var,
                encoded_var, gather_var):
        block = framework.default_main_program().global_block()
        op_maker = core.op_proto_and_checker_maker

        regular_type = self.regular_type
        regular_coeff = self.regular_coeff
        # The regularizer of the Parameters have higher priority
        if param_var.regularizer is not None:
            regular_type, regular_coeff = self._get_regularization_param(
                param_var.regularizer)

        dgc_op = block.append_op(
            type="dgc",
            inputs={
                "U": u_var,
                "V": v_var,
                "Grad": clip_var,
                "Param": param_var,
                "current_step": self._global_step_var,
                "nranks": self._nranks_var,
            },
            outputs={
                "U_out": u_var,
                "V_out": v_var,
                "EncodeGrad": encoded_var,
                "k": k_var,
                "Grad_out": grad_var,
                "GatherBuff": gather_var,
            },
            attrs={
                "m": self._momentum,
                "sparsity": self._sparsity,
                "use_nesterov": self._use_nesterov,
                "rampup_begin_step": float(self._rampup_begin_step),
                "rampup_step": float(self._rampup_step),
                "regular_coeff": float(regular_coeff),
                "regular_type": int(regular_type),
            },
            stop_gradient=True)

        backward = op_maker.OpRole.Backward
        dgc_op._set_attr(op_maker.kOpRoleAttrName(), backward)
        dgc_op._set_attr(op_maker.kOpRoleVarAttrName(),
                         [param_var.name, grad_var.name])

    @imperative_base.no_grad
    def apply_gradients(self, params_grads):
        # Note: since we can't use all_reduce_op now,
        # dgc_op should be the last op of one grad.
        # Maybe need a grad allreduce pass.
        self._append_dgc_ops(params_grads)

        params_grads = sorted(params_grads, key=lambda x: x[0].name)
        params_grads, table_param_and_grad, table_optimize_op = \
            self._process_distribute_lookuptable(params_grads)

        not_dgc_params_grads = []
        dgc_params_grads = []
        # DGC clip and regularization in optimizer.backward
        for param, grad in params_grads:
            if not self._is_use_dgc(param, grad):
                not_dgc_params_grads.append((param, grad))
            else:
                dgc_params_grads.append((param, grad))

        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            not_dgc_params_grads = self._grad_clip(not_dgc_params_grads)
        else:
            not_dgc_params_grads = append_gradient_clip_ops(
                not_dgc_params_grads)

        not_dgc_params_grads = self.append_regularization_ops(
            not_dgc_params_grads, self.regularization)

        params_grads = not_dgc_params_grads + dgc_params_grads
        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        optimize_ops = self._create_optimization_pass(params_grads)
        if table_optimize_op is not None:
            optimize_ops.append(table_optimize_op)
            params_grads.append(table_param_and_grad)

        return optimize_ops


class LarsMomentumOptimizer(Optimizer):
    r"""
    Momentum optimizer with LARS support

    The update equations are as follows:

    .. math::

        & local\_learning\_rate = learning\_rate * lars\_coeff * \\
          \\frac{||param||}{||gradient|| + lars\_weight\_decay * ||param||}

        & velocity = mu * velocity + local\_learning\_rate * (gradient + lars\_weight\_decay * param + epsilon)

        & param = param - velocity

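    For illustration, a single update for one layer can be sketched in NumPy as
    follows (a sketch of the equations above, not the fused operator):

    .. code-block:: python

        import numpy as np

        def lars_step(param, grad, velocity, lr, mu=0.9, lars_coeff=0.001,
                      lars_weight_decay=0.0005, epsilon=0.0):
            # Layer-wise local learning rate (the "trust ratio").
            local_lr = lr * lars_coeff * np.linalg.norm(param) / (
                np.linalg.norm(grad) + lars_weight_decay * np.linalg.norm(param))
            velocity = mu * velocity + local_lr * (
                grad + lars_weight_decay * param + epsilon)
            param = param - velocity
            return param, velocity
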
    Parameters:
        learning_rate (float|Variable): The learning rate used to update parameters. \
            Can be a float value or a Variable with one float value as data element.
        momentum (float): Momentum factor.
        lars_coeff (float): Defines how much we trust the layer to change its weights.
        lars_weight_decay (float): Weight decay coefficient for decaying using LARS.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of 
            some derived class of ``GradientClipBase`` . There are three clipping strategies 
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.
        exclude_from_weight_decay (list[str], optional): Names of the layers which will be excluded from LARS weight decay. Default is None.
        epsilon (float, optional): Epsilon to avoid division by zero when calculating the local learning rate. Default is 0.
        multi_precision (bool, optional): Whether to use multi-precision during weight updating.
        rescale_grad (float, optional): Multiply the gradient with `rescale_grad` \
            before updating. It is often chosen to be `1.0/batch_size`.
        
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            out = fluid.layers.fc(inp, size=3)
            out = fluid.layers.reduce_sum(out)
            optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(out)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            exe.run(
                feed={"inp": np_inp},
                fetch_list=[out.name])
    """
    _velocity_acc_str = "velocity"

    def __init__(self,
                 learning_rate,
                 momentum,
                 lars_coeff=0.001,
                 lars_weight_decay=0.0005,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None,
                 exclude_from_weight_decay=None,
                 epsilon=0,
                 multi_precision=False,
                 rescale_grad=1.0):
        assert learning_rate is not None
        assert momentum is not None
        super(LarsMomentumOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "lars_momentum"
        self._momentum = momentum
        self._lars_coeff = float(lars_coeff)
        self._lars_weight_decay = float(lars_weight_decay)
        self._epsilon = float(epsilon)
        if exclude_from_weight_decay is None:
            self._exclude_from_weight_decay = []
        else:
            self._exclude_from_weight_decay = exclude_from_weight_decay
        self._multi_precision = multi_precision
        self._rescale_grad = float(rescale_grad)
        self._master_weights = {}

    def _create_master_weight(self, param):
        assert isinstance(self.helper, LayerHelper)

        var_name = param.name + '_fp32_master'
        var_name = unique_name.generate(var_name)
        var = layers.create_global_var(
            name=var_name,
            shape=param.shape,
            value=0,
            dtype='float32',
            persistable=True)
        block = self.helper.startup_program.global_block()
        block.append_op(
            type="cast",
            inputs={"X": [param]},
            outputs={"Out": [var]},
            attrs={
                "in_dtype": param.dtype,
                "out_dtype": core.VarDesc.VarType.FP32
            })
        self._master_weights[param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter
        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched
        Returns:
            accumulator variable for the parameter
        """
        if self._name is not None:
            name = self._name + "_" + name
        find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16
        target_param = self._master_weights[
            param.name] if find_master else param
        target_name = target_param.name
        if (name not in self._accumulators or
                target_name not in self._accumulators[name]):
            raise Exception("Accumulator {} does not exist for parameter {}".
                            format(name, target_name))
        return self._accumulators[name][target_name]

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                master_p = self._create_master_weight(p)
                self._add_accumulator(self._velocity_acc_str, master_p)
                continue
            if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision:
                warnings.warn(
                    "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence. "
                    "Consider using the multi_precision=True option of the Lars optimizer."
                )
            self._add_accumulator(self._velocity_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
        _lars_weight_decay = self._lars_weight_decay
        param_name = param_and_grad[0].name
        if len(self._exclude_from_weight_decay) > 0:
            for name in self._exclude_from_weight_decay:
                if name in param_name:
                    _lars_weight_decay = 0.0
                    break

        velocity_acc = self._get_accumulator(self._velocity_acc_str,
                                             param_and_grad[0])
        lr = self._create_param_lr(param_and_grad)

        find_master = self._multi_precision and param_and_grad[
            0].dtype == core.VarDesc.VarType.FP16
        master_weight = (self._master_weights[param_and_grad[0].name]
                         if find_master else None)

        attrs = {
            "mu": self._momentum,
            "lars_coeff": self._lars_coeff,
            "lars_weight_decay": _lars_weight_decay,
            "multi_precision": find_master,
            "rescale_grad": self._rescale_grad
        }

        inputs = {
            "Param": param_and_grad[0],
            "Grad": param_and_grad[1],
            "Velocity": velocity_acc,
            "LearningRate": lr
        }

        outputs = {"ParamOut": param_and_grad[0], "VelocityOut": velocity_acc}

        if find_master:
            inputs["MasterParam"] = master_weight
            outputs["MasterParamOut"] = master_weight

        # create the momentum optimize op
        momentum_op = block.append_op(
            type=self.type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=True)

        return momentum_op


class AdagradOptimizer(Optimizer):
    r"""
    The Adaptive Gradient optimizer (Adagrad for short) can adaptively assign
    different learning rates to individual parameters.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        moment\_out &= moment + grad * grad

        param\_out &= param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon}

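    For illustration, one update step can be sketched in NumPy as (a sketch of
    the equations above, not the actual operator):

    .. code-block:: python

        import numpy as np

        def adagrad_step(param, grad, moment, learning_rate, epsilon=1e-6):
            moment = moment + grad * grad
            param = param - learning_rate * grad / (np.sqrt(moment) + epsilon)
            return param, moment
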
    Related paper: `Adaptive Subgradient Methods for Online Learning and
    Stochastic Optimization <http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf>`_.

    The original paper does not have the ``epsilon`` attribute. It is added here
    in our implementation, as also proposed in `Per-parameter adaptive learning rate
    methods <http://cs231n.github.io/neural-networks-3/#ada>`_
    for numerical stability to avoid the division by zero error.

    Args:
        learning_rate (float|Variable): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-06.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of 
            some derived class of ``GradientClipBase`` . There are three clipping strategies 
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.
        initial_accumulator_value (float, optional): Initial value for moment accumulator.
            The default value is 0.0.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
            inp = fluid.data(name="inp", shape=[2, 2])
            out = fluid.layers.fc(inp, size=3)
            out = fluid.layers.reduce_sum(out)
            optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)
            optimizer.minimize(out)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            exe.run(
                feed={"inp": np_inp},
                fetch_list=[out.name])
    """
    _moment_acc_str = "moment"

    def __init__(self,
                 learning_rate,
                 epsilon=1.0e-6,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None,
                 initial_accumulator_value=0.0):
        assert learning_rate is not None
        assert epsilon is not None
        super(AdagradOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "adagrad"
        self._epsilon = epsilon
        self.initial_accumulator_value = initial_accumulator_value

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(
                self._moment_acc_str,
                p,
                fill_value=self.initial_accumulator_value)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])
        # Create the adagrad optimizer op
        adagrad_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": moment_acc,
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={"ParamOut": param_and_grad[0],
                     "MomentOut": moment_acc},
            attrs={"epsilon": self._epsilon},
            stop_gradient=True)

        return adagrad_op


class AdamOptimizer(Optimizer):
    r"""
    The Adam optimizer uses an optimization described at the end
    of section 2 of `Adam paper <https://arxiv.org/abs/1412.6980>`_ ;
    it can dynamically adjust the learning rate of each parameter using
    the 1st moment estimates and the 2nd moment estimates of the gradient.
    
    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        t & = t + 1

        moment\_1\_out & = {\\beta}_1 * moment\_1 + (1 - {\\beta}_1) * grad

        moment\_2\_out & = {\\beta}_2 * moment\_2 + (1 - {\\beta}_2) * grad * grad

        learning\_rate & = learning\_rate * \\
                          \\frac{\sqrt{1 - {\\beta}_2^t}}{1 - {\\beta}_1^t}

        param\_out & = param - learning\_rate * \\frac{moment\_1}{\sqrt{moment\_2} + \epsilon}

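    For illustration, one update step can be sketched in NumPy as (a sketch of
    the equations above, not the actual ``adam`` operator):

    .. code-block:: python

        import numpy as np

        def adam_step(param, grad, moment_1, moment_2, t, lr,
                      beta1=0.9, beta2=0.999, epsilon=1e-8):
            t = t + 1
            moment_1 = beta1 * moment_1 + (1 - beta1) * grad
            moment_2 = beta2 * moment_2 + (1 - beta2) * grad * grad
            lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
            param = param - lr_t * moment_1 / (np.sqrt(moment_2) + epsilon)
            return param, moment_1, moment_2, t
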
    Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_

    Args:
        learning_rate (float|Variable, optional): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type. The default value is 0.001.
        beta1 (float|Variable, optional): The exponential decay rate for the 1st moment estimates.
            It should be a float number or a Variable with shape [1] and data type as float32.
            The default value is 0.9.
        beta2 (float|Variable, optional): The exponential decay rate for the 2nd moment estimates.
            It should be a float number or a Variable with shape [1] and data type as float32.
            The default value is 0.999.
        epsilon (float|Tensor, optional): A small float value for numerical stability.
            It should be a float number or a Variable with shape [1] and data type as float32.
            The default value is 1e-08.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of 
            some derived class of ``GradientClipBase`` . There are three clipping strategies 
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.
        lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
            The accumulators are updated at every step. Every element of the two moving averages
            is updated in both dense mode and sparse mode. If the size of the parameter is very large,
            then the update may be very slow. The lazy mode only updates the elements that have
            gradients in the current mini-batch, so it will be much faster. But this mode has
            different semantics from the original Adam algorithm and may lead to different results.
            The default value is False.
        use_global_beta_pow (bool, optional): Whether to use global beta_pow. If true, Adam will use global beta_pow 
            for the whole model instead of creating beta_pow for each parameter. Default is false.
        flatten_param_grads (bool, optional): Whether to flatten all parameters and gradients. Default is false.
        align_size (int, optional): The alignment size when flatten parameters and gradients. Default is -1, which means
            use same align_size as allocator. 

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.data(name='x', shape=[None, 13], dtype='float32')
                y = fluid.data(name='y', shape=[None, 1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)
                adam_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

        .. code-block:: python

            # Adam with beta1/beta2 as Variable
            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.data(name='x', shape=[None, 13], dtype='float32')
                y = fluid.data(name='y', shape=[None, 1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                # define beta decay variable
                def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate, epsilon_init):
                    global_step = lr_scheduler._decay_step_counter()

                    beta1 = fluid.layers.create_global_var(
                        shape=[1],
                        value=float(beta1_init),
                        dtype='float32',
                        # set persistable for save checkpoints and resume
                        persistable=True,
                        name="beta1")
                    beta2 = fluid.layers.create_global_var(
                        shape=[1],
                        value=float(beta2_init),
                        dtype='float32',
                        # set persistable for save checkpoints and resume
                        persistable=True,
                        name="beta2")
                    epsilon = fluid.layers.create_global_var(
                        shape=[1],
                        value=float(epsilon_init),
                        dtype='float32',
                        # set persistable for save checkpoints and resume
                        persistable=True,
                        name="epsilon")

                    div_res = global_step / decay_steps
                    decayed_beta1 = beta1_init * (decay_rate**div_res)
                    decayed_beta2 = beta2_init * (decay_rate**div_res)
                    fluid.layers.assign(decayed_beta1, beta1)
                    fluid.layers.assign(decayed_beta2, beta2)

                    return beta1, beta2, epsilon

                beta1, beta2, epsilon = get_decayed_betas(0.9, 0.99, 1e5, 0.9, 1e-8)
                adam_optimizer = fluid.optimizer.AdamOptimizer(
                                                    learning_rate=0.01,
                                                    beta1=beta1,
                                                    beta2=beta2,
                                                    epsilon=epsilon)
                adam_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
    """
    _moment1_acc_str = "moment1"
    _moment2_acc_str = "moment2"
    _beta1_pow_acc_str = "beta1_pow_acc"
    _beta2_pow_acc_str = "beta2_pow_acc"

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None,
                 lazy_mode=False,
                 use_global_beta_pow=False,
                 flatten_param_grads=False,
                 align_size=-1):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(AdamOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            flatten_param_grads=flatten_param_grads,
            align_size=align_size,
            name=name)
        self.type = "adam"
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._lazy_mode = lazy_mode
        self._use_global_beta_pow = use_global_beta_pow

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        # Create accumulator tensors for first and second moments
        for p in parameters:
            self._add_accumulator(self._moment1_acc_str, p)
            self._add_accumulator(self._moment2_acc_str, p)
            if not self._use_global_beta_pow:
                self._add_accumulator(
                    name=self._beta1_pow_acc_str,
                    param=p,
                    fill_value=0.9 if isinstance(self._beta1, Variable) \
                            else self._beta1,
                    shape=[1],
                    type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
                self._add_accumulator(
                    name=self._beta2_pow_acc_str,
                    param=p,
                    fill_value=0.999 if isinstance(self._beta2, Variable) \
                            else self._beta2,
                    shape=[1],
                    type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
        if self._use_global_beta_pow:
            self._add_global_accumulator(
                name=self._beta1_pow_acc_str,
                fill_value=0.9 if isinstance(self._beta1, Variable) \
                        else self._beta1,
                shape=[1],
                type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
            self._add_global_accumulator(
                name=self._beta2_pow_acc_str,
                fill_value=0.999 if isinstance(self._beta2, Variable) \
                        else self._beta2,
                shape=[1],
                type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment1 = self._get_accumulator(self._moment1_acc_str,
                                        param_and_grad[0])
        moment2 = self._get_accumulator(self._moment2_acc_str,
                                        param_and_grad[0])
        if self._use_global_beta_pow:
            beta1_pow_acc = self._get_global_accumulator(
                self._beta1_pow_acc_str)
            beta2_pow_acc = self._get_global_accumulator(
                self._beta2_pow_acc_str)
        else:
            beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                  param_and_grad[0])
            beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
                                                  param_and_grad[0])
        lr = self._create_param_lr(param_and_grad)
        # create the adam optimize op

        if framework.in_dygraph_mode():
            _beta1 = self._beta1 if not isinstance(
                self._beta1, Variable) else self._beta1.numpy().item(0)
            _beta2 = self._beta2 if not isinstance(
                self._beta2, Variable) else self._beta2.numpy().item(0)
            _, _, _, _, _ = _C_ops.adam(
                param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
                'lazy_mode', self._lazy_mode, 'min_row_size_to_use_multithread',
                1000, 'beta1', _beta1, 'beta2', _beta2, 'use_global_beta_pow',
                self._use_global_beta_pow)

            return None

        inputs = {
            "Param": [param_and_grad[0]],
            "Grad": [param_and_grad[1]],
            "LearningRate": [lr],
            "Moment1": [moment1],
            "Moment2": [moment2],
            "Beta1Pow": [beta1_pow_acc],
            "Beta2Pow": [beta2_pow_acc]
        }
        outputs = {
            "ParamOut": [param_and_grad[0]],
            "Moment1Out": [moment1],
            "Moment2Out": [moment2],
            "Beta1PowOut": [beta1_pow_acc],
            "Beta2PowOut": [beta2_pow_acc],
        }
        attrs = {
            "lazy_mode": self._lazy_mode,
            "min_row_size_to_use_multithread": 1000,
            'use_global_beta_pow': self._use_global_beta_pow
        }

        if isinstance(self._beta1, Variable):
            inputs['Beta1Tensor'] = self._beta1
        else:
            attrs['beta1'] = self._beta1
        if isinstance(self._beta2, Variable):
            inputs['Beta2Tensor'] = self._beta2
        else:
            attrs['beta2'] = self._beta2
        if isinstance(self._epsilon, Variable):
            inputs['EpsilonTensor'] = self._epsilon
        else:
            attrs['epsilon'] = self._epsilon

        adam_op = block.append_op(
            type=self.type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=True)

        return adam_op

    def _finish_update(self, block, parameters_and_grads):
        r"""Update beta1_pow and beta2_pow accumulator
        """
        assert isinstance(block, framework.Block)
        if self._use_global_beta_pow:
            beta1_pow_acc = self._get_global_accumulator(
                self._beta1_pow_acc_str)
            beta2_pow_acc = self._get_global_accumulator(
                self._beta2_pow_acc_str)

            with block.program._optimized_guard([]):
                inputs = {"X": beta1_pow_acc}
                attrs = {}
                if isinstance(self._beta1, Variable):
                    inputs['ScaleTensor'] = self._beta1
                else:
                    attrs['scale'] = self._beta1
                block.append_op(
                    type="scale",
                    inputs=inputs,
                    outputs={"Out": beta1_pow_acc},
                    attrs=attrs,
                    stop_gradient=True)

                inputs = {"X": beta2_pow_acc}
                attrs = {}
                if isinstance(self._beta2, Variable):
                    inputs['ScaleTensor'] = self._beta2
                else:
                    attrs['scale'] = self._beta2
                block.append_op(
                    type="scale",
                    inputs=inputs,
                    outputs={"Out": beta2_pow_acc},
                    attrs=attrs,
                    stop_gradient=True)


class AdamaxOptimizer(Optimizer):
    r"""
    The Adamax optimizer is implemented based on the Adamax Optimization
    in Section 7 of the `Adam paper <https://arxiv.org/abs/1412.6980>`_.
    The Adamax algorithm is a variant of the Adam algorithm based on the infinity norm,
    which makes the learning rate update algorithm more stable and simpler.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        t & = t + 1

        moment\_out & = {\\beta}_1 * moment + (1 - {\\beta}_1) * grad

        inf\_norm\_out & = max({\\beta}_2 * inf\_norm + \epsilon, |grad|)

        learning\_rate & = \\frac{learning\_rate}{1 - {\\beta}_1^t}

        param\_out & = param - learning\_rate * \\frac{moment\_out}{inf\_norm\_out}

    Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_

    The original paper does not have an ``epsilon`` attribute; it is added here
    for numerical stability to prevent division by zero.

    Args:
        learning_rate (float|Variable, optional): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type. The default value is 0.001.
        beta1 (float, optional): The exponential decay rate for the 1st moment estimates.
            The default value is 0.9.
        beta2 (float, optional): The exponential decay rate for the 2nd moment estimates.
            The default value is 0.999.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-08.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    **Notes**:
        **Currently, AdamaxOptimizer doesn't support sparse parameter optimization.**

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          # First create the Executor.
          place = fluid.CPUPlace() # fluid.CUDAPlace(0)
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.data(name='X', shape=[None, 1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2)
              adam.minimize(loss)

          # Run the startup program once and only once.
          exe.run(startup_program)

          x = numpy.random.random(size=(10, 1)).astype('float32')
          outs = exe.run(program=train_program,
                         feed={'X': x},
                         fetch_list=[loss.name])
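
        The update rule above can also be sketched directly in NumPy (a minimal,
        illustrative transcription of the formulas; ``moment``, ``inf_norm`` and
        ``t`` are hypothetical names, not the operator's internal variables):

        .. code-block:: python

          import numpy as np

          beta1, beta2, epsilon, lr = 0.9, 0.999, 1e-8, 0.2
          param = np.random.random(5).astype('float32')
          grad = np.random.random(5).astype('float32')
          moment = np.zeros(5, dtype='float32')
          inf_norm = np.zeros(5, dtype='float32')

          t = 1
          moment = beta1 * moment + (1 - beta1) * grad
          inf_norm = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
          cur_lr = lr / (1 - beta1 ** t)
          param = param - cur_lr * moment / inf_norm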
    """
    _moment_acc_str = "moment"
    _inf_norm_acc_str = "inf_norm"
    _beta1_pow_acc_str = "beta1_pow_acc"

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(AdamaxOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "adamax"
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        # Create accumulator tensors for first moment and infinity norm
        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)
            self._add_accumulator(self._inf_norm_acc_str, p)
            self._add_accumulator(
                name=self._beta1_pow_acc_str,
                param=p,
                fill_value=self._beta1,
                shape=[1])

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0])
        inf_norm = self._get_accumulator(self._inf_norm_acc_str,
                                         param_and_grad[0])
        beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                              param_and_grad[0])
        # create the adamax optimize op
        adamax_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": self._create_param_lr(param_and_grad),
                "Moment": moment,
                "InfNorm": inf_norm,
                "Beta1Pow": beta1_pow_acc
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "MomentOut": moment,
                "InfNormOut": inf_norm
            },
            attrs={
                "beta1": self._beta1,
                "beta2": self._beta2,
                "epsilon": self._epsilon
            },
            stop_gradient=True)

        return adamax_op

    def _finish_update(self, block, parameters_and_grads):
        """Update Beta1 Power accumulator
        """
        assert isinstance(block, framework.Block)
        for param, grad in parameters_and_grads:
            if grad is None or param.trainable is False:
                continue
            with param.block.program._optimized_guard(
                [param, grad]), name_scope('adamx'):
                beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                                      param)
                block.append_op(
                    type="scale",
                    inputs={"X": beta1_pow_acc},
                    outputs={"Out": beta1_pow_acc},
                    attrs={"scale": self._beta1},
                    stop_gradient=True)


class DpsgdOptimizer(Optimizer):
    r"""
    We implement the Dpsgd optimizer according to the CCS 2016 paper -
    Deep Learning with Differential Privacy.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          # First create the Executor.
          place = fluid.CPUPlace() # fluid.CUDAPlace(0)
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              optimizer = fluid.optimizer.Dpsgd(learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0)
              optimizer.minimize(loss)

          # Run the startup program once and only once.
          exe.run(startup_program)

          x = numpy.random.random(size=(10, 1)).astype('float32')
          outs = exe.run(program=train_program,
                         feed={'X': x},
                         fetch_list=[loss.name])

    Args:
        learning_rate (float|Variable): the learning rate used to update parameters. \
        Can be a float value or a Variable with one float value as data element.
        clip (float): clipping threshold.
        batch_size (float): batch size.
        sigma (float): scale of the Gaussian noise.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
    Notes:
       Currently, DpsgdOptimizer doesn't support sparse parameter optimization.
    """

    def __init__(self,
                 learning_rate=0.001,
                 clip=0.9,
                 batch_size=0.999,
                 sigma=1e-8,
                 parameter_list=None):
        assert learning_rate is not None
        assert clip is not None
        assert batch_size is not None
        assert sigma is not None
        super(DpsgdOptimizer, self).__init__(
            learning_rate=learning_rate, parameter_list=parameter_list)
        self.type = "dpsgd"
        self._clip = clip
        self._batch_size = batch_size
        self._sigma = sigma
        '''
        Note(wangzhongpu):
        This property is only used for debugging; there is no need to set it!
        The Dpsgd operator uses time(NULL) as the random seed to generate random numbers.
        However, during debugging we need a deterministic result, so self._seed can be set to a fixed number.
        '''
        self._seed = None

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        # create the dpsgd optimize op
        if self._seed is None:
            self._seed = 0

        dpsgd_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={"ParamOut": param_and_grad[0]},
            attrs={
                "clip": self._clip,
                "batch_size": self._batch_size,
                "sigma": self._sigma,
                "seed": self._seed
            },
            stop_gradient=True)

        return dpsgd_op


class DecayedAdagradOptimizer(Optimizer):
    r"""
    The Decayed Adagrad optimizer can be seen as an Adagrad algorithm that introduces
    the decay rate to solve the problem of a sharp drop in the learning rate
    during model training when using the AdagradOptimizer.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        moment\_out & = decay * moment + (1 - decay) * grad * grad

        param\_out & = param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon}

    Related paper: `Adaptive Subgradient Methods for Online Learning and Stochastic
    Optimization <http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf>`_.

    The original paper does not have an ``epsilon`` attribute. It is added here for numerical
    stability to avoid the division by zero error.

    Args:
        learning_rate (float|Variable): The learning rate used to update ``Parameter``.
            It can be a float value or a ``Variable`` with a float type.
        decay (float, optional): The decay rate. The default value is 0.95.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-06.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    **Notes**:
        **Currently, DecayedAdagradOptimizer doesn't support sparse parameter optimization.**

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[None, 10], dtype='float32')
            trans = fluid.layers.fc(x, 100)
            cost = fluid.layers.reduce_mean(trans)
            optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.2)
            optimizer.minimize(cost)
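
        The update rule above can be sketched in NumPy as follows (a minimal,
        illustrative transcription of the formulas; the variable names are
        hypothetical, not the operator's internal names):

        .. code-block:: python

            import numpy as np

            lr, decay, epsilon = 0.2, 0.95, 1e-06
            param = np.random.random(5).astype('float32')
            grad = np.random.random(5).astype('float32')
            moment = np.zeros(5, dtype='float32')

            moment = decay * moment + (1 - decay) * grad * grad
            param = param - lr * grad / (np.sqrt(moment) + epsilon)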
    """
    _moment_acc_str = "moment"

    def __init__(self,
                 learning_rate,
                 decay=0.95,
                 epsilon=1.0e-6,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        assert learning_rate is not None
        assert decay is not None
        assert epsilon is not None

        super(DecayedAdagradOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "decayed_adagrad"
        self._decay = decay
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])

        # Create the decayed adagrad optimizer op
        decayed_adagrad_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": moment_acc,
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={"ParamOut": param_and_grad[0],
                     "MomentOut": moment_acc},
            attrs={"epsilon": self._epsilon,
                   "decay": self._decay},
            stop_gradient=True)

        return decayed_adagrad_op


class AdadeltaOptimizer(Optimizer):
    r"""
    **Notes: This API does not support sparse parameter optimization.**

    Adadelta Optimizer. Please refer to this for details:
    `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD <https://arxiv.org/abs/1212.5701>`_.

    The update is done as follows:

    .. math::

        E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2

        learning\_rate &= \sqrt{ ( E(dx_{t-1}^2) + \\epsilon ) / ( E(g_t^2) + \\epsilon ) }

        E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\_rate)^2

    Args:
        learning_rate (float|Variable): global learning rate.
        epsilon (float): a small float number for numeric stability. Default 1.0e-6.
        rho (float): a floating point value indicating the decay rate. Default 0.95.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): The default value is None. Normally there is no need for user
                to set this property. For more information, please refer to
                :ref:`api_guide_Name` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            image = fluid.data(name='image', shape=[None, 28], dtype='float32')
            fc = fluid.layers.fc(image, size=10)
            cost = fluid.layers.reduce_mean(fc)
            optimizer = fluid.optimizer.Adadelta(
                learning_rate=0.0003, epsilon=1.0e-6, rho=0.95)

            # optimizer_ops is a list of optimizer operators to update parameters
            # params_grads is a list of (param, param_grad), where param is each
            # parameter and param_grad is the gradient variable of param.
            optimizer_ops, params_grads = optimizer.minimize(cost)
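
        The three equations above can be sketched in NumPy as follows (a minimal
        illustration of one update step; the variable names are hypothetical,
        not the operator's internal names):

        .. code-block:: python

            import numpy as np

            rho, epsilon = 0.95, 1.0e-6
            param = np.random.random(5).astype('float32')
            grad = np.random.random(5).astype('float32')
            avg_squared_grad = np.zeros(5, dtype='float32')
            avg_squared_update = np.zeros(5, dtype='float32')

            avg_squared_grad = rho * avg_squared_grad + (1 - rho) * grad * grad
            lr_t = np.sqrt((avg_squared_update + epsilon) /
                           (avg_squared_grad + epsilon))
            update = -grad * lr_t
            avg_squared_update = rho * avg_squared_update + (1 - rho) * update * update
            param = param + update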
    """

    _avg_squared_grad_acc_str = "_avg_squared_grad"
    _avg_squared_update_acc_str = "_avg_squared_update"

    def __init__(self,
                 learning_rate,
                 epsilon=1.0e-6,
                 rho=0.95,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        super(AdadeltaOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        self.type = "adadelta"
        self._epsilon = epsilon
        self._rho = rho

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._avg_squared_grad_acc_str, p)
            self._add_accumulator(self._avg_squared_update_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        avg_squared_grad_acc = self._get_accumulator(
            self._avg_squared_grad_acc_str, param_and_grad[0])
        avg_squared_update_acc = self._get_accumulator(
            self._avg_squared_update_acc_str, param_and_grad[0])

        # Create the adadelta optimizer op
        adadelta_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "AvgSquaredGrad": avg_squared_grad_acc,
                "AvgSquaredUpdate": avg_squared_update_acc
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "AvgSquaredGradOut": avg_squared_grad_acc,
                "AvgSquaredUpdateOut": avg_squared_update_acc
            },
            attrs={"epsilon": self._epsilon,
                   "rho": self._rho},
            stop_gradient=True)

        return adadelta_op


class RMSPropOptimizer(Optimizer):
    r"""
    Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning
    rate method. The original slides proposed RMSProp: Slide 29 of
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .

    The original equation is as follows:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2

        w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w)

    The first equation calculates the moving average of the squared gradient for
    each weight. Then it divides the gradient by :math:`\\sqrt{r(w,t)}`.

    In some cases, adding a momentum term :math:`\\beta` is beneficial.
    In our implementation, Nesterov momentum is used:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2

        v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{r(w,t) +
            \\epsilon}} \\nabla Q_{i}(w)

        w & = w - v(w, t)

    if centered is True:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2

        g(w, t) & = \\rho g(w, t-1) + (1 - \\rho)\\nabla Q_{i}(w)

        v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{r(w,t) - (g(w, t))^2 +
            \\epsilon}} \\nabla Q_{i}(w)

        w & = w - v(w, t)

    where, :math:`\\rho` is a hyperparameter and typical values are 0.9, 0.95
    and so on. :math:`\\beta` is the momentum term. :math:`\\epsilon` is a
    smoothing term to avoid division by zero, usually set somewhere in the range
    from 1e-4 to 1e-8.


    Parameters:
        learning_rate(float): Global learning rate.
        rho(float): rho is :math: `\\rho` in equation, default is 0.95.
        epsilon(float): :math:`\\epsilon` in the equation is a smoothing term to
            avoid division by zero, default is 1e-6.
        momentum(float): :math:`\\beta` in equation is the momentum term,
            default is 0.0.
        centered(bool): If True, gradients are normalized by the estimated variance of
            the gradient; if False, by the uncentered second moment. Setting this to
            True may help with training, but is slightly more expensive in terms of
            computation and memory. Defaults to False.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Raises:
        ValueError: If learning_rate, rho, epsilon, momentum are None.

    Examples:
          .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
                rms_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
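
          The uncentered update rule above can be sketched in NumPy as follows
          (a minimal illustration of one step; the variable names are hypothetical,
          not the operator's internal names):

          .. code-block:: python

            import numpy as np

            eta, rho, epsilon, beta = 0.1, 0.95, 1.0e-6, 0.0
            w = np.random.random(5).astype('float32')
            g = np.random.random(5).astype('float32')
            r = np.zeros(5, dtype='float32')
            v = np.zeros(5, dtype='float32')

            r = rho * r + (1 - rho) * g * g
            v = beta * v + eta / np.sqrt(r + epsilon) * g
            w = w - v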

    """

    _momentum_acc_str = "momentum"
    _mean_square_acc_str = "mean_square"
    _mean_grad_acc_str = "mean_grad"

    def __init__(self,
                 learning_rate,
                 rho=0.95,
                 epsilon=1.0e-6,
                 momentum=0.0,
                 centered=False,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        super(RMSPropOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if momentum is None:
            raise ValueError("momentum is not set.")

        self.type = "rmsprop"
        self._rho = rho
        self._epsilon = epsilon
        self._momentum = momentum
        self._centered = centered

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._momentum_acc_str, p)
            self._add_accumulator(self._mean_square_acc_str, p)
            self._add_accumulator(self._mean_grad_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        momentum_acc = self._get_accumulator(self._momentum_acc_str,
                                             param_and_grad[0])
        mean_square_acc = self._get_accumulator(self._mean_square_acc_str,
                                                param_and_grad[0])
        mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,
                                              param_and_grad[0])
        rmsprop_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": momentum_acc,
                "MeanSquare": mean_square_acc,
                "MeanGrad": mean_grad_acc,
                "LearningRate": self._create_param_lr(param_and_grad),
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "MomentOut": momentum_acc,
                "MeanSquareOut": mean_square_acc,
                "MeanGradOut": mean_grad_acc
            },
            attrs={
                "epsilon": self._epsilon,
                "decay": self._rho,
                "momentum": self._momentum,
                "centered": self._centered
            },
            stop_gradient=True)

        return rmsprop_op


class FtrlOptimizer(Optimizer):
    r"""
    FTRL (Follow The Regularized Leader) Optimizer.

    The paper that proposed Follow The Regularized Leader (FTRL):
    (https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)

    ..  math::

        &new\_accum = squared\_accum + grad^2

        &if (lr\_power == -0.5):

        &\quad  linear\_accum += grad - \\frac{\\sqrt{new\_accum} - \\sqrt{squared\_accum}}{learning\_rate * param}

        &else:

        &\quad   linear\_accum += grad - \\frac{new\_accum^{-lr\_power} - accum^{-lr\_power}}{learning\_rate * param}


        &x = l1 * sign(linear\_accum) - linear\_accum

        &if (lr\_power == -0.5):

        &\quad   y = \\frac{\\sqrt{new\_accum}}{learning\_rate} + (2 * l2)

        &\quad   pre\_shrink = \\frac{x}{y}

        &\quad   param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)

        &else:

        &\quad   y = \\frac{new\_accum^{-lr\_power}}{learning\_rate} + (2 * l2)

        &\quad   pre\_shrink = \\frac{x}{y}

        &\quad   param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)

        &squared\_accum += grad^2

    Parameters:
        learning_rate (float|Variable): Global learning rate.
        l1 (float): L1 regularization strength, default is 0.0.
        l2 (float): L2 regularization strength, default is 0.0.
        lr_power (float): Learning Rate Power, default is -0.5.
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , 
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information. \
            For details, please refer to :ref:`api_guide_Name`. Default is None.

    Raises:
        ValueError: If learning_rate is None.

    Examples:
          .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y = fluid.layers.data(name='y', shape=[1], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                cost = fluid.layers.square_error_cost(input=y_predict, label=y)
                avg_cost = fluid.layers.mean(cost)

                ftrl_optimizer = fluid.optimizer.Ftrl(learning_rate=0.1)
                ftrl_optimizer.minimize(avg_cost)

                fetch_list = [avg_cost]
                train_reader = paddle.batch(
                    paddle.dataset.uci_housing.train(), batch_size=1)
                feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
                exe = fluid.Executor(place)
                exe.run(fluid.default_startup_program())
                for data in train_reader():
                    exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

    NOTE:
       Currently, FtrlOptimizer doesn't support sparse parameter optimization.
    """

    _squared_acc_str = "squared"
    _linear_acc_str = "linear"

    def __init__(self,
                 learning_rate,
                 l1=0.0,
                 l2=0.0,
                 lr_power=-0.5,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 name=None):
        super(FtrlOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            name=name)
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")

        self.type = "ftrl"
        self._l1 = l1
        self._l2 = l2
        self._lr_power = lr_power

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._squared_acc_str, p)
            self._add_accumulator(self._linear_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        squared_acc = self._get_accumulator(self._squared_acc_str,
                                            param_and_grad[0])
        linear_acc = self._get_accumulator(self._linear_acc_str,
                                           param_and_grad[0])
        ftrl_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "SquaredAccumulator": squared_acc,
                "LinearAccumulator": linear_acc,
                "LearningRate": self._create_param_lr(param_and_grad),
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "SquaredAccumOut": squared_acc,
                "LinearAccumOut": linear_acc
            },
            attrs={"l1": self._l1,
                   "l2": self._l2,
                   "lr_power": self._lr_power},
            stop_gradient=True)

        return ftrl_op


class LambOptimizer(AdamOptimizer):
    r"""
    LAMB (Layer-wise Adaptive Moments optimizer for Batching training) Optimizer.

    LAMB Optimizer is designed to scale up the batch size of training without losing
    accuracy, which supports adaptive element-wise updating and accurate layer-wise
    correction. For more information, please refer to `Large Batch Optimization for
    Deep Learning: Training BERT in 76 minutes <https://arxiv.org/abs/1904.00962>`_ .

    The updating of parameters follows:

    ..  math::

        m_t &= \\beta_1 m_{t - 1}+ (1 - \\beta_1)g_t

        v_t &= \\beta_2 v_{t - 1}  + (1 - \\beta_2)g_t^2

        m_t &= \\frac{m_t}{\\beta_1^t}

        v_t &= \\frac{v_t}{\\beta_2^t}

        r_t &= \\frac{m_t}{\\sqrt{v_t}+\\epsilon}

        w_t &= w_{t-1} -\\eta_t \\frac{\\left \| w_{t-1}\\right \|}{\\left \| r_t + \\lambda w_{t-1}\\right \|} (r_t + \\lambda w_{t-1})


    where :math:`m` is the 1st moment, and :math:`v` the 2nd moment, :math:`\\eta` the 
    learning rate, :math:`\\lambda` the LAMB weight decay rate.

    Args:
Y
3416 3417 3418 3419 3420 3421 3422 3423
        learning_rate (float|Variable, optional): the learning rate used to update parameters. \
            Can be a float value or a Variable with data type float32. Default 0.001.
        lamb_weight_decay (float, optional): The LAMB weight decay rate. Default 0.01.
        beta1 (float, optional): The exponential decay rate for the 1st moment estimates.
            Default 0.9.
        beta2 (float, optional): The exponential decay rate for the 2nd moment estimates.
            Default 0.999.
        epsilon (float, optional): A small float value for numerical stability. Default 1e-6.
H
hong 已提交
3424
        parameter_list (Iterable, optional):  Iterable of ``Variable`` names to update to minimize ``loss``. \
3425 3426
            This parameter is required in dygraph mode. \
            The default value is None in static mode, at this time all parameters will be updated.
3427 3428 3429 3430 3431
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two method: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
            regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect.  \
            Default None, meaning there is no regularization.
3432 3433
        grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of 
            some derived class of ``GradientClipBase`` . There are three cliping strategies 
3434 3435 3436
            ( :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` , :ref:`api_paddle_fluid_clip_ClipGradByNorm` ,
            :ref:`api_paddle_fluid_clip_ClipGradByValue` ). If you want better convergence, it is recommended
            to use :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` . Default None, meaning there is no gradient clipping.
Y
3437 3438 3439 3440 3441
        exclude_from_weight_decay_fn (function|None): Exclude a parameter from weight 
            decay when **exclude_from_weight_decay_fn(parameter)** returns true. 
            Default None.
        name(str|None): For detailed information, please refer to 
            :ref:`api_guide_Name` . Usually name is no need to set and None by default.
Y
3442 3443 3444 3445 3446 3447

    Examples:
        .. code-block:: python
            
            import paddle.fluid as fluid 

Y
3448
            data = fluid.data(name='x', shape=[-1, 5], dtype='float32')
Y
3449 3450 3451
            hidden = fluid.layers.fc(input=data, size=10)
            cost = fluid.layers.mean(hidden)

Y
3452 3453 3454 3455 3456
            def exclude_fn(param):
                return param.name.endswith('.b_0')

            optimizer = fluid.optimizer.Lamb(learning_rate=0.002,
                                             exclude_from_weight_decay_fn=exclude_fn)
            optimizer.minimize(cost)
    """
    _moment1_acc_str = "moment1"
    _moment2_acc_str = "moment2"
    _beta1_pow_acc_str = "beta1_pow_acc"
    _beta2_pow_acc_str = "beta2_pow_acc"

    def __init__(self,
                 learning_rate=0.001,
                 lamb_weight_decay=0.01,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-6,
                 parameter_list=None,
                 regularization=None,
                 grad_clip=None,
                 exclude_from_weight_decay_fn=None,
                 name=None):
        assert learning_rate is not None
        assert lamb_weight_decay is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(LambOptimizer, self).__init__(
            learning_rate=learning_rate,
            parameter_list=parameter_list,
            regularization=regularization,
            grad_clip=grad_clip,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            name=name)
        self.type = "lamb"
        self._weight_decay = lamb_weight_decay
        self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
        block.program._use_lamb = True

        moment1 = self._get_accumulator(self._moment1_acc_str,
                                        param_and_grad[0])
        moment2 = self._get_accumulator(self._moment2_acc_str,
                                        param_and_grad[0])
        beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                              param_and_grad[0])
        beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
                                              param_and_grad[0])

        if self._exclude_from_weight_decay_fn is not None \
            and self._exclude_from_weight_decay_fn(param_and_grad[0]):
            weight_decay = 0.0
        else:
            weight_decay = self._weight_decay
        lr = self._create_param_lr(param_and_grad)

        if framework.in_dygraph_mode():
            _, _, _, _, _ = _C_ops.lamb(
                param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                moment2, beta1_pow_acc, beta2_pow_acc, 'beta1', self._beta1,
                'beta2', self._beta2, 'epsilon', self._epsilon, 'weight_decay',
                weight_decay)
            return None

        # create the lamb optimize op
        lamb_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": lr,
                "Moment1": moment1,
                "Moment2": moment2,
                "Beta1Pow": beta1_pow_acc,
                "Beta2Pow": beta2_pow_acc
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "Moment1Out": moment1,
                "Moment2Out": moment2,
                "Beta1PowOut": beta1_pow_acc,
                "Beta2PowOut": beta2_pow_acc
            },
            attrs={
                "beta1": self._beta1,
                "beta2": self._beta2,
                "epsilon": self._epsilon,
                "weight_decay": weight_decay
            },
            stop_gradient=True)

        return lamb_op


# We shorten the class name, since users will use the optimizer with the package
# name. The sample code:
#
# import paddle.fluid as fluid
#
# sgd = fluid.optimizer.SGD(...)
#
# There is no need to add `Optimizer` as the class suffix.
SGD = SGDOptimizer
Momentum = MomentumOptimizer
Adagrad = AdagradOptimizer
Adam = AdamOptimizer
Adamax = AdamaxOptimizer
Dpsgd = DpsgdOptimizer
DecayedAdagrad = DecayedAdagradOptimizer
Adadelta = AdadeltaOptimizer
RMSProp = RMSPropOptimizer
Ftrl = FtrlOptimizer
LarsMomentum = LarsMomentumOptimizer
Lamb = LambOptimizer


class ModelAverage(Optimizer):
    r"""
	:api_attr: Static Graph

    The ModelAverage optimizer accumulates specific continuous historical parameters
    during training. The accumulated historical range can be controlled by the passed
    ``average_window_rate`` argument. The averaged ``Parameter`` values are used for prediction,
    which usually improves prediction accuracy.

    The average of the ``Parameter`` values in the sliding window is accumulated and saved
    in a temporary variable. It can be applied to the current model's ``Parameter`` by calling
    the ``apply()`` method, and the current model's ``Parameter`` values can be restored by
    calling the ``restore()`` method.

    The window size for calculating the average is determined by ``average_window_rate``,
    ``min_average_window``, ``max_average_window`` and the current ``Parameter`` update times (num_updates).

    When the cumulative times (num_accumulates) is greater than the specific window
    threshold (average_window), the accumulated ``Parameter`` temporary variable is set to 0.0.
    The following example will help to understand the role of these arguments:

    ::

        if num_accumulates >= min_average_window and num_accumulates >= min(max_average_window, num_updates * average_window_rate):
            num_accumulates = 0

    In the above conditional statement, ``num_accumulates`` indicates the current
    accumulated count, which can be understood as the length of the cumulative window.
    The length of the window must be at least the length set by the ``min_average_window`` argument,
    and cannot exceed the length specified by the ``max_average_window`` argument or
    ``num_updates * average_window_rate``, where ``num_updates`` indicates the current number of
    ``Parameter`` updates and ``average_window_rate`` is a coefficient used to calculate the window length.
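
    As a purely illustrative sketch (the numbers below are hypothetical), with
    ``average_window_rate=0.15``, ``min_average_window=10000``, ``max_average_window=12500``
    and ``num_updates=100000``:

    ::

        # num_updates * average_window_rate = 100000 * 0.15 = 15000
        # window threshold = min(max_average_window, 15000) = 12500
        # num_accumulates is reset to 0 once it reaches 12500
        # (and never before it reaches min_average_window = 10000)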

    Args:
        average_window_rate (float): The ratio used to calculate the window length relative to the number of ``Parameter`` updates.
        min_average_window (int, optional): The minimum size of the average window length. The default value is 10000.
        max_average_window (int, optional): The maximum size of the average window length. The default value is 10000.
        regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two methods: \
             :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has already set \
            a regularizer using :ref:`api_fluid_ParamAttr` , the regularization setting here in the optimizer will be \
            ignored for this parameter. Otherwise, the regularization setting here in the optimizer will take effect. \
            Default None, meaning there is no regularization.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Examples:

      .. code-block:: python

        import paddle.fluid as fluid
        import numpy

        # First create the Executor.
        place = fluid.CPUPlace()  # fluid.CUDAPlace(0)
        exe = fluid.Executor(place)

        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # build net
            data = fluid.data(name='X', shape=[None, 1], dtype='float32')
            hidden = fluid.layers.fc(input=data, size=10)
            loss = fluid.layers.mean(hidden)
            optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
            optimizer.minimize(loss)

            # build ModelAverage optimizer
            model_average = fluid.optimizer.ModelAverage(0.15,
                                                         min_average_window=10000,
                                                         max_average_window=12500)

            exe.run(startup_program)
            for i in range(12500):
                x = numpy.random.random(size=(10, 1)).astype('float32')
                outs = exe.run(program=train_program,
                               feed={'X': x},
                               fetch_list=[loss.name])

            # apply ModelAverage
            with model_average.apply(exe):
                x = numpy.random.random(size=(10, 1)).astype('float32')
                exe.run(program=train_program,
                        feed={'X': x},
                        fetch_list=[loss.name])
    """

    def __init__(self,
                 average_window_rate,
                 min_average_window=10000,
                 max_average_window=10000,
                 regularization=None,
                 name=None):
        if framework.in_dygraph_mode():
            raise Exception("In dygraph, don't support ModelAverage.")
        super(ModelAverage, self).__init__(
            0.0, regularization=regularization, name=name)
        self.average_window = average_window_rate
        self.min_average_window = min_average_window
        self.max_average_window = max_average_window

        self.params_grads = []
        for param in framework.default_main_program().global_block(
        ).all_parameters():
            if param.do_model_average != False:
                grad = param.block.create_var(
                    name=unique_name.generate_with_ignorable_key(".".join(
                        [param.name, 'tmp'])),
                    dtype=param.dtype,
                    persistable=False,
                    stop_gradient=True)
                self.params_grads.append((param, grad))

        for param, grad in self.params_grads:
            if grad is None:
                continue
            with param.block.program._optimized_guard(
                [param, grad]), name_scope('move_average'):
                self._append_average_accumulate_op(param)

        self.apply_program = Program()
        block = self.apply_program.global_block()
        with program_guard(main_program=self.apply_program):
            for param_grad in self.params_grads:
                self._add_average_apply_op(block, param_grad)

        self.restore_program = Program()
        block = self.restore_program.global_block()
        with program_guard(main_program=self.restore_program):
            for param_grad in self.params_grads:
                self._add_average_restore_op(block, param_grad)

    def _add_average_apply_op(self, block, param_grad):
        param = block._clone_variable(param_grad[0])
        grad = block._clone_variable(param_grad[1])
        sum_1 = block._clone_variable(self._get_accumulator('sum_1', param))
        sum_2 = block._clone_variable(self._get_accumulator('sum_2', param))
        sum_3 = block._clone_variable(self._get_accumulator('sum_3', param))
        num_accumulates = block._clone_variable(
            self._get_accumulator('num_accumulates', param))
        old_num_accumulates = block._clone_variable(
            self._get_accumulator('old_num_accumulates', param))
        num_updates = block._clone_variable(
            self._get_accumulator('num_updates', param))
        # backup param value to grad
        layers.assign(input=param, output=grad)
        # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
        tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
        sum = layers.sum(x=[sum_1, sum_2, sum_3])
        tmp = layers.cast(
            x=tmp, dtype='float32' if self._dtype == None else self._dtype)
        sum = layers.cast(
            x=sum, dtype='float32' if self._dtype == None else self._dtype)
        ops._elementwise_div(x=sum, y=tmp, out=param)

    def _add_average_restore_op(self, block, param_grad):
        param = block._clone_variable(param_grad[0])
        grad = block._clone_variable(param_grad[1])
        layers.assign(input=grad, output=param)

    def _append_average_accumulate_op(self, param):
        self.helper = LayerHelper("average_accumulate")
        sum_1 = self._add_accumulator('sum_1', param)
        sum_2 = self._add_accumulator('sum_2', param)
        sum_3 = self._add_accumulator('sum_3', param)
        num_accumulates = self._add_accumulator(
            'num_accumulates', param, dtype='int64', shape=[1])
        old_num_accumulates = self._add_accumulator(
            'old_num_accumulates', param, dtype='int64', shape=[1])
        num_updates = self._add_accumulator(
            'num_updates', param, dtype='int64', shape=[1])

        self.helper.append_op(
            type='average_accumulates',
            inputs={
                "param": param,
                "in_sum_1": sum_1,
                "in_sum_2": sum_2,
                "in_sum_3": sum_3,
                "in_num_accumulates": num_accumulates,
                "in_old_num_accumulates": old_num_accumulates,
                "in_num_updates": num_updates
            },
            outputs={
                "out_sum_1": sum_1,
                "out_sum_2": sum_2,
                "out_sum_3": sum_3,
                "out_num_accumulates": num_accumulates,
                "out_old_num_accumulates": old_num_accumulates,
                "out_num_updates": num_updates,
            },
            attrs={
                "average_window": self.average_window,
                "min_average_window": self.min_average_window,
                "max_average_window": self.max_average_window,
            },
            stop_gradient=True)

    @signature_safe_contextmanager
    def apply(self, executor, need_restore=True):
        """
        Apply the average of the cumulative ``Parameter`` to the parameters of the current model.

        Args:
            executor(fluid.Executor): The current network executor.
            need_restore(bool): Restore flag variable, if set to True, the network will restore
                the parameters of the network to the default value, if set to False,
                it will not be restored. The default value is True.

        Examples:

          .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            # First create the Executor.
            place = fluid.CPUPlace()  # fluid.CUDAPlace(0)
            exe = fluid.Executor(place)

            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
                hidden = fluid.layers.fc(input=data, size=10)
                loss = fluid.layers.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
                optimizer.minimize(loss)

                # build ModelAverage optimizer
                model_average = fluid.optimizer.ModelAverage(0.15,
                                                            min_average_window=10000,
                                                            max_average_window=12500)

                exe.run(startup_program)
                for i in range(12500):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    outs = exe.run(program=train_program,
                                feed={'X': x},
                                fetch_list=[loss.name])

                # apply ModelAverage
                with model_average.apply(exe):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    exe.run(program=train_program,
                            feed={'X': x},
                            fetch_list=[loss.name])
        """
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)

    def restore(self, executor):
        """
        Restore ``Parameter`` values of current model.
        
        Args:
            executor(fluid.Executor): The current network executor.

        Examples:

          .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            # First create the Executor.
            place = fluid.CPUPlace()  # fluid.CUDAPlace(0)
            exe = fluid.Executor(place)

            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
                hidden = fluid.layers.fc(input=data, size=10)
                loss = fluid.layers.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
                optimizer.minimize(loss)

                # build ModelAverage optimizer
                model_average = fluid.optimizer.ModelAverage(0.15,
                                                            min_average_window=10000,
                                                            max_average_window=12500)

                exe.run(startup_program)
                for i in range(12500):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    outs = exe.run(program=train_program,
                                feed={'X': x},
                                fetch_list=[loss.name])

                # apply ModelAverage
                with model_average.apply(exe, False):
                    x = numpy.random.random(size=(10, 1)).astype('float32')
                    exe.run(program=train_program,
                            feed={'X': x},
                            fetch_list=[loss.name])

                # restore Parameters
                model_average.restore(exe)
        """
        executor.run(self.restore_program)


class ExponentialMovingAverage(object):
    r"""
	:api_attr: Static Graph

    Compute the moving average of parameters with exponential decay.
    Given a parameter :math:`\\theta`, its exponential moving average (EMA)
    will be

    ..  math::

        \\text{EMA}_0 & = 0

	\\text{EMA}_t & = \\text{decay} * \\text{EMA}_{t-1} + (1 - \\text{decay}) * \\theta_t

    The average results calculated by **update()** method will be saved in 
    temporary variables which are created and maintained by the object, and can 
    be applied to parameters of current model by calling **apply()** method. And 
    the **restore()** method is used to restore the parameters.

    **Bias correction**. All EMAs are initialized to :math:`0` and hence they will be 
    zero biased, which can be corrected by dividing by a factor 
    :math:`(1 - \\text{decay}^t)` , i.e., the actual EMAs applied to parameters 
    when calling the **apply()** method would be 

    ..  math::
    
        \\widehat{\\text{EMA}}_t = \\frac{\\text{EMA}_t}{1 - \\text{decay}^t}

    **Decay rate scheduling**. A decay rate very close to 1 would cause the 
    averages to move very slowly. A better strategy is to use a relatively 
    small decay rate in the very beginning. The argument **thres_steps**
    allows users to pass a Variable to schedule the decay rate, in which case 
    the actual decay rate becomes
     
    ..  math::
    
        \\min(\\text{decay}, \\frac{1 + \\text{thres_steps}}{10 + \\text{thres_steps}})

    Usually **thres_steps** can be the global training steps.
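
    As a small illustrative sketch (the numbers are hypothetical), with ``decay = 0.999``
    the scheduled rate is :math:`\\min(0.999, (1 + \\text{thres_steps}) / (10 + \\text{thres_steps}))` :
    it starts at 0.1 when **thres_steps** is 0, grows to about 0.91 after 90 steps, and is
    capped at 0.999 once the fraction exceeds it. The bias-corrected EMA applied by **apply()**
    then divides the raw EMA by :math:`1 - 0.999^t`.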


    Args:
	decay (float, optional): The exponential decay rate, usually close to 1, such as 
            0.999, 0.9999, ... . Default 0.999.
        thres_steps (Variable|None): If not `None`, schedule the decay rate. 
            Default None.
        name (str|None): For detailed information, please refer to 
            :ref:`api_guide_Name`. Usually name is no need to set and None by 
            default.


    Examples:

	.. code-block:: python

	    import numpy
	    import paddle
	    import paddle.fluid as fluid

	    data = fluid.data(name='x', shape=[-1, 5], dtype='float32')
	    hidden = fluid.layers.fc(input=data, size=10)
	    cost = fluid.layers.mean(hidden)

	    test_program = fluid.default_main_program().clone(for_test=True)

	    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
	    optimizer.minimize(cost)

	    global_steps = fluid.layers.autoincreased_step_counter()
	    ema = fluid.optimizer.ExponentialMovingAverage(0.999, thres_steps=global_steps)
	    ema.update()

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())

	    for pass_id in range(3):
		for batch_id in range(6):
		    data = numpy.random.random(size=(10, 5)).astype('float32')
		    exe.run(program=fluid.default_main_program(),
			feed={'x': data}, 
			fetch_list=[cost.name])

		# usage 1
		with ema.apply(exe):
		    data = numpy.random.random(size=(10, 5)).astype('float32')
		    exe.run(program=test_program,
			    feed={'x': data}, 
			    fetch_list=[hidden.name])
			    

		 # usage 2
		with ema.apply(exe, need_restore=False):
		    data = numpy.random.random(size=(10, 5)).astype('float32')
		    exe.run(program=test_program,
			    feed={'x': data}, 
			    fetch_list=[hidden.name])
		ema.restore(exe)
    """

    def __init__(self, decay=0.999, thres_steps=None, name=None):
        if framework.in_dygraph_mode():
            raise Exception(
                "In dygraph, don't support ExponentialMovingAverage.")
        self._decay = decay
        self._thres_steps = thres_steps
        self._name = name if name is not None else ''
        self._decay_var = self._get_ema_decay()

        self._step_counter_name = "@EMA_STEP_COUNTER@"
        self._params_tmps = []
        for param in default_main_program().global_block().all_parameters():
            if param.do_model_average != False:
                tmp = param.block.create_var(
                    name=unique_name.generate(".".join(
                        [self._name + param.name, 'ema_tmp'])),
                    dtype=param.dtype,
                    persistable=False,
                    stop_gradient=True)
                self._params_tmps.append((param, tmp))

        self._ema_vars = {}
        for param, tmp in self._params_tmps:
            with param.block.program._optimized_guard(
                [param, tmp]), name_scope('moving_average'):
                self._ema_vars[param.name] = self._create_ema_vars(param)

        self.apply_program = Program()
        block = self.apply_program.global_block()
        with program_guard(main_program=self.apply_program):
            decay_pow, global_step = self._get_decay_pow(block)
            for param, tmp in self._params_tmps:
                param = block._clone_variable(param)
                tmp = block._clone_variable(tmp)
                ema = block._clone_variable(self._ema_vars[param.name])
                layers.assign(input=param, output=tmp)
                # bias correction
                with layers.control_flow.Switch() as switch:
                    with switch.case(global_step > 0):
                        layers.assign(
                            output=param, input=ema / (1.0 - decay_pow))
                    with switch.default():
                        layers.assign(output=param, input=ema)

        self.restore_program = Program()
        block = self.restore_program.global_block()
        with program_guard(main_program=self.restore_program):
            for param, tmp in self._params_tmps:
                tmp = block._clone_variable(tmp)
                param = block._clone_variable(param)
                layers.assign(input=tmp, output=param)

    def _get_ema_decay(self):
        with default_main_program()._lr_schedule_guard():
            decay_var = layers.tensor.create_global_var(
                shape=[1],
                value=self._decay,
                dtype='float32',
                persistable=True,
                name="scheduled_ema_decay_rate")

            if self._thres_steps is not None:
                decay_t = (self._thres_steps + 1.0) / (self._thres_steps + 10.0)
                with layers.control_flow.Switch() as switch:
                    with switch.case(decay_t < self._decay):
                        layers.tensor.assign(decay_t, decay_var)
                    with switch.default():
                        layers.tensor.assign(
                            np.array(
                                [self._decay], dtype=np.float32),
                            decay_var)
        return decay_var

    def _get_decay_pow(self, block):
        global_step = layers.create_global_var(
            name=self._step_counter_name,
            shape=[1],
            value=0,
            dtype='int64',
            persistable=True)
        global_step = layers.cast(global_step, "float32")
        decay_var = block._clone_variable(self._decay_var)
        decay_pow_acc = layers.elementwise_pow(decay_var, global_step)
        return decay_pow_acc, global_step

    def _create_ema_vars(self, param):
        param_ema = layers.create_global_var(
            name=unique_name.generate(self._name + param.name + '_ema'),
            shape=param.shape,
            value=0.0,
            dtype=param.dtype,
            persistable=True)

        return param_ema

    def update(self):
        """ 
        Update Exponential Moving Average. Should only call this method in 
        train program.
        """
        global_step = layers.autoincreased_step_counter(
            counter_name=self._step_counter_name)
        param_master_emas = []
        for param, tmp in self._params_tmps:
            with param.block.program._optimized_guard(
                [param, tmp]), name_scope('moving_average'):
                param_ema = self._ema_vars[param.name]
                if param.name + '.master' in self._ema_vars:
                    master_ema = self._ema_vars[param.name + '.master']
                    param_master_emas.append([param_ema, master_ema])
                else:
                    ema_t = param_ema * self._decay_var + param * (
                        1 - self._decay_var)
                    layers.assign(input=ema_t, output=param_ema)

        # for fp16 params
        for param_ema, master_ema in param_master_emas:
            default_main_program().global_block().append_op(
                type="cast",
                inputs={"X": master_ema},
                outputs={"Out": param_ema},
                attrs={
                    "in_dtype": master_ema.dtype,
                    "out_dtype": param_ema.dtype
                })

    @signature_safe_contextmanager
    def apply(self, executor, need_restore=True):
        """
        Apply moving average to parameters for evaluation.
        
        Args:
            executor (Executor): The Executor to execute applying.
            need_restore (bool, optional): Whether to restore parameters after 
                applying. Default True.
        """
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)

    def restore(self, executor):
        """Restore parameters.
        
        Args:
            executor (Executor): The Executor to execute restoring.
        """
        executor.run(self.restore_program)


class PipelineOptimizer(object):
    """
	:api_attr: Static Graph

    Pipeline Optimizer: Make a program run as a pipeline, that is, split a
    program into multiple sections (sub-programs), with each section running on a
    device, to enable the training of large-scale models and the use of
    heterogeneous devices. Meanwhile, all sections run in a pipelined fashion.

    Args:
        optimizer (Optimizer): The optimizer to use, such as SGD.
        num_microbatches (int): Number of microbatches. [Optional. Default:1].
        start_cpu_core_id (int): The first cpu core id to use. [Optional. Default:0].
    
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            with fluid.device_guard("gpu:0"):
                x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0)
                y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0)
                data_loader = fluid.io.DataLoader.from_generator(
                    feed_list=[x, y],
                    capacity=64,
                    use_double_buffer=True,
                    iterable=False)

                emb_x = layers.embedding(input=x, param_attr=fluid.ParamAttr(name="embx"), size=[10,2], is_sparse=False)
                emb_y = layers.embedding(input=y, param_attr=fluid.ParamAttr(name="emby",learning_rate=0.9), size=[10,2], is_sparse=False)

            with fluid.device_guard("gpu:1"):
                concat = layers.concat([emb_x, emb_y], axis=1)
                fc = layers.fc(input=concat, name="fc", size=1, num_flatten_dims=1, bias_attr=False)
                loss = layers.reduce_mean(fc)
            optimizer = fluid.optimizer.SGD(learning_rate=0.5)
            optimizer = fluid.optimizer.PipelineOptimizer(optimizer)
            optimizer.minimize(loss)

            def train_reader():
                for _ in range(4):
                    x = np.random.random(size=[1]).astype('int64')
                    y = np.random.random(size=[1]).astype('int64')
                    yield x, y
            data_loader.set_sample_generator(train_reader, batch_size=1)

            place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            batch_size = 1
            data_loader.start()
            exe.train_from_dataset(
                    fluid.default_main_program())
            data_loader.reset()
    """

    def __init__(self, optimizer, num_microbatches=1, start_cpu_core_id=0):
        self._device = 'cpu'
        if core.is_compiled_with_npu():
            self._device = "npu"
        elif core.is_compiled_with_cuda():
            self._device = "gpu"
        if framework.in_dygraph_mode():
            raise Exception("In dygraph, don't support PipelineOptimizer.")
        if not isinstance(optimizer, Optimizer) and not isinstance(
                optimizer, paddle.optimizer.Optimizer) and not isinstance(
                    optimizer, paddle.fluid.contrib.mixed_precision.decorator.
                    OptimizerWithMixedPrecision):
            raise ValueError("The 'optimizer' parameter for "
                             "PipelineOptimizer must be an instance of "
                             "Optimizer, but the given type is {}.".format(
                                 type(optimizer)))
        self._optimizer = optimizer

        # Get the original optimizer defined by users, such as SGD
        self._origin_optimizer = self._optimizer
        while hasattr(self._origin_optimizer, "inner_opt"):
            self._origin_optimizer = self._origin_optimizer.inner_opt

        assert num_microbatches >= 1, (
            "num_microbatches must be a positive value.")
        self._num_microbatches = num_microbatches
        assert start_cpu_core_id >= 0, (
            "start_cpu_core_id must be a non-negative integer.")
        self._start_cpu_core_id = start_cpu_core_id
        self._place_list = None
        op_maker = core.op_proto_and_checker_maker
        self._op_role = op_maker.OpRole
        self._op_role_key = op_maker.kOpRoleAttrName()
        self._op_role_var_key = op_maker.kOpRoleVarAttrName()
        self._op_device_key = op_maker.kOpDeviceAttrName()
        self._param_device_map = None
        self._pipeline_pair = []
        self._pp_ring_map = dict()
        self.output_var_to_op = None
        self.input_var_to_op = None

    # insert allreduce op to sync global information for global
    # gradient clip and amp
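    # Rough sketch of the pattern inserted below (illustrative only):
    #   for a `reduce_any` op: cast(bool -> int32) -> c_allreduce_max -> cast(int32 -> bool)
    #   for other ops (e.g. the `sum` of a global gradient clip): c_allreduce_sum on the output var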
    def _insert_allreduce_op(self, op_idx, block):
        """
        Insert allreduce op to sync global information for global
        gradient clip and amp.
        """
        op = block.ops[op_idx]
        out_name = op.desc.output_arg_names()[0]
        out_var = block.var(out_name)
        offset = 0
        if op.type == "reduce_any":
            # cast the bool var to int32 to use allreduce_max op
            temp_var_name = unique_name.generate(out_name + "_cast_int32")
            temp_var = block.create_var(
                name=temp_var_name, shape=[1], dtype="int32")
            block._insert_op(
                op_idx + 1 + offset,
                type='cast',
                inputs={'X': out_var},
                outputs={'Out': temp_var},
                attrs={
                    'in_dtype': out_var.dtype,
                    'out_dtype': temp_var.dtype,
                    self._op_role_key: self._op_role.Optimize
                })
            offset += 1
        block._insert_op(
            op_idx + 1 + offset,
            type='c_allreduce_max'
            if op.type == "reduce_any" else 'c_allreduce_sum',
            inputs={'X': temp_var if op.type == "reduce_any" else out_var},
            outputs={'Out': temp_var if op.type == "reduce_any" else out_var},
            attrs={
                'ring_id': self.global_ring_id,
                self._op_role_key: self._op_role.Optimize,
                'use_calc_stream': True
            })
        offset += 1
        if op.type == "reduce_any":
            block._insert_op(
                op_idx + 1 + offset,
                type='cast',
                inputs={'X': temp_var},
                outputs={'Out': out_var},
                attrs={
                    'in_dtype': temp_var.dtype,
                    'out_dtype': out_var.dtype,
                    self._op_role_key: self._op_role.Optimize
                })
            offset += 1
        return offset

    def _create_vars(self, block, ori_block):
        # Create vars for block, copied from ori_block
        used_var_set = set()
        added_op_num = 0
        op_idx = 0
        op_size = block.desc.op_size()
        while op_idx < op_size + added_op_num:
            # Whether to insert allreduce_sum or allreduce_max op.
            # For amp and global gradient clip strategies, we should
            # get the global information, so allreduce op is needed.
            should_insert = False
            op = block.ops[op_idx]
            # For op process vars on all devices, remove its input 
            # vars not in this block
            reserved_x = []
            if op.type == 'reduce_any' and self._is_optimize_op(op):
                should_insert = True
            elif op.type == 'concat' and self._is_optimize_op(op):
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
            elif op.type == 'update_loss_scaling':
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
                op.desc.set_output('Out', reserved_x)
            elif op.type == 'check_finite_and_unscale':
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
                op.desc.set_output('Out', reserved_x)
                if len(reserved_x) == 0:
                    block._remove_op(op_idx)
                    op_size -= 1
                    continue
            elif op.type == 'sum' and self._is_gradient_clip_op(op):
                for input_name in op.desc.input("X"):
                    if block._find_var_recursive(input_name):
                        reserved_x.append(input_name)
                op.desc.set_input('X', reserved_x)
                should_insert = True

            vars = op.desc.input_arg_names() + op.desc.output_arg_names()
            for var in vars:
                # a var whose name contains "blocking_queue" 
                # only exists in startup program 
                if var in used_var_set or "_blocking_queue" in var:
                    continue
                used_var_set.add(var)
                if block._find_var_recursive(str(var)): continue
                source_var = ori_block._var_recursive(str(var))
                if source_var.type == core.VarDesc.VarType.READER:
                    dest_var = block.create_var(
                        name=var,
                        type=core.VarDesc.VarType.READER,
                        persistable=source_var.persistable)
                else:
                    dest_var = block._clone_variable(source_var, False)
                dest_var.stop_gradient = source_var.stop_gradient
            # When used with sharding, allreduce_sum and allreduce_max
            # used for global gradient clip and amp will be added by sharding.
            op_idx += 1
            if self.use_sharding or not should_insert: continue
            inserted_ops = self._insert_allreduce_op(op_idx - 1, block)
            added_op_num += inserted_ops
            op_idx += inserted_ops
        block._sync_with_cpp()

    def _is_loss_grad_op(self, op):
        assert self._op_role_key in op.attr_names
        op_role = int(op.attr(self._op_role_key))
        return op_role & int(self._op_role.Backward) and op_role & int(
            self._op_role.Loss)

    def _is_backward_op(self, op):
        return self._op_role_key in op.attr_names and (
            int(op.attr(self._op_role_key)) & int(self._op_role.Backward))

    def _is_loss_op(self, op):
        assert self._op_role_key in op.attr_names
        return int(op.attr(self._op_role_key)) == int(self._op_role.Loss)

    def _is_optimize_op(self, op):
        return self._op_role_key in op.attr_names and (
            int(op.attr(self._op_role_key)) & int(self._op_role.Optimize))

    def _is_update_op(self, op):
        return 'Param' in op.input_names and 'Grad' in op.input_names and (
            "LearningRate" in op.input_names)

    def _split_program(self, main_program, devices):
        """
        Split a program into sections according to devices that ops run on.
        The op whose op_device attr is "gpu:all" is copied to all sections.

        Args:
            main_program (Program): the main program
            devices: all used devices
        """
        # Map from device to its corresponding section program info
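        # Illustrative sketch: with devices == ["gpu:0", "gpu:1"], ops whose
        # op_device attr is "gpu:0" go into device_program_map["gpu:0"], ops on
        # "gpu:1" into device_program_map["gpu:1"], and ops marked "gpu:all"
        # (e.g. lr-related ops) are copied into every sub-program.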
        device_program_map = defaultdict(Program)

        block = main_program.block(0)
        for op in block.ops:
            device = op.attr(self._op_device_key)
            # Copy ops whose op_device set to "gpu:all" to all sections.
            if device == f"{self._device}:all":
                for device in devices:
                    program = device_program_map[device]
                    op_desc = op.desc
                    ap_op = program.global_block().desc.append_op()
                    ap_op.copy_from(op_desc)
                    ap_op._set_attr(self._op_device_key, "")
            else:
                program = device_program_map[device]
                op_desc = op.desc
                ap_op = program.global_block().desc.append_op()
                ap_op.copy_from(op_desc)
                ap_op._set_attr(self._op_device_key, "")

        program_list = []
        for key in devices:
            program = device_program_map[key]
            program._sync_with_cpp()
            program_list.append(program)

        return program_list

    def _get_op_device_for_startup_program(self, var_name):
        """
        For adam optimizer, it will add accumulators and initialize them
        with fill_constant, and force the op device to cpu. Hence, we should
        get the real op_device attribute of the fill_constant as the device
        where the corresponding parameters on.
        """
        assert "beta1_pow_acc" in var_name or "beta2_pow_acc" in var_name, \
            'For accumulators for Adam, the name must contain beta1_pow_acc ' \
            'or beta2_pow_acc.'
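        # e.g. (hypothetical name) "fc_0.w_0_beta1_pow_acc_0": truncating at
        # '_beta' recovers the parameter name "fc_0.w_0", whose device is then
        # looked up in self._param_device_map.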
        param_name = var_name[0:var_name.index('_beta')]
        device = self._param_device_map[param_name]
        return device

    def _split_startup_program(self, startup_program, device_id):
        block = startup_program.global_block()
        new_startup_program = Program()
        for op in block.ops:
            device = op.attr(self._op_device_key)
            if device == "cpu":
                assert op.type == "fill_constant", (
                    "For ops in startup program with the op_device attribute "
                    "of cpu, they must be of type fill_constant.")
                output_var = op.output_arg_names[0]
                device = self._get_op_device_for_startup_program(output_var)

            if device:
                device_index = int(device.split(':')[1])
            else:
                # LR related ops
                device = None
            if device and device_index != device_id: continue
            op_desc = op.desc
            ap_op = new_startup_program.global_block().desc.append_op()
            ap_op.copy_from(op_desc)
            ap_op._set_attr(self._op_device_key, "")
        new_startup_program._sync_with_cpp()
        self._create_vars(new_startup_program.global_block(), block)
        return new_startup_program

    def _find_post_op(self, index, var_name):
        """
        Find the post op that has variable named var_name as input.
        """
        # bugfix for uniform hybrid parallelism
        if '.cast_fp32' in var_name:
            var_name = var_name.replace('.cast_fp32', '')
        if '.cast_fp16' in var_name:
            var_name = var_name.replace('.cast_fp16', '')

        post_ops = self.input_var_to_op[var_name]
        if post_ops == None: return None
        result_op = None
        for post_op, post_idx in reversed(post_ops):
            if post_idx > index:
                result_op = post_op
                break
        return result_op

    def _find_prev_op(self, index, var_name):
        """
        Find the previous op of op with index that outputs
        variable named var_name.
        """
        prev_ops = self.output_var_to_op[var_name]
        if prev_ops == None: return None
        result_op = None
        for prev_op, prev_idx in reversed(prev_ops):
            if prev_idx < index:
                result_op = prev_op
                break
        return result_op

    def _rename_arg(self, op, old_name, new_name):
        op._rename_input(old_name, new_name)
        op._rename_output(old_name, new_name)

    def _create_var(self, block, ref_var, name):
        """
        Create a new var for block, which has the same type,
        shape and dtype as ref_var, then rename it with the
        name `name`.
        """
        new_var = block.create_var(
            name=name,
            shape=ref_var.shape,
            dtype=ref_var.dtype,
            type=ref_var.type,
            lod_level=ref_var.lod_level,
            persistable=ref_var.persistable,
            is_data=ref_var.is_data,
            need_check_feed=ref_var.desc.need_check_feed())
        new_var.stop_gradient = ref_var.stop_gradient
        return new_var

    def _strip_grad_suffix(self, name):
        """
        Strip the grad suffix from the given variable name
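        (e.g., assuming the default suffix "@GRAD": "fc_0.w_0@GRAD" -> "fc_0.w_0")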
        """
        pos = name.find(core.grad_var_suffix())
        return name[:pos] if pos != -1 else name

    def _append_grad_suffix(self, name):
        """
        Append grad suffix to the given variable name
        """
        return name + core.grad_var_suffix()

    def _get_op_device_attr(self, op):
        """
        Get the op_device attribute of an op.
        """
        device = op.attr(self._op_device_key) \
            if op.has_attr(self._op_device_key) else None
        if device:
            assert device[0:3] == 'gpu' or device[0:3] == 'npu', "Now, only gpu and npu devices are " \
                "supported in pipeline parallelism."
        return device

    def _add_op_device_attr_for_op(self, op, idx, block):
        """
        Add the op_device attribute for ops that do not have that attribute set.
        We use "gpu:all" to represent the op should be put on all
        sub-programs, such as lr-related ops. Note that: "gpu:all"
        is only used by pipeline as an indicator.
        """
        lrsched_role = int(self._op_role.LRSched)
        if op.attr(self._op_role_key) == lrsched_role:
            # For LRSched ops, we should put them on all sub-programs to
            # make sure each sub-program update the lr correctly
4547
            op._set_attr(self._op_device_key, f"{self._device}:all")
4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564
        # bugfix in hybrid parallelism
        elif op.type == "sum" and self._is_backward_op(op):
            # For sum ops that compute the sum of @RENAMED@ vars
            for name in op.desc.input_arg_names():
                assert '@RENAME@' in name, \
                    "The op must be sum used to accumulate renamed vars."
            assert len(op.desc.output_arg_names()) == 1
            out_name = op.desc.output_arg_names()[0]
            post_op = self._find_post_op(idx, out_name)
            assert post_op.has_attr(
                'op_device'), "{} has no op_device attr for var {}".format(
                    post_op.type, out_name)
            device = post_op.attr(self._op_device_key)
            assert device, "The post op must have op_device set."
            op._set_attr(self._op_device_key, device)
        elif (op.type == "cast" or
              op.type == "scale") and self._is_backward_op(op):
4565
            prev_op = self._find_prev_op(idx, op.desc.input("X")[0])
4566 4567
            op._set_attr(self._op_device_key, prev_op.attr(self._op_device_key))
        elif op.type == "memcpy" and not self._is_optimize_op(op):
4568
            # for checkpoint offloading
4569 4570 4571 4572 4573
            assert len(op.input_arg_names) == 1 and len(
                op.output_arg_names) == 1
            input_name = op.input_arg_names[0]
            output_name = op.output_arg_names[0]
            if '@Fetch' in output_name:
4574
                post_op = self._find_post_op(idx, output_name)
4575 4576 4577
                op._set_attr(self._op_device_key,
                             post_op.attr(self._op_device_key))
            else:
4578
                prev_op = self._find_prev_op(idx, op.desc.input("X")[0])
4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594
                op._set_attr(self._op_device_key,
                             prev_op.attr(self._op_device_key))
        elif self._is_loss_op(op):
            # For loss * loss_scaling op added by AMP
            offset = 1
            while (not block.ops[idx + offset].has_attr(self._op_device_key) or
                   not block.ops[idx + offset].attr(self._op_device_key)):
                offset += 1
            device = block.ops[idx + offset].attr(self._op_device_key)
            assert device, "Please put you program within device_guard scope."
            for i in range(offset):
                block.ops[idx + i]._set_attr(self._op_device_key, device)
        elif self._is_optimize_op(op) and op.type == "cast":
            # For fp16-->fp32 cast added by AMP
            grad_name = op.output('Out')
            assert len(grad_name) == 1
4595
            param_name = self._strip_grad_suffix(grad_name[0])
4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613
            device = self._param_device_map[param_name]
            op._set_attr(self._op_device_key, device)
        elif self._is_gradient_clip_op(op) or self._is_regularization_op(op):
            # For gradient clip and regularization ops, we set their op_device
            # attribute to the device where their corresponding parameters on.
            assert self._op_role_var_key in op.attr_names, "gradient_clip " \
                "and regularization ops must have op_role_var attribute."
            op_role_var = op.attr(self._op_role_var_key)
            assert len(op_role_var) == 2, "op_role_var for gradient_clip " \
                "regularization ops must have two elements."
            param_name = op_role_var[0]
            device = self._param_device_map[param_name]
            # For sum op added by global gradient clip, it must be 
            # put on all devices
            if (op.type == 'sum' or op.type == 'sqrt' or
                    op.type == 'fill_constant' or
                    op.type == 'elementwise_max' or
                    op.type == 'elementwise_div'):
4614
                device = f"{self._device}:all"
4615
            op._set_attr(self._op_device_key, device)
B
4617
            op._set_attr(self._op_device_key, f"{self._device}:all")
4618 4619
        else:
            other_known_ops = [
4620 4621 4622 4623 4624
                'update_loss_scaling',
                'reduce_any',
                'concat',
                'sum',
                'check_finite_and_unscale',
B
4626 4627 4628 4629 4630
            ]
            assert op.type in other_known_ops, "For other ops without " \
                "op_device set, they must be one of {}, but it " \
                "is {}".format(other_known_ops, op.type)
            assert self._is_optimize_op(op)
4631
            op._set_attr(self._op_device_key, f"{self._device}:all")
4632 4633

    def _add_op_device_attr(self, block):
4634
        """
4635 4636
        Add op_device attrribute for ops in block that have 
        not that attribute set.
        """
        for idx, op in enumerate(list(block.ops)):
            if (op.type == "create_py_reader" or op.type == "read" or
                    op.type == "create_double_buffer_reader"):
                # Copy read related ops to all sections to make them exit
                # after each epoch.
                # We use "gpu:all" to represent the op should be put on all
                # sub-programs, such as lr-related ops. Note that: "gpu:all"
                # is only used by pipeline as an indicator.
                op._set_attr(self._op_device_key, f"{self._device}:all")
                continue
            # op_device attribute has been set
            if self._get_op_device_attr(op): continue
            self._add_op_device_attr_for_op(op, idx, block)

    def _check_validation(self, block):
        """
        Check whether ops in a block have both the op_device and the 
        op_role attributes set.
        Then, return all devices in order.
        """
        device_list = []
        # Section worker only supports the following op_role
        valid_op_role_value = [
            int(self._op_role.LRSched),
            int(self._op_role.Forward),
            int(self._op_role.Backward),
            int(self._op_role.Loss),
            int(self._op_role.Optimize),
            int(self._op_role.Backward) | int(self._op_role.Loss),
        ]
        pre_stage_id = None
        decrease_flag = False
        in_optimize = False
        in_forward = True
        for op in block.ops:
            if not op._has_kernel(op.type):
                assert op.type == "conditional_block" and (
                    op.attr(self._op_role_key) == int(self._op_role.LRSched)), (
                        "Now, the only supported op without kernel is "
                        "conditional_block, and its op role must be LRSched.")
            assert op.has_attr(self._op_role_key), (
                "op ({}) has no {} attribute.".format(op.type,
                                                      self._op_role_key))
            op_role = op.attr(self._op_role_key)
            assert int(op_role) in valid_op_role_value, \
                "op_role {} for op {} must be one of {}".format(
                    op_role,
                    op.type,
                    valid_op_role_value)
            if int(op_role) == int(self._op_role.Optimize):
                in_optimize = True
            if int(op_role) == int(self._op_role.Backward):
                in_forward = False

            assert op.has_attr(self._op_device_key), (
                "op ({}) has no {} attribute.".format(op.type,
                                                      self._op_device_key))

            device = op.attr(self._op_device_key)
            assert device, ("op_device attribute for op "
                            "{} has not been set.".format(op.type))
            if device == f"{self._device}:all": continue

            dev_type = device.split(':')[0]
            stage_id = int(device.split(':')[1])
            assert dev_type == "gpu" or dev_type == 'npu', (
                "Now only gpu and npu devices are supported "
                "for pipeline parallelism.")

            if device not in device_list:
                device_list.append(device)

            if not in_optimize:
                if pre_stage_id is not None:
                    interval = stage_id - pre_stage_id
                    assert abs(interval) <= 1, \
                        "The stage interval of two consecutive ops in the pipeline must be < = 1," \
                        "but the interval of op={} and prev op is {}".format(op, interval)
                    # stage must be in order, such as Forward(0 1 2 3 4), Backward(4 3 2 1 0)
                    # if stage is unordered, such as Forward(0 1 2 3 4 3 4), will report error
4718 4719 4720 4721 4722 4723
                    if in_forward:
                        assert interval >= 0, \
                            "Pipeline stage must be sequential increment in Forward, prev_stage={}, " \
                            "please check the stage of op={}".format(pre_stage_id, op)
                    else:
                        # FIXME(wangxi): recompute check failed
                        pass
                        #assert interval <=0, \
                        #    "Pipeline stage must be sequential decrement in Backward, prev_stage={}, " \
                        #    "please check the stage of op={}".format(pre_stage_id, op)
                pre_stage_id = stage_id

        return device_list

    def _insert_sendrecv_ops_for_boundaries(self, block):
        """
        Insert a pair of send and recv ops for every two
        consecutive ops on different devices.
        """
        # A map from var to device where op takes it as input,
        # avoiding multiple send and recv ops.
        input_var_to_device = dict()
        # bugfix hybrid parallelism
        first_optimize_index = None
        for index, op in enumerate(list(block.ops)):
            if self._is_optimize_op(op):
                first_optimize_index = index
                break
        extra_index_info = {
            'index': 0,
            'first_optimize_index': first_optimize_index
        }

        for index, op in enumerate(list(block.ops)):
            cur_device = op.attr(self._op_device_key)
            if cur_device == f"{self._device}:all": continue
            for var_name in op.input_arg_names:
                var = block.var(var_name)
                # skip data var
                if var.is_data: continue
                prev_device = None
                generate_ops = self.output_var_to_op.get(var_name)
                if generate_ops is None:
                    if var_name not in self._param_device_map:
                        continue
                    prev_device = self._param_device_map[var_name]

                prev_op = self._find_prev_op(index, var_name)

                if not prev_device:
                    prev_device = prev_op.attr(self._op_device_key) \
                        if prev_op else None

                if prev_device is None or prev_device == f"{self._device}:all":
                    continue

                if prev_device == cur_device: continue

                if var_name not in input_var_to_device:
                    input_var_to_device[var_name] = []
                if (cur_device, prev_device) in input_var_to_device[var_name]:
                    continue

                device_type = cur_device.split(':')[0] + ':'

                def _insert_send_recv(cur_id, prev_id):
                    cur_dev = device_type + str(cur_id)
                    prev_dev = device_type + str(prev_id)
                    if (cur_dev, prev_dev) in input_var_to_device[var_name]:
                        return

                    if cur_id - prev_id > 1:
                        _insert_send_recv(cur_id - 1, prev_id)
                        _insert_send_recv(cur_id, cur_id - 1)
                        input_var_to_device[var_name].append(
                            (cur_dev, prev_dev))
                        return
                    elif cur_id - prev_id < -1:
                        _insert_send_recv(cur_id + 1, prev_id)
                        _insert_send_recv(cur_id, cur_id + 1)
                        input_var_to_device[var_name].append(
                            (cur_dev, prev_dev))
                        return

                    assert abs(cur_id - prev_id) == 1
                    input_var_to_device[var_name].append((cur_dev, prev_dev))

                    op_role = op.attr(self._op_role_key)
                    var = block.vars[var_name]
                    pair = (prev_id, cur_id)
                    # 1000 is just a magic number
                    pair_key = prev_id * 1000 + cur_id
                    if pair not in self._pipeline_pair:
                        self._pipeline_pair.append(pair)
                        self._pp_ring_map[pair_key] = self.ring_id
                        ring_id = self.ring_id
                        self.ring_id += 1
                    else:
                        ring_id = self._pp_ring_map[pair_key]

                    if self.schedule_mode == 'F-then-B':  # F-then-B
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='send_v2',
                            inputs={'X': var},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': True,
                                'peer': 1,
                                'ring_id': ring_id
                            })
                        extra_index_info['index'] += 1
                        var_shape = list(var.shape)
                        var_shape[0] = self.micro_batch_size if var_shape[
                            0] < 0 else var_shape[0]
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='recv_v2',
                            outputs={'Out': [var]},
                            attrs={
                                'out_shape': var_shape,
                                'dtype': var.dtype,
                                self._op_device_key: cur_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': True,
                                'peer': 0,
                                'ring_id': ring_id
                            })
                        extra_index_info['index'] += 1
                    elif self.schedule_mode == '1F1B':  # 1F1B
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='c_sync_calc_stream',
                            inputs={'X': [var]},
                            outputs={'Out': [var]},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: op_role,
                            })
                        extra_index_info['index'] += 1
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='send_v2'
                            if self.mp_degree == 1 else 'partial_send',
                            inputs={'X': var},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': False,
                                'ring_id': ring_id,
                                'peer': 1,
                                # if send_v2, num&id attr is not in op_attrs, will not insert
                                'num': self.mp_degree,
                                'id': self.mp_rank,
                            })
                        extra_index_info['index'] += 1
                        insert_index = None

                        if int(op_role) == int(self._op_role.Backward):
                            insert_index = extra_index_info[
                                'first_optimize_index']
                            new_op_role = self._op_role.Optimize
                        else:
                            insert_index = index
                            new_op_role = self._op_role.Backward

                        sync_comm_op = block._insert_op_without_sync(
                            index=insert_index + extra_index_info['index'],
                            type='c_sync_comm_stream',
                            inputs={'X': [var]},
                            outputs={'Out': [var]},
                            attrs={
                                self._op_device_key: prev_dev,
                                self._op_role_key: new_op_role,
                                'ring_id': ring_id,
                            })

                        if int(op_role) == int(self._op_role.Forward):
                            sync_comm_op._set_attr('pipeline_flag', '')
                            extra_index_info['index'] += 1

                        var_shape = list(var.shape)
                        var_shape[0] = self.micro_batch_size if var_shape[
                            0] < 0 else var_shape[0]

                        numel = np.prod(var.shape)
                        assert numel % self.mp_degree == 0, \
                            "The numel={} must be divisible by mp_degree={}".format(numel, self.mp_degree)
                        block._insert_op_without_sync(
                            index=index + extra_index_info['index'],
                            type='recv_v2'
                            if self.mp_degree == 1 else 'partial_recv',
                            outputs={'Out': [var]},
                            attrs={
                                'out_shape': var_shape,
                                'dtype': var.dtype,
                                self._op_device_key: cur_dev,
                                self._op_role_key: op_role,
                                'use_calc_stream': True,
                                'peer': 0,
                                'ring_id': ring_id,
                                # if recv_v2, num&id attr is not in op_attrs, will not insert
                                'num': self.mp_degree,
                                'id': self.mp_rank,
                            })
                        extra_index_info['index'] += 1
                        if self.mp_degree > 1:
                            block._insert_op_without_sync(
                                index=index + extra_index_info['index'],
                                type='partial_allgather',
                                inputs={'X': [var]},
                                outputs={'Out': [var]},
                                attrs={
                                    self._op_device_key: cur_dev,
                                    self._op_role_key: op_role,
                                    'use_calc_stream': True,
                                    'ring_id': 0,
                                    # if recv_v2, num&id attr is not in op_attrs, will not insert
                                    'nranks': self.mp_degree,
                                    'rank': self.mp_rank,
                                })
                            extra_index_info['index'] += 1
                    else:
                        raise ValueError(
                            "Now only 'F-then-B' and '1F1B' are supported."
                            "The given value is {}.".format(self.schedule_mode))

                _insert_send_recv(
                    int(cur_device.split(':')[1]),
                    int(prev_device.split(':')[1]))
        block._sync_with_cpp()

    def _insert_loss_scale(self, block):
        """
        Scale the loss corresponding to number of micro-batches.
        """
        if self._num_microbatches == 1: return
        for index, op in reversed(tuple(enumerate(list(block.ops)))):
            if self._is_loss_grad_op(op):
                loss_grad_var = block.vars[op.output_arg_names[0]]
                block._insert_op(
                    index=index + 1,
                    type='scale',
                    inputs={'X': loss_grad_var},
                    outputs={'Out': loss_grad_var},
                    attrs={
                        'scale': 1.0 / self._num_microbatches,
                        self._op_role_key: self._op_role.Backward
                    })
                break

    def _rename_gradient_var_name(self, block):
        for index, op in enumerate(block.ops):
            if not self._is_optimize_op(op): continue
            input_names = op.input_arg_names
            output_names = op.output_arg_names
            in_out_names = input_names + output_names
            if op.type == 'cast' or op.type == "c_sync_comm_stream": continue
            # append "MERGED" to the names of parameter gradients,
            # and mofify the op_role_var attribute (by rename_arg func).
            for name in in_out_names:
                if not core.grad_var_suffix() in name: continue
                param_name = name.strip(core.grad_var_suffix())
                new_grad_name = name + "@MERGED"
                self._rename_arg(op, name, new_grad_name)

    def _accumulate_gradients(self, block, pp_allreduce_in_optimize=False):
        """
        Create a new merged gradient for each parameter and accumulate the
        corresponding gradient to it.
        """
        merged_gradient_names = []
        first_opt_op_idx = None

        for index, op in reversed(tuple(enumerate(list(block.ops)))):
            # remove the cast op of fp16 grad to fp32 grad
            if self._is_optimize_op(op) and op.type == 'cast':
                in_name = op.input_arg_names[0]
                out_name = op.output_arg_names[0]
                if out_name.strip('@GRAD') in self._param_device_map:
                    assert in_name.replace('.cast_fp16', '') == out_name
                    block._remove_op(index)
                    continue

            if self._is_backward_op(op) and not first_opt_op_idx:
                first_opt_op_idx = index + 1
                # no optimize phase
                if first_opt_op_idx == len(block.ops): return
                if block.ops[first_opt_op_idx].type == "c_sync_comm_stream":
                    first_opt_op_idx += 1

            if self._is_backward_op(op) and (
                    self._op_role_var_key in op.attr_names):
                op_role_var = op.attr(self._op_role_var_key)
                if len(op_role_var) == 0: continue
                assert len(op_role_var) % 2 == 0
                for i in range(0, len(op_role_var), 2):
                    offset = 0
                    param_name = op_role_var[i]
                    if not block.has_var(param_name): continue
                    if '@BroadCast' in param_name: continue
                    param_grad_name = param_name + core.grad_var_suffix()
                    merged_param_grad_name = param_grad_name + '@MERGED'
                    if not block.has_var(merged_param_grad_name):
                        self._create_var(block, block.vars[param_name],
                                         merged_param_grad_name)
                    assert block.has_var(merged_param_grad_name)
                    param_grad_var = block.var(param_grad_name)
                    merged_param_grad_var = block.var(merged_param_grad_name)
                    merged_param_grad_var.persistable = True
                    block._insert_op(
                        index=first_opt_op_idx + offset,
                        type='fill_constant',
                        inputs={},
                        outputs={'Out': [merged_param_grad_var]},
                        attrs={
                            'shape': merged_param_grad_var.shape,
                            'dtype': merged_param_grad_var.dtype,
                            'value': float(0),
                            # a trick to run this op once per mini-batch
                            self._op_role_key: self._op_role.Optimize.LRSched,
                        })
                    offset += 1
                    grad_name = op_role_var[i + 1]
                    grad_var = block.vars[grad_name]
                    if not 'cast_fp16' in grad_name:
                        block._insert_op(
                            index=first_opt_op_idx + offset,
                            type='sum',
                            inputs={'X': [grad_var, merged_param_grad_var]},
                            outputs={'Out': merged_param_grad_var},
                            attrs={
                                self._op_role_key: self._op_role.Backward,
                            })
                        offset += 1
                        merged_gradient_names.append(merged_param_grad_name)
                    else:
                        # cast gradient to fp32 to accumulate to merged gradient
                        cast_grad_var_name = param_grad_name + '@TMP'
                        cast_grad_var = self._create_var(block, param_grad_var,
                                                         cast_grad_var_name)
                        cast_grad_var.persistable = False
                        block._insert_op(
                            index=first_opt_op_idx + offset,
                            type='cast',
                            inputs={'X': grad_var},
                            outputs={'Out': cast_grad_var},
                            attrs={
                                'in_dtype': grad_var.dtype,
                                'out_dtype': cast_grad_var.dtype,
                                self._op_role_key: self._op_role.Backward,
                            })
                        offset += 1
                        block._insert_op(
                            index=first_opt_op_idx + offset,
                            type='sum',
                            inputs={
                                'X': [merged_param_grad_var, cast_grad_var]
                            },
                            outputs={'Out': merged_param_grad_var},
                            attrs={
                                self._op_role_key: self._op_role.Backward,
                            })
                        offset += 1
                        merged_gradient_names.append(merged_param_grad_name)
        return merged_gradient_names

    def _add_sub_blocks(self, main_block, program_list):
        main_program = main_block.program
        for prog in program_list:
            for op in prog.block(0).ops:
                if not op.has_attr('sub_block'):
                    continue
                origin_sub_block_id = op.attr('sub_block').id
                origin_sub_block = main_program.block(origin_sub_block_id)
                new_sub_block = prog._create_block(parent_idx=0)
                for sub_op in origin_sub_block.ops:
                    op_desc = sub_op.desc
                    ap_op = new_sub_block.desc.append_op()
                    ap_op.copy_from(op_desc)
                new_sub_block._sync_with_cpp()
                self._create_vars(new_sub_block, origin_sub_block)
                op._set_attr('sub_block', new_sub_block)

    def _get_device_info(self, block):
        for op in block.ops:
            if not op._has_kernel(op.type): continue
            op_device = op.attr(self._op_device_key)
            return op_device

    def _process_persistable_vars_in_multi_sections(self, main_program,
                                                    startup_prog, program_list):
        """
        Special Case: process persistable vars that exist in
        multiple sections, e.g., shared weight
        """
        # var_info = {var_name: [program1, program2...]},
        # persistable var only
        var_info = dict()
        for prog in program_list:
            block = prog.block(0)
            for var_name in block.vars:
                if var_name == "double_buffer_0": continue
                var = block.var(var_name)
                if not var.persistable: continue
                if not var_name in var_info:
                    var_info[var_name] = []
                if not prog in var_info[var_name]:
                    var_info[var_name].append(prog)
        for var_name in list(var_info.keys()):
            if len(var_info[var_name]) == 1:
                var_info.pop(var_name)

        # write_info = {var_name: program}, where program is the only program
        # in which the var named var_name is written.
        write_info = dict()
        for var_name in var_info.keys():
            for prog in var_info[var_name]:
                block = prog.block(0)
                for op in block.ops:
                    if op.type == "recv_v2" or op.type == "create_py_reader" or \
                        op.type == "read" or op.type == "update_loss_scaling":
                        continue
                    # We have processed lr related vars
                    if op.attr(self._op_role_key) == int(
                            self._op_role.Optimize.LRSched):
                        continue
                    if var_name in op.desc.output_arg_names():
                        assert var_name not in write_info, (
                            "two sections write the same var({}): second "
                            "op {}.".format(var_name, op))
                        write_info[var_name] = prog
                        break

        for var_name in var_info.keys():
            # Case 1: read only variables, no special process
            if not var_name in write_info: continue

            # Case 2: one write multiple reads
            write_prog = write_info[var_name]
            write_block = write_prog.block(0)
            write_device = self._get_device_info(write_block)
            write_dev_index = int(write_device.split(':')[1])
            all_progs = var_info[var_name]
            for prog in all_progs:
                if prog == write_prog: continue
                read_block = prog.block(0)
                read_device = self._get_device_info(read_block)
                read_dev_index = int(read_device.split(':')[1])
                pair = (write_dev_index, read_dev_index)
                pair_key = write_dev_index * 1000 + read_dev_index
                if pair not in self._pipeline_pair:
                    self._pipeline_pair.append(pair)
                    self._pp_ring_map[pair_key] = self.ring_id
                    ring_id = self.ring_id
                    self.ring_id += 1
                else:
                    ring_id = self._pp_ring_map[pair_key]

                write_block._insert_op(
                    index=0,
                    type='send_v2',
                    inputs={'X': write_block.var(var_name), },
                    attrs={
                        self._op_device_key: write_device,
                        'use_calc_stream': False,
                        # A trick to make the role LRSched to avoid copy every
                        # microbatch
                        self._op_role_key: self._op_role.LRSched,
                        'peer': read_dev_index,
                        'ring_id': ring_id
                    })
                read_block._insert_op(
                    index=0,
                    type='recv_v2',
                    outputs={'Out': [read_block.var(var_name)]},
                    attrs={
                        'out_shape': read_block.var(var_name).shape,
                        'dtype': read_block.var(var_name).dtype,
                        self._op_device_key: read_device,
                        'use_calc_stream': False,
                        # A trick to make the role LRSched to avoid copy every
                        # microbatch
                        self._op_role_key: self._op_role.LRSched,
                        'peer': write_dev_index,
                        'ring_id': ring_id
                    })
                read_block._insert_op(
                    index=1,
                    type='c_sync_comm_stream',
                    inputs={'X': [read_block.var(var_name)]},
                    outputs={'Out': [read_block.var(var_name)]},
                    attrs={
                        self._op_device_key: read_device,
                        # A trick to make the role LRSched to avoid copy every
                        # microbatch
                        self._op_role_key: self._op_role.LRSched,
                        'ring_id': ring_id
                    })

    def _is_gradient_clip_op(self, op):
        return op.desc.has_attr("op_namescope") \
            and op.desc.attr("op_namescope").startswith("/gradient_clip")

    def _is_regularization_op(self, op):
        return op.desc.has_attr("op_namescope") \
            and op.desc.attr("op_namescope").startswith("/regularization")

    def _get_input_output_info(self, block):
        '''
        Get info of op input and output.
        '''
        # A map from output var to op which generate it.
        output_var_to_op = defaultdict(list)
        # A map from var to op which takes it as input.
        input_var_to_op = defaultdict(list)

        for index, op in enumerate(block.ops):
            for var_name in op.input_arg_names:
                input_var_to_op[var_name].append([op, index])
            for var_name in op.output_arg_names:
                output_var_to_op[var_name].append([op, index])

        return output_var_to_op, input_var_to_op

    def _optimize_forward_send_sync(self, program):
        """
        optimize forward send's sync_comm_stream schedule
        """
        if self.schedule_mode != '1F1B': return

        block = program.block(0)

        recv_type = 'recv_v2' if self.mp_degree == 1 else 'partial_recv'
        backward_recv_index = None
        for index, op in enumerate(block.ops):
            if op.type == recv_type and self._is_backward_op(op):
                backward_recv_index = index
                break

        if backward_recv_index is None: return

        offset = 0
        for index, op in enumerate(list(block.ops)):
            if index >= backward_recv_index: break
            if op.type == 'c_sync_comm_stream' and op.has_attr('pipeline_flag'):
                var_name = op.input_arg_names[0]
                var = block.var(var_name)
                block._remove_op(index + offset, sync=False)
                offset -= 1
                # NOTE:
                # 1. When the backward recv is completed, it indicates
                # that the forward send is completed too. So we only need
                # to use the NOP op to prevent memory release.
                # 2. Because we removed sync_comm_op,
                # we will insert NOP after recv_op.
                block._insert_op_without_sync(
                    index=backward_recv_index,
                    type='nop',
                    inputs={'X': [var]},
                    outputs={'Out': [var]},
                    attrs={self._op_role_key: self._op_role.Backward})
        block._sync_with_cpp()

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        main_block = loss.block
        self.origin_main_block = main_block
        main_program = main_block.program
        if startup_program is None:
            startup_program = default_startup_program()

        pipeline_opt = main_program._pipeline_opt
        assert pipeline_opt, 'Please use pipeline with fleet.'
        required_keys = [
            'local_rank',
            'schedule_mode',
            'micro_batch_size',
            'ring_id',
            'global_ring_id',
            'use_sharding',
            'mp_degree',
            'mp_rank',
        ]
        for key in required_keys:
            assert key in pipeline_opt, \
                'Please use pipeline with fleet to use {}.'.format(key)
        self.local_rank = pipeline_opt['local_rank']
        self.schedule_mode = pipeline_opt['schedule_mode']
        self.micro_batch_size = pipeline_opt['micro_batch_size']
        self.use_sharding = pipeline_opt['use_sharding']
        self.ring_id = pipeline_opt['ring_id']
        self.global_ring_id = pipeline_opt['global_ring_id']
        self.mp_degree = pipeline_opt['mp_degree']
        self.mp_rank = pipeline_opt['mp_rank']
        assert self.mp_degree >= 1
        assert 0 <= self.mp_rank < self.mp_degree

        optimize_ops, params_grads = self._optimizer.minimize(
            loss, startup_program, parameter_list, no_grad_set)
        self._param_device_map = self._origin_optimizer._param_device_map

        self.output_var_to_op, self.input_var_to_op = \
            self._get_input_output_info(main_block)
        # Step1: add default op_device attribute for ops.
        self._add_op_device_attr(main_block)
        device_list = self._check_validation(main_block)

        def device_cmp(device1, device2):
            dev1_id = int(device1.split(':')[1])
            dev2_id = int(device2.split(':')[1])
            if dev1_id < dev2_id:
                return -1
            elif dev1_id > dev2_id:
                return 1
            else:
                return 0
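
        # device_cmp orders devices by stage id, e.g. ["gpu:0", "gpu:1",
        # "gpu:2"]; the check below requires the devices to already appear in
        # that order in the program.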

        sorted_device_list = sorted(device_list, key=cmp_to_key(device_cmp))
        assert sorted_device_list == device_list, (
            "With pipeline parallelism, you must use gpu devices one after "
            "another in the order of their ids.")
        # Step2: add send and recv ops between section boundaries
        self._insert_sendrecv_ops_for_boundaries(main_block)

        # Step3: split program into sections and add pairs of
        # send and recv ops for data var.
        main_program = main_block.program
        program_list = self._split_program(main_program, device_list)
        for p in program_list:
            self._create_vars(p.global_block(), main_block)

        self.local_rank %= len(device_list)
        # Step3.5: optimize forward send sync_comm to overlap send and recv
        self._optimize_forward_send_sync(program_list[self.local_rank])

        # Step4: Special Case: process persistable vars that exist in
        # multiple sections
        # FIXME 
        # self._process_persistable_vars_in_multi_sections(
        #     main_program, startup_program, program_list)

        # Step5: Add sub blocks for section programs
        self._add_sub_blocks(main_block, program_list)

        place_list = []
        for dev in device_list:
            dev_index = int(dev.split(":")[1])
            if core.is_compiled_with_cuda():
                place_list.append(core.CUDAPlace(dev_index % 1))
            elif core.is_compiled_with_npu():
                place_list.append(core.NPUPlace(dev_index % 1))

        # Step6: Split startup program
        new_startup_program = self._split_startup_program(startup_program,
                                                          self.local_rank)

        startup_program._pipeline_opt = {
            "startup_program": new_startup_program,
        }
        real_block = program_list[self.local_rank].global_block()
        self._insert_loss_scale(real_block)
        if not self.use_sharding:
            # Step7: clear gradients before each mini-batch and 
            # accumulate gradients during backward
            self._rename_gradient_var_name(real_block)
            real_block._sync_with_cpp()
            self._accumulate_gradients(real_block)
            real_block._sync_with_cpp()

        if core.is_compiled_with_cuda():
            place_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        elif core.is_compiled_with_npu():
            place_id = int(os.getenv("FLAGS_selected_npus", "0"))
        main_program._pipeline_opt = {
            "trainer": "PipelineTrainer",
            "device_worker": "Section",
            "pipeline_stage": self.local_rank,
            "num_pipeline_stages": len(device_list),
            "schedule_mode": self.schedule_mode,
            "inner_parallelism": len(device_list),
            "section_program": program_list[self.local_rank],
            "place": place_list[self.local_rank],
            "place_id": place_id,
            "sync_steps": -1,
            "num_microbatches": self._num_microbatches,
            "start_cpu_core_id": self._start_cpu_core_id,
        }
        return optimize_ops, params_grads, program_list, self._pipeline_pair, self._pp_ring_map


class RecomputeOptimizer(Optimizer):
    """
	:api_attr: Static Graph

    Recompute Optimizer Wrapper

    Normally, a training step contains three sub-steps: first, run forward
    Operators to calculate the loss; second, run backward Operators to
    calculate the gradients of the parameters; third, apply an optimization
    method to update the values of the parameters.

    In the forward computation process, all variables that are needed by the
    backward computation process will be kept in memory, which occupies a
    great amount of memory when the network becomes very deep.

    Recompute splits the network into k segments. In each segment, it
    recomputes the forward Operators before running the backward Operators.
    This is very helpful for saving memory.

    The Variables that separate a network into segments are called
    checkpoints, and users should set them manually. The usage is very simple:

    Args:
        optimizer (Optimizer): The optimizer that is applied to parameters.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            def gen_data():
                return {"x": np.random.random(size=(32, 32)).astype('float32'),
                "y": np.random.randint(2, size=(32, 1)).astype('int64')}
            def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                print(input_x)
                fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                sum_cost = fluid.layers.reduce_mean(cost)
                return sum_cost, fc_1, prediction
            input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
            input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
            cost, fc_1, pred = mlp(input_x, input_y)

            sgd = fluid.optimizer.Adam(learning_rate=0.01)
            sgd = fluid.optimizer.RecomputeOptimizer(sgd)
            sgd._set_checkpoints([fc_1, pred])
            sgd.minimize(cost)

            print("Finished optimize")
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            step = 10

            for i in range(step):
                cost_val = exe.run(feed=gen_data(),
                       program=fluid.default_main_program(),
                       fetch_list=[cost.name])
                print("step=%d cost=%f" % (i, cost_val[0]))

    """

    def __init__(self, optimizer):
        if framework.in_dygraph_mode():
            raise Exception("In dygraph, don't support RecomputeOptimizer.")
        self._optimizer = optimizer
        self._checkpoints = None
        self._learning_rate = self._optimizer._learning_rate
        self._learning_rate_map = self._optimizer._learning_rate_map
        self.enable_offload = False

    def _set_checkpoints(self, checkpoints):
        """
        Args:
            checkpoints (list): List of Variable or string    
        """
        assert isinstance(
            checkpoints, list
        ), "_checkpoints should be a list of Variable or a list of String"
        for ckpt in checkpoints:
            assert (
                isinstance(ckpt, six.string_types) or isinstance(ckpt, Variable)
            ), "_checkpoints should be a list of Variable or a list of String"
        self._checkpoints = checkpoints

    # should enable offload before calling backward 
    def _enable_offload(self):
        self.enable_offload = True

    @framework.deprecate_stat_dict
    def load(self, state_dict):
        """
	    :api_attr: Static Graph

        load function is not supported by Recompute Optimizer for now.
        :return: None

        Args:
            state_dict: the dict loaded by the load_persistable method

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import paddle.compat as cpt
                
                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction
                
                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")
                
                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                try:
                    state_dict = {}
                    sgd.load(state_dict)
                except NotImplementedError as e:
                    print(cpt.get_exception_message(e))
        """
        raise NotImplementedError(
            "load function is not supported by Recompute Optimizer for now")

    def apply_gradients(self, params_grads):
        """
        call apply_gradients function of self._optimizer.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                import paddle.fluid.framework as framework

                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction


                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")

                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                params_grads = sgd.backward(
                    cost,
                    startup_program=None,
                    parameter_list=None,
                    no_grad_set=None)

                program = cost.block.program
                with framework.program_guard(program, None):
                    optimize_ops = sgd.apply_gradients(params_grads)

                print("Finished apply gradients")
        """

        return self._optimizer.apply_gradients(params_grads=params_grads)

    def _creat_vars(self, varname):
        pinned_var_name = unique_name.generate(varname + "@Pinned")
        fetched_var_name = unique_name.generate(varname + "@Fetch")

        pinned_var = self._main_program.global_block().create_var(
            name=pinned_var_name,
            shape=self.checkpoint_shape,
            dtype=self._main_program.global_block().var(varname).dtype,
            persistable=False,
            stop_gradient=True)

        fetch_var = self._main_program.global_block().create_var(
            name=fetched_var_name,
            shape=self.checkpoint_shape,
            dtype=self._main_program.global_block().var(varname).dtype,
            persistable=False,
            stop_gradient=False)

        return pinned_var_name, fetched_var_name

    def _append_fill_constant_ops(self, startup_program):
        """
        add fill_constant_ops to the end of the prog

        we should fill the pinned vars before running the main_prog
        to instantiate their tensor hold_, which could tell us whether 
        the host memory could hold all the checkpoints from all the 
        GPU devices in this node. 
        """
        op_role = 0
        block = startup_program.global_block()
        fill_constant_vars = self.checkpoint_name2pinned_name.values()
        OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
        for varname in fill_constant_vars:
            var = self._main_program.global_block().var(varname)
            # NOTE (JZ-LIANG) to pre-allocate the CUDAPinned MEM
            pinned_var = block.create_var(
                name=varname,
                shape=self.checkpoint_shape,
                dtype=self._main_program.global_block().var(var.name).dtype,
                persistable=False,
                stop_gradient=True)
            block.append_op(
                type='fill_constant',
                outputs={'Out': varname},
                attrs={
                    "shape": var.shape,
                    "dtype": var.dtype,
                    "value": 0.0,
                    "place_type": 2,
                    OP_ROLE_KEY: op_role,
                })

        return

    def _insert_async_memcpy_op(self, insert_idx, src_varname, dst_varname,
                                op_role, dst_place_type):
        OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
        self.block._insert_op_without_sync(
            insert_idx,
            type='memcpy',
            inputs={'X': [self._main_program.global_block().var(src_varname)]},
            outputs={
                'Out': [self._main_program.global_block().var(dst_varname)]
            },
            attrs={
                "dst_place_type": int(dst_place_type),
                OP_ROLE_KEY: op_role
            })
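
    # _insert_fetch_op below copies pinned -> device memory (dst_place_type=1)
    # and _insert_offload_op copies device -> pinned memory (dst_place_type=2);
    # both reuse the async memcpy op inserted above.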

    def _insert_fetch_op(self, idx, varname):
        assert varname in self.checkpoint_name2pinned_name, "Try to fetch {} from Pinned Memory, but it is NOT a checkpoint".format(
            varname)

        pinned_varname = self.checkpoint_name2pinned_name[varname]
        fetch_varname = self.checkpoint_name2fetch_name[varname]
        self._insert_async_memcpy_op(idx, pinned_varname, fetch_varname, 1, 1)

    def _insert_offload_op(self, idx, varname):
        assert varname in self.checkpoint_name2pinned_name, "Try to offload {} to Pinned Memory, but it is NOT a checkpoint".format(
            varname)
        pinned_varname = self.checkpoint_name2pinned_name[varname]
        self._insert_async_memcpy_op(idx, varname, pinned_varname, 0, 2)

    def _insert_sync_op(self, op_idx, checkpoint_name):
        # single-stream offload needs no sync
        pass

    def _record_fetch_op(self, idx):
        assert len(self.un_fetch_checkpoint_names
                   ) > 0, "Could NOT found checkpoint to fetch"
        checkpoint_name = self.un_fetch_checkpoint_names.pop(-1)
        logging.debug("Record fetch [{}]".format(checkpoint_name))
        self.idx2insertions[idx] = ("fetch", checkpoint_name)

        return checkpoint_name

    def _record_offload_op(self, idx, checkpoint_name):
        expected_checkpoint_name = self.un_offload_checkpoint_names.pop(0)
        assert checkpoint_name == expected_checkpoint_name, "expected to offload [{}] but got [{}]".format(
            expected_checkpoint_name, checkpoint_name)
        logging.debug("Record offload [{}]".format(checkpoint_name))
        self.idx2insertions[idx] = ("offload", checkpoint_name)

    def _record_sync_op(self, idx, checkpoint_name):
        assert checkpoint_name not in self.synced_checkpoints, "Try to sync the checkpoint [{}] twice".format(
            checkpoint_name)
        self.synced_checkpoints.add(checkpoint_name)
        logging.debug("Record offload sync [{}]".format(checkpoint_name))
        self.idx2insertions[idx] = ("sync", checkpoint_name)

    def _parse_backward(self):

        self.idx2insertions = {}
        # don't offload the last checkpoints, to favor throughput        
        self.un_fetch_checkpoint_names = self.sorted_checkpoint_names[:]
        self.un_fetch_checkpoint_names.pop(-1)
        need_fetch_checkpoint_names = self.un_fetch_checkpoint_names[:]
        self.checkpoint_usage_count = {}
        for checkpoint_name in self.un_fetch_checkpoint_names:
            self.checkpoint_usage_count[checkpoint_name] = 0

        self.bw_strart_op_idx = len(self.block.ops)
        for idx, op in enumerate(self.block.ops):
            if int(op.desc.attr("op_role")) == 1:
                self.bw_strart_op_idx = idx
                break

        assert self.bw_strart_op_idx < len(
            self.block.ops), "Could NOT found backword op in prog"

        # fetch second to last checkpoint at the beginning of BW
        fetched_checkpoint_varname = self._record_fetch_op(
            self.bw_strart_op_idx)
        last_last_fetch_checkpoint = None

        for i, op in enumerate(self.block.ops[self.bw_strart_op_idx:]):
            idx = self.bw_strart_op_idx + i
            input_vars = op.desc.input_arg_names()

            for input_var in input_vars:
                if input_var in need_fetch_checkpoint_names:
                    if input_var not in self.un_fetch_checkpoint_names:
                        # fetch the offloaded checkpoint at the first usage of the checkpoint before it
                        if self.checkpoint_usage_count[input_var] == 0:
                            # TODO (JZ-LIANG) sync memcpy_stream if extra stream for memcpy
                            second_to_last_fetch_checkpoint = fetched_checkpoint_varname
                            # there is NO fetch ahead of the first checkpoint
                            if input_var != self.sorted_checkpoint_names[0]:
                                fetched_checkpoint_varname = self._record_fetch_op(
                                    idx)

                        # check that the currently used checkpoint is the last fetched one
                        assert second_to_last_fetch_checkpoint == input_var, "Current recompute segment should use [{}] BUT got [{}]".format(
                            second_to_last_fetch_checkpoint, input_var)
                        # rename
                        self.block.ops[idx]._rename_input(
                            input_var,
                            self.checkpoint_name2fetch_name[input_var])
                        self.checkpoint_usage_count[input_var] += 1
                    else:
                        raise ValueError(
                            "use checkpoint [{}] before fetch in BW".format(
                                input_var))

        assert len(self.un_fetch_checkpoint_names
                   ) == 0, "{} checkpoints have NOT been Recorded".format(
                       self.un_fetch_checkpoint_names)

    def _update_backward(self):
        if len(self.idx2insertions) == 0:
            return
        total_op = len(self.block.ops)
        for op_idx in reversed(range(self.bw_strart_op_idx, total_op)):
            if op_idx in self.idx2insertions:
                operation, checkpoint_name = self.idx2insertions[op_idx]
                if operation == "fetch":
                    self._insert_fetch_op(op_idx, checkpoint_name)
                    logging.debug("Insert [{}] fetch op.".format(
                        checkpoint_name))
                    del self.idx2insertions[op_idx]
                elif operation == "sync":
                    self._insert_sync_op(op_idx, checkpoint_name)
                    logging.debug("Sync [{}] fetch op.".format(checkpoint_name))
        self.block._sync_with_cpp()
        assert len(
            self.idx2insertions) == 0, "{} checkpoints left un-Fetched".format(
                [ele[1] for ele in self.idx2insertions.values()])

    def _parse_forward(self):
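        """
        Scan the ops of the forward pass and plan where offload and sync ops
        for the checkpoints should be inserted. The plan is stored in
        ``self.idx2insertions`` and applied later by ``_update_forward``.
        """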

        self.idx2insertions = {}
        # don't offload the last checkpoint: faster, but less memory saving
        self.un_offload_checkpoint_names = self.sorted_checkpoint_names[:]
        last_checkpoint = self.un_offload_checkpoint_names.pop(-1)
        need_offload_checkpoint_names = self.un_offload_checkpoint_names[:]
        self.checkpoint_usage_count_and_idx = {}
        for checkpoint_name in self.un_offload_checkpoint_names:
            self.checkpoint_usage_count_and_idx[checkpoint_name] = {
                'count': 0,
                'idx': -1
            }
        self.synced_checkpoints = set()
        self.fw_strart_op_idx = len(self.block.ops)
        for idx, op in enumerate(self.block.ops):
            if int(op.desc.attr("op_role")) == 0:
                self.fw_strart_op_idx = idx
                break

        assert self.fw_strart_op_idx < len(
            self.block.ops), "Could NOT find Forward op in prog"
        last_offload_checkpoint = None

        for i, op in enumerate(self.block.ops[self.fw_strart_op_idx:
                                              self.bw_strart_op_idx]):

            idx = self.fw_strart_op_idx + i
            output_vars = op.desc.output_arg_names()
            input_vars = op.desc.input_arg_names()

            for output_var in output_vars:
                if output_var in need_offload_checkpoint_names:
                    assert len(
                        output_vars
                    ) == 1, "chekpoint should be the only Output of a certain op, but [{}] is from [{}]".format(
                        output_var, op)

                    if output_var in self.un_offload_checkpoint_names:
                        # insert sync op if last checkpoint has not been sync
                        if last_offload_checkpoint is not None:
                            if self.checkpoint_usage_count_and_idx[
                                    last_offload_checkpoint]['count'] == 0:
                                self._record_sync_op(idx,
                                                     last_offload_checkpoint)
                            else:
                                last_usage_idx = self.checkpoint_usage_count_and_idx[
                                    last_offload_checkpoint]['idx']
                                assert last_usage_idx > 0, "last_usage_idx of checkpoint [{}] should be larger than 0".format(
                                    last_offload_checkpoint)
                                self._record_sync_op(last_usage_idx + 1,
                                                     last_offload_checkpoint)
                        # insert offload op after the checkpoint's generation op
                        self._record_offload_op(idx + 1, output_var)
                        last_offload_checkpoint = output_var
                    else:
                        raise ValueError(
                            "There should be just ONE op that output checkpoint [{}]".
                            format(output_var))
                # the last to-be-offloaded checkpoint must be synced before the op that outputs the last checkpoint
                if output_var == last_checkpoint:
                    assert len(
                        output_vars
                    ) == 1, "chekpoint should be the only Output of a certain op, but [{}] is from [{}]".format(
                        output_var, op)
                    assert last_offload_checkpoint == self.sorted_checkpoint_names[
                        -2], "the last offload chekpoint before [{}] is suppose to be [{}], but got [{}]".format(
                            last_checkpoint, self.sorted_checkpoint_names[-2],
                            last_offload_checkpoint)
                    # sync if last checkpoint has not been sync
                    if self.checkpoint_usage_count_and_idx[
                            last_offload_checkpoint]['idx'] == 0:
                        self._record_sync_op(idx, last_offload_checkpoint)
                    else:
                        last_usage_idx = self.checkpoint_usage_count_and_idx[
                            last_offload_checkpoint]['idx']
                        assert last_usage_idx > 0, "last_usage_idx of checkpoint [{}] should be larger than 0".format(
                            last_offload_checkpoint)
                        self._record_sync_op(last_usage_idx + 1,
                                             last_offload_checkpoint)
            # record checkpoint usage  
            for input_var in input_vars:
                if input_var in need_offload_checkpoint_names:
                    assert input_var not in self.synced_checkpoints, "checkpoint [{}] used after sync".format(
                        input_var)
                    self.checkpoint_usage_count_and_idx[input_var]['count'] += 1
                    self.checkpoint_usage_count_and_idx[input_var]['idx'] = idx

        assert len(self.un_offload_checkpoint_names
                   ) == 0, "{} checkpoints have NOT been Recorded".format(
                       self.un_offload_checkpoint_names)
        assert len(self.synced_checkpoints) == len(
            need_offload_checkpoint_names
        ), "{} checkpoints have NOT been Recorded".format(
            set(need_offload_checkpoint_names) - set(self.synced_checkpoints))

    def _update_forward(self):
        if len(self.idx2insertions) == 0:
            return
        for op_idx in reversed(
                range(self.fw_strart_op_idx, self.bw_strart_op_idx)):
            if op_idx in self.idx2insertions:
                operation, checkpoint_name = self.idx2insertions[op_idx]
                if operation == "offload":
                    self._insert_offload_op(op_idx, checkpoint_name)
                    logging.debug("Insert [{}] offload op.".format(
                        checkpoint_name))
                    del self.idx2insertions[op_idx]
                elif operation == "sync":
                    self._insert_sync_op(op_idx, checkpoint_name)
                    logging.debug("Insert [{}] offload_sync op.".format(
                        checkpoint_name))
                    del self.idx2insertions[op_idx]

        self.block._sync_with_cpp()
        assert len(self.idx2insertions
                   ) == 0, "{} checkpoints left un-Offloaded".format(
                       [ele[1] for ele in self.idx2insertions.values()])

    def _check_offload_fetch(self):
        # TODO(JZ-LIANG) the single stream offload need no sync
        pass

    def _offload(self, loss, startup_program=None):
        """
        core steps for recompute offload
        1. create pinned vars and temp vars 
        2. parse & update Backward pass: rename, fetch, sync
        3. parse & update Forward pass: offload, sync
        4. verify the correctness
        """
        self._main_program = loss.block.program
        self.block = loss.block
        if startup_program is None:
            startup_program = default_startup_program()

        with program_guard(self._main_program, startup_program):
            assert len(self.checkpoint_shape) > 0, (
                "checkpoints shape {} should be an non empty list like: [12, 512, 1024]".
                format(self.checkpoint_shape))
            assert all([ele > 0 for ele in self.checkpoint_shape]), (
                "all ele in checkpoints shape {} should be a determined integer larger than 0".
                format(self.checkpoint_shape))
            self.checkpoint_name2pinned_name = dict()
            self.checkpoint_name2fetch_name = dict()
            for checkpoint_varname in self.sorted_checkpoint_names:
                pinned_var_name, fetch_var_name = self._creat_vars(
                    checkpoint_varname)
                self.checkpoint_name2pinned_name[
                    checkpoint_varname] = pinned_var_name
                self.checkpoint_name2fetch_name[
                    checkpoint_varname] = fetch_var_name
            self._append_fill_constant_ops(startup_program)
            # TODO (JZ-LIANG) to provide two offload strategies in the future
            # step 2. parse & update BW: rename, fetch, sync
            self._parse_backward()
            self._update_backward()
            # step 3. parse & update FW: offload, sync
            self._parse_forward()
            self._update_forward()
            # step 4. verify the correctness
            self._check_offload_fetch()

        return

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        call append_backward with checkpoints.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables or Variable names to update.
            no_grad_set (set|None): set of Variables or Variable names that should be ignored.
            callbacks (list|None): list of callables to run when appending backward
                operator for one parameter.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
    
                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction
    
    
                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")
    
                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                params_grads = sgd.backward(
                    cost,
                    startup_program=None,
                    parameter_list=None,
                    no_grad_set=None)
                print("Finished backward")
        """
        assert (self._checkpoints is not None
                ), "You should call _set_checkpoints first"

        if framework.in_dygraph_mode():
            raise NotImplementedError(
                "DyGraph currently does not support recompute")

        self._dtype = loss.dtype
        program = loss.block.program
        with program_guard(program, startup_program):
            checkpoint_vars = []
            for ckpt in self._checkpoints:
                if isinstance(ckpt, Variable):
                    checkpoint_vars.append(ckpt)
                else:
                    checkpoint_vars.append(loss.block.var(ckpt))

            # fall back to the regular (non-recompute) backward when checkpoints is empty
            if len(checkpoint_vars) > 0:
                params_grads, sorted_checkpoint_names = append_backward(
                    loss,
                    parameter_list,
                    no_grad_set,
                    checkpoints=checkpoint_vars)
            else:
                params_grads = append_backward(
                    loss,
                    parameter_list,
                    no_grad_set,
                    checkpoints=checkpoint_vars)

        if self.enable_offload:
            self.sorted_checkpoint_names = sorted_checkpoint_names
            self._offload(loss, startup_program=startup_program)

        return params_grads

    def apply_optimize(self, loss, startup_program, params_grads):
        """
        call the apply_optimize function of self._optimizer
        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            params_grads (list): list of (param, grad) pair to do optimization.
        Examples:
            .. code-block:: python
                import paddle.fluid as fluid
                
                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction                
                
                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)
                print("Finished FF")
                
                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                params_grads = sgd.backward(
                    cost,
                    startup_program=None,
                    parameter_list=None,
                    no_grad_set=None)
                
                optimize_ops = sgd.apply_optimize(
                    cost, startup_program=None, params_grads=params_grads)
                
                print("Finished apply_optimize")
        """

        return self._optimizer.apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
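        """
        Add backward ops with recompute checkpoints and optimization ops to
        the programs for ``loss``, then return ``(optimize_ops, params_grads)``.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables or Variable names to update.
            no_grad_set (set|None): set of Variables or Variable names that should be ignored.

        Examples:
            .. code-block:: python

                # illustrative sketch: mirrors the ``apply_optimize`` example above,
                # ending with a single call to ``minimize``
                import paddle.fluid as fluid

                def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                    sum_cost = fluid.layers.reduce_mean(cost)
                    return sum_cost, fc_1, prediction

                input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                cost, fc_1, pred = mlp(input_x, input_y)

                sgd = fluid.optimizer.Adam(learning_rate=0.01)
                sgd = fluid.optimizer.RecomputeOptimizer(sgd)
                sgd._set_checkpoints([fc_1, pred])
                sgd.minimize(cost)
        """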
        assert isinstance(loss, Variable), "The loss should be an Variable."
        assert (self._checkpoints is not None
                ), "You should call _set_checkpoints first"
        if framework.in_dygraph_mode():
            raise NotImplementedError(
                "DyGraph currently does not support recompute")
        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
            no_grad_set=no_grad_set)

        optimize_ops = self.apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads)

        return optimize_ops, params_grads


class LookaheadOptimizer(object):
    r"""
    :api_attr: Static Graph

    This implements the Lookahead optimizer of the
    paper: https://arxiv.org/abs/1907.08610.

    Lookahead keeps two sets of params: the fast_params and
    the slow_params. inner_optimizer updates fast_params every 
    training step. Lookahead updates the slow_params and fast_params 
    every k training steps as follows:

    .. math::

        slow\_param_t &= slow\_param_{t-1} + \\alpha * (fast\_param_{t-1} - slow\_param_{t-1})

        fast\_param_t &= slow\_param_t

    Args:
        inner_optimizer (Optimizer): The optimizer that update fast params step by step. 
        alpha (float): The learning rate of Lookahead.
        k (int): The slow params are updated every k steps.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np
            import numpy.random as random

            paddle.enable_static()
        
            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
            y = fluid.layers.fc(input=[x], size=2, act="softmax")
            loss = fluid.layers.cross_entropy(input=y, label=label)
            loss = fluid.layers.mean(x=loss)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
                                                alpha=0.5,
                                                k=5)
            optimizer.minimize(loss)
            main_program = fluid.default_main_program()
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            def train_reader(limit=5):
                for i in range(limit):
                    yield random.random([2]).astype('float32'), random.random([1]).astype('int64')
            
            feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
            reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1)
            
            for batch_data in reader():
                exe.run(fluid.default_main_program(),
                feed=feeder.feed(batch_data))

    """

    def __init__(self, inner_optimizer, alpha=0.5, k=5):

        if framework.in_dygraph_mode():
            raise Exception("In dygraph, don't support LookaheadOptimizer.")
        assert (inner_optimizer is not None), "inner optimizer can not be None"
        assert (
            0.0 <= alpha <= 1.0
        ), "alpha should be larger or equal to 0.0, and less or equal than 1.0"
        assert (isinstance(k, int) and k > 0), "k should be a positive integer"

        self.inner_optimizer = inner_optimizer
        self.alpha = alpha
        self.k = k
        self.type = "lookahead"

    def minimize(self, loss, startup_program=None):
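        """
        Apply the inner optimizer to ``loss``, create the slow-parameter
        copies, and append the Lookahead update (performed every ``k`` steps)
        to the main program. Returns the result of ``inner_optimizer.minimize``.
        """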

        # Apply inner optimizer to the main_program
        mini_out = self.inner_optimizer.minimize(
            loss, startup_program=startup_program)

        # Get startup_program and main_program
        if startup_program is None:
            startup_program = default_startup_program()
        main_block = loss.block

        # add some vars to the main_program
        params = [param.name for param in main_block.all_parameters()]
        param_to_slow = {}
        for param in params:
            fast_var = main_block.var(param)
            assert (fast_var is not None)
            slow_var = main_block.create_var(
                name=param + "@SLOW",
                shape=fast_var.shape,
                dtype=fast_var.dtype,
                persistable=True)
            param_to_slow[param] = slow_var

        # add some vars to the startup_program
        startup_block = startup_program.global_block()
        for param in params:
            fast_var = startup_block.var(param)
            assert (fast_var is not None)
            slow_var = startup_block.create_var(
                name=param + "@SLOW",
                shape=fast_var.shape,
                dtype=fast_var.dtype,
                persistable=True)

            startup_block.append_op(
                type="assign",
                inputs={"X": fast_var},
                outputs={"Out": slow_var})

        with framework.program_guard(main_block.program, startup_program):
            # Add Var k to main prog and startup prog
            k = layers.create_global_var(
                name="lookahead_k",
                shape=[1],
                value=int(self.k),
                dtype='int32',
                persistable=True)

            # Add Var alpha to main prog and startup prog
            alpha = layers.create_global_var(
                name="lookahead_alpha",
                shape=[1],
                value=float(self.alpha),
                dtype='float32',
                persistable=True)

            # Add Var step
            step = layers.create_global_var(
                name="lookahead_step",
                shape=[1],
                value=int(0),
                dtype='int32',
                persistable=True)
            layers.increment(x=step, value=1.0, in_place=True)

            # lookahead
            zero_var = layers.fill_constant(
                shape=[1], dtype='float32', value=0.0)

            one_var = layers.fill_constant(
                shape=[1], dtype='float32', value=1.0)

            mod = layers.elementwise_mod(step, k)
            with layers.control_flow.Switch() as switch:
                with switch.case(step == one_var):
                    for param_name in params:
                        fast_var = main_block.var(param_name)
                        slow_var = param_to_slow[param_name]
                        layers.assign(input=fast_var, output=slow_var)
                with switch.case(mod == zero_var):
                    for param_name in params:
                        fast_var = main_block.var(param_name)
                        slow_var = param_to_slow[param_name]
                        tmp_var = layers.elementwise_add(
                            layers.elementwise_mul(fast_var, alpha),
                            layers.elementwise_mul(
                                slow_var,
                                layers.elementwise_sub(one_var, alpha)))
                        layers.assign(input=tmp_var, output=slow_var)
                        layers.assign(input=tmp_var, output=fast_var)
                with switch.default():
                    pass
        return mini_out


class GradientMergeOptimizer(object):
    """
    Gradient Merge, also called Gradient Accumulation,
    is a training strategy for larger batches. With this strategy,
    the parameter will not be updated until specific steps.

    For each step, the forward network and the backward network
    will run to calculate the gradient of the parameters.

    For every k steps, the optimization network will run,
    applying a specific optimization method (such as SGD, Adam)
    to the parameters.

    Args:
        inner_optimizer (Optimizer): The specific optimization (such as SGD, Adam)
            which updates the parameters
        k_steps (int): the update period of the parameters
        avg (bool): whether to average the gradients of each mini-batch,
            the default value is `True`

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            def gen_data(batch_size):
                return {"x": np.random.random(size=(batch_size, 32)).astype('float32'),
                        "y": np.random.random(size=(batch_size, 1)).astype('int64')}

            def mlp(input_x, input_y, hid_dim=128, label_dim=2):
                fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
                prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
                cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
                sum_cost = fluid.layers.reduce_mean(cost)
                return sum_cost, fc_1, prediction

            input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
            input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
            cost, fc_1, pred = mlp(input_x, input_y)
            sgd = fluid.optimizer.Adam(learning_rate=0.01)
            sgd = fluid.optimizer.GradientMergeOptimizer(sgd, k_steps=4, avg=True)
            sgd.minimize(cost)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            for i in range(10):
                cost_val = exe.run(feed=gen_data(32),
                           program=fluid.default_main_program(),
                           fetch_list=[cost.name])
                print("step=%d, cost=%f" % (i, cost_val[0]))
    """

    GRAD_MERGE_COND_NAME = "grad_merge_cond_name"

    def __init__(self, inner_optimizer, k_steps=1, avg=True):
        if framework.in_dygraph_mode():
            raise Exception(
                "In dygraph, we don't support GradientMergeOptimizer. "
                "You can do gradient merge by yourself with k-times forward + backward, "
                "and a one-time optimizer.minimize()")

        assert (inner_optimizer is not None), "inner optimizer can not be None"
        assert (isinstance(k_steps, int) and
                k_steps > 0), "k_steps should be a positive integer"

        self.inner_optimizer = inner_optimizer
        self.k_steps = k_steps
        self.type = "gradient_merge"
        self.avg = avg
        self._optimize_ops = None

    def _set_k_steps(self, k_steps):
        self.k_steps = k_steps

    def _set_avg(self, avg):
        self.avg = avg

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
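        """
        Call the inner optimizer's ``backward()`` to get the (param, grad)
        pairs for the current mini-batch. ``parameter_list`` and
        ``no_grad_set`` must be None when using GradientMergeOptimizer.
        """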
        assert isinstance(loss, Variable), "The loss should be a Variable."
        assert (
            parameter_list is None
        ), "The parameter_list should be None when using GradientMergeOptimizer"
        assert (
            no_grad_set is None
        ), "The no_grad_set should be None when using GradientMergeOptimizer"

        params_grads = self.inner_optimizer.backward(
            loss, startup_program=startup_program)
        return params_grads

    def apply_optimize(self, loss, startup_program, params_grads):
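        """
        Apply the merged gradients by calling ``apply_gradients`` under the
        program guard of ``loss``'s program; returns the optimize ops.
        """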
        program = loss.block.program
        with program_guard(program, startup_program):
            optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops

    def _is_the_backward_op(self, op):
        op_maker = core.op_proto_and_checker_maker
        backward = core.op_proto_and_checker_maker.OpRole.Backward
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):
            return True
        return False

    def _remove_op_role_var(self, param, grad):
        op_maker = core.op_proto_and_checker_maker
        op = grad.op
        assert self._is_the_backward_op(op), \
            'grad.op={} is not the backward op which produces the grad={}' \
            .format(op, grad.name)

        block = grad.block
        var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
        assert param.name in var_attr, \
            'when using GradientMergeOptimizer, param={} must be in var_attr={}' \
            .format(param.name, var_attr)
        assert grad.name in var_attr, \
            'when using GradientMergeOptimizer, grad={} must be in var_attr={}' \
            .format(grad.name, var_attr)

        # remove (param, grad) from op_role_var
        var_attr.remove(param.name)
        var_attr.remove(grad.name)
        if len(var_attr) > 1:
            op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
        else:
            op._remove_attr(op_maker.kOpRoleVarAttrName())

    def _add_gm_op_role_var(self, op, param, grad, cond):
        grad.op = op
        op_maker = core.op_proto_and_checker_maker
        backward = op_maker.OpRole.Backward

        # NOTE(wangxi). When distributed, we will insert grad_merge_all_reduce_op_handle
        # in multi_devices_graph_pass, which will allreduce(grad) if cond is True, else
        # do nothing.
        # In this way, the gradient can be merged first, and then communicate when the
        # condition is met, reducing the number of communications to increase the
        # speed.
        op._set_attr(self.GRAD_MERGE_COND_NAME, cond.name)
        op._set_attr(op_maker.kOpRoleAttrName(), backward)
        op._set_attr(op_maker.kOpRoleVarAttrName(), [param.name, grad.name])

    def _get_gm_cond_var(self, main_block):
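        """
        Create the persistent step counter and return the boolean cond
        variable: the counter is incremented and taken modulo ``k_steps`` on
        CPU, and cond becomes True only on the steps where the counter wraps
        back to zero.
        """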
        # Add const var
        k_step_var = layers.create_global_var(
            name="gradient_merge_k",
            shape=[1],
            value=int(self.k_steps),
            dtype='int32',
            persistable=True,
            force_cpu=True)

        zero_var = layers.create_global_var(
            name="gradient_merge_zero",
            shape=[1],
            value=int(0),
            dtype='int32',
            persistable=True,
            force_cpu=True)

        # Add step var & cond var
        step_var = layers.create_global_var(
            name="gradient_merge_step",
            shape=[1],
            value=int(0),
            dtype='int32',
            persistable=True,
            force_cpu=True)

        cond_var = layers.create_global_var(
            name="gradient_merge_cond",
            shape=[1],
            value=bool(0),
            dtype='bool',
            persistable=True,
            force_cpu=True)

        with device_guard("cpu"):
            # step_var = (step_var + 1) % k_step
            layers.increment(x=step_var, value=1.0, in_place=True)
            main_block.append_op(
                type='elementwise_mod',
                inputs={'X': step_var,
                        'Y': k_step_var},
                outputs={'Out': step_var},
                attrs={'axis': -1,
                       'use_mkldnn': False})

            # cond_var = (step_var == 0)
            main_block.append_op(
                type='equal',
                inputs={'X': step_var,
                        'Y': zero_var},
                outputs={'Out': cond_var})

        return cond_var

    def apply_gradients(self, params_grads):
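        """
        Accumulate each gradient into a persistent ``@GRAD@GradientMerge``
        variable and, inside a ``layers.cond`` block, run the inner
        optimizer's ``apply_gradients`` (optionally averaging by ``k_steps``
        first) and reset the accumulators when the merge condition is met.
        """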
        main_program = default_main_program()
        startup_program = default_startup_program()
        main_block = main_program.global_block()
        startup_block = startup_program.global_block()

        cond = self._get_gm_cond_var(main_block)

        # TODO(mapingshuo) support sparse embedding
        # step1: remove grad.op's op_role_var
        for param, grad in params_grads:
            assert (
                param.type != core.VarDesc.VarType.SELECTED_ROWS
            ), "SELECTED_ROWS is not supported in GradientMergeOptimizer for now"

            self._remove_op_role_var(param, grad)

        param_to_grad = {k.name: v for (k, v) in params_grads}
        param_names = param_to_grad.keys()
        param_to_gradient_merge = {}

        new_params_grads = []
        # step2: create gradient_merge var and init with 0
        # and update op_role_var
        for param, grad in params_grads:
            param_name = param.name
            param_var = main_block.var(param_name)
            assert (param_var is not None)
            gradient_merge_var = main_block.create_var(
                name=param_name + "@GRAD@GradientMerge",
                shape=param_var.shape,
                dtype=param_var.dtype,
                persistable=True)
            param_to_gradient_merge[param_name] = gradient_merge_var

            startup_gradient_merge_var = startup_block.create_var(
                name=param_name + "@GRAD@GradientMerge",
                shape=param_var.shape,
                dtype=param_var.dtype,
                persistable=True)
            startup_block.append_op(
                type="fill_constant",
                outputs={"Out": startup_gradient_merge_var},
                attrs={
                    "shape": param_var.shape,
                    "dtype": param_var.dtype,
                    "value": float(0),
                })

            # grad_merge += grad
            new_grad_op = main_block.append_op(
                type="elementwise_add",
                inputs={'X': grad,
                        'Y': gradient_merge_var},
                outputs={'Out': gradient_merge_var},
                attrs={'axis': -1,
                       'use_mkldnn': False})
            self._add_gm_op_role_var(new_grad_op, param, gradient_merge_var,
                                     cond)
            new_params_grads.append([param, gradient_merge_var])

        def true_apply_gradient():
            cur_block_idx = main_program.current_block_idx
            cur_block = main_program.current_block()

            # cur_block's forward_block & backward_block is itself
            cur_block._set_forward_block_idx(cur_block_idx)

            if self.avg:
                for param, new_grad in new_params_grads:
                    # grad /= k_steps
                    cur_block.append_op(
                        type='scale',
                        inputs={'X': new_grad},
                        outputs={'Out': new_grad},
                        attrs={
                            'scale': 1.0 / self.k_steps,
                            'bias': 0.0,
                            'bias_after_scale': False
                        })

            for param, new_grad in new_params_grads:
                # NOTE. regularization will append ops to grad.block,
                # while new_grad's real block is global_block,
                # but we want to append regularization ops to cur_block,
                # so we set new_grad.block = cur_block
                new_grad.block = cur_block

            self._optimize_ops = self.inner_optimizer.apply_gradients(
                new_params_grads)

            # clear gradient_merge_vars
            for param, new_grad in new_params_grads:
                layers.fill_constant(
                    shape=new_grad.shape,
                    dtype=new_grad.dtype,
                    value=0.0,
                    out=new_grad)

        # step3. apply gradient
        layers.cond(cond, true_fn=true_apply_gradient, false_fn=None)

        return self._optimize_ops

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
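        """
        Add backward ops and the gradient-merge optimization ops to the
        programs for ``loss``; returns ``(optimize_ops, params_grads)``.
        """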
        assert isinstance(loss, Variable), "The loss should be a Variable."

        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
            no_grad_set=no_grad_set)

        optimize_ops = self.apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads)

        return optimize_ops, params_grads