#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import defaultdict
from paddle.fluid.framework import Program
import framework
import layers
from backward import append_backward
from framework import program_guard
import unique_name
from initializer import Constant
from layer_helper import LayerHelper
from regularizer import append_regularization_ops
from clip import append_gradient_clip_ops, error_clip_callback
from contextlib import contextmanager

__all__ = [
    'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad',
    'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
    'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'Adadelta', 'ModelAverage'
]


class Optimizer(object):
    """Optimizer Base class.

    Define the common interface of an optimizer.
    Users should not use this class directly,
    but should instead use one of its implementations.
    """

    def __init__(self, learning_rate, regularization=None):
        if not isinstance(learning_rate, float) and \
                not isinstance(learning_rate, framework.Variable):
            raise TypeError("learning rate should be float or Variable")
        self.regularization = regularization
        self._learning_rate = learning_rate
        # each program should have an independent learning rate
        # program -> Variable(learning_rate)
        self._learning_rate_map = dict()
        if isinstance(self._learning_rate, framework.Variable):
            self._learning_rate_map[framework.default_main_program(
            )] = self._learning_rate
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra variables associated with the parameters
        # to train. These variables are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
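        # e.g. (illustrative names): {"velocity": {"fc_0.w_0": velocity_var}}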
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None

    def _create_global_learning_rate(self):
        lr = self.global_learning_rate()

        if isinstance(lr, framework.Variable):
            return
        else:
            if not isinstance(self._learning_rate, float):
                raise TypeError(
                    "learning rate variable is created outside optimizer,"
                    "cannot create new learning rate variable for new program")

        # create learning rate in the current main program
        self._learning_rate_map[framework.default_main_program(
        )] = layers.create_global_var(
            name=unique_name.generate("learning_rate"),
            shape=[1],
            value=float(self._learning_rate),
            dtype='float32',
            persistable=True)

    def global_learning_rate(self, program=None):
        """
        get the global decayed learning rate of the given program
        :return: the learning rate Variable, or None if it has not been created
        """
        if program is None:
            program = framework.default_main_program()
        return self._learning_rate_map.get(program, None)

    def _append_optimize_op(self, block, param_and_grad):
        """ append optimize operator to block and return all the added optimize_op
        """
        raise NotImplementedError()

    def _create_param_lr(self, param_and_grad):
        # create learning rate variable for every parameter
        param = param_and_grad[0]
        param_lr = param.optimize_attr['learning_rate']
        if param_lr == 1.0:
            return self.global_learning_rate()
        else:
            return self.global_learning_rate() * param_lr
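
    # The per-parameter scale used above typically comes from the ParamAttr of
    # the layer that created the parameter; a minimal sketch (names here are
    # illustrative):
    #
    #     w_attr = fluid.ParamAttr(name="fc_w", learning_rate=2.0)
    #     y = fluid.layers.fc(input=x, size=10, param_attr=w_attr)
    #
    # A parameter with learning_rate=2.0 is updated with twice the global
    # learning rate, while the default of 1.0 uses the global rate unchanged.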

    def _create_accumulators(self, block, parameters):
        """Create all accumulators needed by the parameters

        Args:
            block: the block in which the loss variable is present
            parameters: list of parameter variables for the optimizer
        """
        pass

    def _finish_update(self, block):
        """Finish any custom updates needed
           before completing an optimization step

        Args:
            block: the block in which the loss variable is present

        Returns:
            list of finish ops or None
        """
        pass

    def _add_accumulator(self,
                         name,
                         param,
                         dtype=None,
                         fill_value=0.0,
                         shape=None):
        """Utility function to add an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter variable for which the accumulator is to be added
            dtype: data type of the accumulator variable
            fill_value: value used to initialize the accumulator variable
            shape: shape of the accumulator variable; defaults to the shape
                of the parameter
        """
        if (name in self._accumulators and
                param.name in self._accumulators[name]):
            raise Exception("Accumulator {} already exists for parameter {}".
                            format(name, param.name))
        if shape is None:
            shape = param.shape
        assert isinstance(self.helper, LayerHelper)
        var = self.helper.create_global_variable(
            name=unique_name.generate(name),
            persistable=True,
            dtype=dtype or param.dtype,
            type=param.type,
            shape=shape)
        self.helper.set_variable_initializer(
            var, initializer=Constant(value=float(fill_value)))
        self._accumulators[name][param.name] = var
        return var

    def _get_accumulator(self, name, param):
        """Utility function to fetch an accumulator for a parameter

        Args:
            name: name of the accumulator
            param: parameter variable for which accumulator is to be fetched

        Returns:
            accumulator variable for the parameter
        """
        if (name not in self._accumulators or
                param.name not in self._accumulators[name]):
            raise Exception("Accumulator {} does not exist for parameter {}".
                            format(name, param.name))
        return self._accumulators[name][param.name]

    def create_optimization_pass(self,
                                 parameters_and_grads,
                                 loss,
                                 startup_program=None):
        """Add optimization operators to update gradients to variables.

        Args:
          loss: the target that this optimization is for.
          parameters_and_grads: a list of (variable, gradient) pairs to update.
          startup_program: the startup program to which initialization
            operators for the optimizer variables are added.

        Returns:
          return_op_list: a list of operators that will complete one step of
          optimization. This will include parameter update ops, global step
          update ops and any other custom ops required by subclasses to manage
          their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        #  _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.

        # Create any accumulators
        program = loss.block.program
        with program_guard(program, startup_program):
            global_block = framework.default_main_program().global_block()
            start = len(global_block.ops)
            self.helper = LayerHelper(self.__class__.__name__)
            self._create_accumulators(loss.block,
                                      [p[0] for p in parameters_and_grads])
            self._create_global_learning_rate()

            optimize_ops = []
            for param_and_grad in parameters_and_grads:
                if param_and_grad[0].trainable is True and param_and_grad[
                        1] is not None:
                    optimize_op = self._append_optimize_op(loss.block,
                                                           param_and_grad)
                    optimize_ops.append(optimize_op)

            # Get custom finish ops for subclasses
            # FIXME: Need to fix this once we figure out how to handle dependencies
            self._finish_update(loss.block)

            end = len(global_block.ops)
            return global_block.slice_ops(start, end)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """Add operations to minimize `loss` by updating `parameter_list`.

        This method combines interface `append_backward()` and
        `create_optimization_pass()` into one.
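
        A typical call pattern (illustrative; assumes `cost` is the loss
        Variable of the network and any concrete subclass such as SGD):

        .. code-block:: python

            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            opt_ops, params_grads = sgd.minimize(cost)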
        """
        params_grads = append_backward(loss, parameter_list, no_grad_set,
                                       [error_clip_callback])

        params_grads = sorted(params_grads, key=lambda x: x[0].name)

        params_grads = append_gradient_clip_ops(params_grads)

        # Add regularization if any
        params_grads = append_regularization_ops(params_grads,
                                                 self.regularization)

        optimize_ops = self.create_optimization_pass(params_grads, loss,
                                                     startup_program)
        return optimize_ops, params_grads


class SGDOptimizer(Optimizer):
    """ Simple SGD optimizer without any state.
    """

    def __init__(self, learning_rate, **kwargs):
        assert learning_rate is not None
        super(SGDOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "sgd"
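
    # Illustrative usage sketch (assumes `import paddle.fluid as fluid` and a
    # `cost` loss Variable built from fluid layers):
    #
    #     sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.2)
    #     sgd_optimizer.minimize(cost)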

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        # create the optimize op
        sgd_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={"ParamOut": param_and_grad[0]})

        return sgd_op


class MomentumOptimizer(Optimizer):
    """Simple Momentum optimizer with velocity state
    """
    _velocity_acc_str = "velocity"
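
    # Illustrative usage sketch (assumes a `cost` loss Variable built from
    # fluid layers; the hyperparameter values are only examples):
    #
    #     optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
    #     _, params_grads = optimizer.minimize(cost)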

    def __init__(self, learning_rate, momentum, use_nesterov=False, **kwargs):
        assert learning_rate is not None
        assert momentum is not None
        super(MomentumOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "momentum"
        self._momentum = momentum
        self._use_nesterov = bool(use_nesterov)

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._velocity_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        velocity_acc = self._get_accumulator(self._velocity_acc_str,
                                             param_and_grad[0])
        # create the momentum optimize op
        momentum_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Velocity": velocity_acc,
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "VelocityOut": velocity_acc
            },
            attrs={"mu": self._momentum,
                   "use_nesterov": self._use_nesterov})

        return momentum_op


class AdagradOptimizer(Optimizer):
    """Simple Adagrad optimizer with moment state
    """
    _moment_acc_str = "moment"
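
    # Illustrative usage sketch (assumes a `cost` loss Variable built from
    # fluid layers; values below are only examples):
    #
    #     optimizer = fluid.optimizer.Adagrad(learning_rate=0.01, epsilon=1.0e-6)
    #     _, params_grads = optimizer.minimize(cost)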

    def __init__(self, learning_rate, epsilon=1.0e-6, **kwargs):
        assert learning_rate is not None
        assert epsilon is not None
        super(AdagradOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "adagrad"
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])

        # Create the adagrad optimizer op
        adagrad_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": moment_acc,
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={"ParamOut": param_and_grad[0],
                     "MomentOut": moment_acc},
            attrs={"epsilon": self._epsilon})

        return adagrad_op


class AdamOptimizer(Optimizer):
    """Implements the Adam Optimizer
    """
    _moment1_acc_str = "moment1"
    _moment2_acc_str = "moment2"
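
    # Illustrative usage sketch (assumes a `cost` loss Variable built from
    # fluid layers; the hyperparameter values are only examples):
    #
    #     optimizer = fluid.optimizer.Adam(
    #         learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8)
    #     _, params_grads = optimizer.minimize(cost)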

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 **kwargs):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(AdamOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "adam"
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        main_block = block.program.global_block()
        # Create beta1 and beta2 power tensors
        beta_shape = [1]
        self._beta1_pow_acc = self.helper.create_global_variable(
            name=unique_name.generate('beta1_pow_acc'),
            dtype='float32',
            shape=beta_shape,
            lod_level=0,
            persistable=True)
        self.helper.set_variable_initializer(
            self._beta1_pow_acc, initializer=Constant(self._beta1))

        self._beta2_pow_acc = self.helper.create_global_variable(
            name=unique_name.generate('beta2_pow_acc'),
            dtype='float32',
            shape=beta_shape,
            lod_level=0,
            persistable=True)

        self.helper.set_variable_initializer(
            self._beta2_pow_acc, initializer=Constant(self._beta2))

        # Create accumulator tensors for first and second moments
        for p in parameters:
            self._add_accumulator(self._moment1_acc_str, p)
            self._add_accumulator(self._moment2_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment1 = self._get_accumulator(self._moment1_acc_str,
                                        param_and_grad[0])
        moment2 = self._get_accumulator(self._moment2_acc_str,
                                        param_and_grad[0])
        # create the adam optimize op
        adam_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": self._create_param_lr(param_and_grad),
                "Moment1": moment1,
                "Moment2": moment2,
                "Beta1Pow": self._beta1_pow_acc,
                "Beta2Pow": self._beta2_pow_acc
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "Moment1Out": moment1,
                "Moment2Out": moment2
            },
            attrs={
                "beta1": self._beta1,
                "beta2": self._beta2,
                "epsilon": self._epsilon
            })

        return adam_op

    def _finish_update(self, block):
        """Update Beta1 and Beta2 Power accumulators
        """
        assert isinstance(block, framework.Block)
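        # Scaling once per optimization pass keeps the accumulators equal to
        # beta1**t and beta2**t after t passes, which the adam op uses for
        # bias correction.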
        main_block = block.program.global_block()
        scale_beta1 = main_block.append_op(
            type="scale",
            inputs={"X": self._beta1_pow_acc},
            outputs={"Out": self._beta1_pow_acc},
            attrs={"scale": self._beta1})

        scale_beta2 = main_block.append_op(
            type="scale",
            inputs={"X": self._beta2_pow_acc},
            outputs={"Out": self._beta2_pow_acc},
            attrs={"scale": self._beta2})

        return [scale_beta1, scale_beta2]


class AdamaxOptimizer(Optimizer):
    """Implements the Adamax Optimizer
    """
    _moment_acc_str = "moment"
    _inf_norm_acc_str = "inf_norm"
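
    # Illustrative usage sketch (assumes a `cost` loss Variable built from
    # fluid layers; the learning rate below is only an example):
    #
    #     optimizer = fluid.optimizer.Adamax(learning_rate=0.002)
    #     _, params_grads = optimizer.minimize(cost)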

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 **kwargs):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        super(AdamaxOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "adamax"
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        # Create beta1 power accumulator tensor
        beta_shape = [1]
        self._beta1_pow_acc = self.helper.create_global_variable(
            name=unique_name.generate('beta1_pow_acc'),
            dtype='float32',
            shape=beta_shape,
            lod_level=0,
            persistable=True)
        self.helper.set_variable_initializer(
            self._beta1_pow_acc, initializer=Constant(self._beta1))

        # Create accumulator tensors for first moment and infinity norm
        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)
            self._add_accumulator(self._inf_norm_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0])
        inf_norm = self._get_accumulator(self._inf_norm_acc_str,
                                         param_and_grad[0])
        # create the adamax optimize op
        adamax_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "LearningRate": self._create_param_lr(param_and_grad),
                "Moment": moment,
                "InfNorm": inf_norm,
                "Beta1Pow": self._beta1_pow_acc
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "MomentOut": moment,
                "InfNormOut": inf_norm
            },
            attrs={
                "beta1": self._beta1,
                "beta2": self._beta2,
                "epsilon": self._epsilon
            })

        return adamax_op

    def _finish_update(self, block):
        """Update Beta1 Power accumulator
        """
        assert isinstance(block, framework.Block)
        main_block = block.program.global_block()
        scale_beta1 = main_block.append_op(
            type="scale",
            inputs={"X": self._beta1_pow_acc},
            outputs={"Out": self._beta1_pow_acc},
            attrs={"scale": self._beta1})

        return [scale_beta1]


class DecayedAdagradOptimizer(Optimizer):
    """Simple Decayed Adagrad optimizer with moment state
    """
    _moment_acc_str = "moment"
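
    # Illustrative usage sketch (assumes a `cost` loss Variable built from
    # fluid layers; the hyperparameter values are only examples):
    #
    #     optimizer = fluid.optimizer.DecayedAdagrad(
    #         learning_rate=0.01, decay=0.95, epsilon=1.0e-6)
    #     _, params_grads = optimizer.minimize(cost)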

    def __init__(self, learning_rate, decay=0.95, epsilon=1.0e-6, **kwargs):
        assert learning_rate is not None
        assert decay is not None
        assert epsilon is not None

        super(DecayedAdagradOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "decayed_adagrad"
        self._decay = decay
        self._epsilon = epsilon

    def _create_accumulators(self, block, parameters):
        assert isinstance(block, framework.Block)

        for p in parameters:
            self._add_accumulator(self._moment_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])

        # Create the decayed adagrad optimizer op
        decayed_adagrad_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": moment_acc,
                "LearningRate": self._create_param_lr(param_and_grad)
            },
            outputs={"ParamOut": param_and_grad[0],
                     "MomentOut": moment_acc},
            attrs={"epsilon": self._epsilon})

        return decayed_adagrad_op


class AdadeltaOptimizer(Optimizer):
    """
    **Adadelta Optimizer**
    Simple Adadelta optimizer with average squared grad state and
    average squared update state.
    For details of Adadelta, please refer to
    `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD
    <http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_.

    ..  math::

        E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
        learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\
                          E(g_t^2) + \\epsilon ) ) \\\\
        E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2

    Args:
        learning_rate(float): global learning rate
        rho(float): rho in equation
        epsilon(float): epsilon in equation

    Examples:
        .. code-block:: python

            optimizer = fluid.optimizer.Adadelta(
                learning_rate=0.0003, epsilon=1.0e-6, rho=0.95)
            _, params_grads = optimizer.minimize(cost)
    """

    _avg_squared_grad_acc_str = "_avg_squared_grad"
    _avg_squared_update_acc_str = "_avg_squared_update"

    def __init__(self, learning_rate, epsilon=1.0e-6, rho=0.95, **kwargs):
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        super(AdadeltaOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        self.type = "adadelta"
        self._epsilon = epsilon
        self._rho = rho

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._avg_squared_grad_acc_str, p)
            self._add_accumulator(self._avg_squared_update_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        avg_squared_grad_acc = self._get_accumulator(
            self._avg_squared_grad_acc_str, param_and_grad[0])
        avg_squared_update_acc = self._get_accumulator(
            self._avg_squared_update_acc_str, param_and_grad[0])

        # Create the adadelta optimizer op
        adadelta_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "AvgSquaredGrad": avg_squared_grad_acc,
                "AvgSquaredUpdate": avg_squared_update_acc
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "AvgSquaredGradOut": avg_squared_grad_acc,
                "AvgSquaredUpdateOut": avg_squared_update_acc
            },
            attrs={"epsilon": self._epsilon,
                   "rho": self._rho})

        return adadelta_op


class RMSPropOptimizer(Optimizer):
    """
    Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning
    rate method. The slides that originally proposed RMSProp: Slide 29 of
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .

    The original equation is as follows:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\

        w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w)

    The first equation calculates a moving average of the squared gradient for
    each weight. The gradient is then divided by :math:`\\sqrt{r(w,t)}`.

    In some cases, adding a momentum term :math:`\\beta` is beneficial.
    In our implementation, Nesterov momentum is used:

    ..  math::

        r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\

        v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{v(w,t) +
            \\epsilon}} \\nabla Q_{i}(w)

        w & = w - v(w, t)

    where :math:`\\rho` is a hyperparameter and typical values are 0.9, 0.95
    and so on. :math:`\\beta` is the momentum term. :math:`\\epsilon` is a
    smoothing term to avoid division by zero, usually set somewhere in the
    range from 1e-4 to 1e-8.


    Args:
        learning_rate(float): global learning rate.
        rho(float): rho is :math:`\\rho` in the equation, set to 0.95 by default.
        epsilon(float): :math:`\\epsilon` in the equation is a smoothing term to
            avoid division by zero, set to 1e-6 by default.
        momentum(float): :math:`\\beta` in the equation is the momentum term,
            set to 0.0 by default.

    Raises:
        ValueError: If learning_rate, rho, epsilon, momentum are None.

    Examples:
          .. code-block:: python

              optimizer = fluid.optimizer.RMSProp(0.0001)
              _, params_grads = optimizer.minimize(cost)
    """

    _momentum_acc_str = "momentum"
    _mean_square_acc_str = "mean_square"

    def __init__(self,
                 learning_rate,
                 rho=0.95,
                 epsilon=1.0e-6,
                 momentum=0.0,
                 **kwargs):
        super(RMSPropOptimizer, self).__init__(
            learning_rate=learning_rate, **kwargs)
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if momentum is None:
            raise ValueError("momentum is not set.")

        self.type = "rmsprop"
        self._rho = rho
        self._epsilon = epsilon
        self._momentum = momentum

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        for p in parameters:
            self._add_accumulator(self._momentum_acc_str, p)
            self._add_accumulator(self._mean_square_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        momentum_acc = self._get_accumulator(self._momentum_acc_str,
                                             param_and_grad[0])
        mean_square_acc = self._get_accumulator(self._mean_square_acc_str,
                                                param_and_grad[0])
        rmsprop_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": momentum_acc,
                "MeanSquare": mean_square_acc,
                "LearningRate": self._create_param_lr(param_and_grad),
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "MomentOut": momentum_acc,
                "MeanSquareOut": mean_square_acc
            },
            attrs={
                "epsilon": self._epsilon,
                "decay": self._rho,
                "momentum": self._momentum
            })

        return rmsprop_op


# We shorten the class names, since users will use the optimizers with the
# package name. The sample code:
#
# import paddle.fluid as fluid
#
# sgd = fluid.optimizer.SGD(...)
#
# There is no need to add `Optimizer` as the class suffix
SGD = SGDOptimizer
Momentum = MomentumOptimizer
Adagrad = AdagradOptimizer
Adam = AdamOptimizer
Adamax = AdamaxOptimizer
DecayedAdagrad = DecayedAdagradOptimizer
Adadelta = AdadeltaOptimizer
RMSProp = RMSPropOptimizer


class ModelAverage(Optimizer):
    """Accumulate the average of parameters within a sliding window. The average
    result will be saved in temporary variables which can be applied to the
    parameter variables of the current model by calling the 'apply()' method.
    The 'restore()' method is used to restore the parameter values of the
    current model.

    The size of average window is determined by average_window_rate,
    min_average_window, max_average_window and current update times.

    Args:
        average_window_rate: The rate of average window.
        params_grads: A list of parameter-grad variable pairs.
        min_average_window: The minimum size of average window.
        max_average_window: The maximum size of average window.

    Examples:
        ...
        optimizer = fluid.optimizer.Momentum()
        _, params_grads = optimizer.minimize(cost)
        model_average = fluid.optimizer.ModelAverage(params_grads, 0.15,
                                                min_average_window=10000,
                                                max_average_window=20000)
        for pass_id in range(args.pass_num):
            for data in train_reader():
                exe.run(fluid.default_main_program()...)

            with model_average.apply(exe):
                for data in test_reader():
                    exe.run(inference_program...)
    """

    def __init__(self,
                 average_window_rate,
                 params_grads=None,
                 min_average_window=10000,
                 max_average_window=10000,
                 **kwargs):
        super(ModelAverage, self).__init__(0.0, **kwargs)
        self.average_window = average_window_rate
        self.min_average_window = min_average_window
        self.max_average_window = max_average_window

        self.params_grads = [] if params_grads is None else params_grads
        params = {}
        for param, grad in self.params_grads:
            if param.do_model_average != False:
                params[param.name] = (param, grad)
        for param in framework.default_main_program().global_block(
        ).all_parameters():
            if param.name not in params and param.do_model_average != False:
                grad = param.block.create_var(
                    name=unique_name.generate(".".join([param.name, 'tmp'])),
                    dtype=param.dtype,
                    persistable=False,
                    stop_gradient=True)
                params[param.name] = (param, grad)
        self.params_grads = params.values()

        for param, grad in self.params_grads:
            self._append_average_accumulate_op(param)

        self.apply_program = Program()
        block = self.apply_program.global_block()
        with program_guard(main_program=self.apply_program):
            for param_grad in self.params_grads:
                self._add_average_apply_op(block, param_grad)

        self.restore_program = Program()
        block = self.restore_program.global_block()
        with program_guard(main_program=self.restore_program):
            for param_grad in self.params_grads:
                self._add_average_restore_op(block, param_grad)

    def _add_average_apply_op(self, block, param_grad):
        param = block.clone_variable(param_grad[0])
        grad = block.clone_variable(param_grad[1])
        sum_1 = block.clone_variable(self._get_accumulator('sum_1', param))
        sum_2 = block.clone_variable(self._get_accumulator('sum_2', param))
        sum_3 = block.clone_variable(self._get_accumulator('sum_3', param))
        num_accumulates = block.clone_variable(
            self._get_accumulator('num_accumulates', param))
        old_num_accumulates = block.clone_variable(
            self._get_accumulator('old_num_accumulates', param))
        num_updates = block.clone_variable(
            self._get_accumulator('num_updates', param))
        # backup param value to grad
        layers.assign(input=param, output=grad)
        # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
        tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
        sum = layers.sum(x=[sum_1, sum_2, sum_3])
        tmp = layers.cast(x=tmp, dtype='float32')
        sum = layers.cast(x=sum, dtype='float32')
        layers.elementwise_div(x=sum, y=tmp, out=param)

    def _add_average_restore_op(self, block, param_grad):
        param = block.clone_variable(param_grad[0])
        grad = block.clone_variable(param_grad[1])
        layers.assign(input=grad, output=param)

    def _append_average_accumulate_op(self, param):
        self.helper = LayerHelper("average_accumulate")
        sum_1 = self._add_accumulator('sum_1', param)
        sum_2 = self._add_accumulator('sum_2', param)
        sum_3 = self._add_accumulator('sum_3', param)
        num_accumulates = self._add_accumulator(
            'num_accumulates', param, dtype='int64', shape=[1])
        old_num_accumulates = self._add_accumulator(
            'old_num_accumulates', param, dtype='int64', shape=[1])
        num_updates = self._add_accumulator(
            'num_updates', param, dtype='int64', shape=[1])

        self.helper.append_op(
            type='average_accumulates',
            inputs={
                "param": param,
                "in_sum_1": sum_1,
                "in_sum_2": sum_2,
                "in_sum_3": sum_3,
                "in_num_accumulates": num_accumulates,
                "in_old_num_accumulates": old_num_accumulates,
                "in_num_updates": num_updates
            },
            outputs={
                "out_sum_1": sum_1,
                "out_sum_2": sum_2,
                "out_sum_3": sum_3,
                "out_num_accumulates": num_accumulates,
                "out_old_num_accumulates": old_num_accumulates,
                "out_num_updates": num_updates,
            },
            attrs={
                "average_window": self.average_window,
                "min_average_window": self.min_average_window,
                "max_average_window": self.max_average_window,
            })

    @contextmanager
    def apply(self, executor, need_restore=True):
        """Apply average values to parameters of current model.
        """
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)

    def restore(self, executor):
        """Restore parameter values of current model.
        """
        executor.run(self.restore_program)