# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .optimizer import Optimizer
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable

__all__ = []


class RMSProp(Optimizer):
    r"""
    Root Mean Squared Propagation (RMSProp) is an unpublished adaptive learning
    rate method. It was originally proposed on slide 29 of the lecture notes at
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .

    The original equation is as follows:

    ..  math::

        r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2

        w & = w - \frac{\eta} {\sqrt{r(w,t) + \epsilon}} \nabla Q_{i}(w)

    The first equation calculates a moving average of the squared gradient for
    each weight. The gradient is then divided by :math:`\sqrt{r(w,t)}`.

    In some cases, adding a momentum term :math:`\beta` is beneficial.
    In our implementation, momentum is used:

    ..  math::

        r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2

        v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) +
            \epsilon}} \nabla Q_{i}(w)

        w & = w - v(w, t)

    If ``centered`` is True:

    ..  math::

        r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2

        g(w, t) & = \rho g(w, t-1) + (1 - \rho)\nabla Q_{i}(w)

        v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) - (g(w, t))^2 +
            \epsilon}} \nabla Q_{i}(w)

        w & = w - v(w, t)

    where :math:`\rho` is a hyperparameter with typical values such as 0.9 or
    0.95, :math:`\beta` is the momentum term, and :math:`\epsilon` is a
    smoothing term that avoids division by zero, usually set somewhere in the
    range from 1e-4 to 1e-8.
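
    The following is a minimal NumPy sketch of the non-centered update with
    momentum (for illustration only; ``rmsprop_step`` is not part of this API):

    ..  code-block:: python

        import numpy as np

        def rmsprop_step(w, grad, r, v, lr=0.01, rho=0.95, momentum=0.0,
                         epsilon=1e-6):
            # moving average of the squared gradient
            r = rho * r + (1 - rho) * grad * grad
            # momentum applied to the scaled gradient
            v = momentum * v + lr * grad / np.sqrt(r + epsilon)
            # parameter update
            w = w - v
            return w, r, v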


    Parameters:
        learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
          It can be a float value or an ``LRScheduler`` instance.
        rho (float, optional): :math:`\rho` in the equations above. Default is 0.95.
        epsilon (float, optional): :math:`\epsilon` in the equations above, a smoothing
          term that avoids division by zero. Default is 1e-6.
        momentum (float, optional): :math:`\beta` in the equations above, the momentum
          term. Default is 0.0.
        centered (bool, optional): If True, gradients are normalized by the estimated variance of
          the gradient; if False, by the uncentered second moment. Setting this to
          True may help with training, but is slightly more expensive in terms of
          computation and memory. Defaults to False.
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` to update to minimize ``loss``.
          This parameter is required in dygraph mode. You can also specify different options
          (such as the learning rate and weight decay) for different parameter groups by
          passing a list of dicts. Note that the learning_rate in a parameter group is a
          scale factor applied to the base learning_rate.
          The default value is None in static graph mode, in which case all parameters will be updated.
        weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization.
          It can be a float value as the coefficient of L2 regularization, or
          :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
          If a parameter has already set a regularizer using :ref:`api_fluid_ParamAttr`,
          the regularization setting here in the optimizer will be ignored for this parameter.
          Otherwise, the regularization setting here in the optimizer will take effect.
          Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
          some derived class of ``GradientClipBase`` . There are three clipping strategies
          ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
          :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        name (str, optional): This parameter is used by developers to print debugging information.
          For details, please refer to :ref:`api_guide_Name`. Default is None.

    Raises:
        ValueError: If any of learning_rate, rho, epsilon, or momentum is None.

    Examples:
          .. code-block:: python

            import paddle

            inp = paddle.rand([10,10], dtype="float32")
            linear = paddle.nn.Linear(10, 10)
            out = linear(inp)
            loss = paddle.mean(out)

            rmsprop = paddle.optimizer.RMSProp(learning_rate=0.1,
                                               parameters=linear.parameters(),
                                               weight_decay=0.01)
            out.backward()
            rmsprop.step()
            rmsprop.clear_grad()

            # Note that the effective learning_rate of linear_2 is 0.1 * 0.1 = 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            rmsprop = paddle.optimizer.RMSProp(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1
                }],
                weight_decay=0.01)
            out.backward()
            rmsprop.step()
            rmsprop.clear_grad()
    """

    _momentum_acc_str = "momentum"
    _mean_square_acc_str = "mean_square"
    _mean_grad_acc_str = "mean_grad"

    def __init__(self,
                 learning_rate,
                 rho=0.95,
                 epsilon=1.0e-6,
                 momentum=0.0,
                 centered=False,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 name=None):
        if learning_rate is None:
            raise ValueError("learning_rate is not set.")
        if rho is None:
            raise ValueError("rho is not set.")
        if epsilon is None:
            raise ValueError("epsilon is not set.")
        if momentum is None:
            raise ValueError("momentum is not set.")
        if not 0.0 <= epsilon:
            raise ValueError("Invalid value of epsilon, expect epsilon >= 0.")
        if not 0.0 <= momentum:
            raise ValueError("Invalid value of momentum, expect momentum >= 0.")
        if not 0.0 <= rho:
            raise ValueError("Invalid value of rho, expect rho >= 0.")

        super(RMSProp, self).__init__(learning_rate=learning_rate,
                                      parameters=parameters,
                                      weight_decay=weight_decay,
                                      grad_clip=grad_clip,
                                      name=name)

        self.type = "rmsprop"
        self._rho = rho
        self._epsilon = epsilon
        self._momentum = momentum
        self._centered = centered
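        # Defaults recorded so that parameter groups which override only some
        # hyperparameters fall back to these values (see _update_param_group).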
        self._default_dict = {
            'rho': rho,
            'epsilon': epsilon,
            'momentum': momentum,
            'centered': centered,
        }

    def _create_accumulators(self, block, parameters):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        if isinstance(parameters, dict):
            parameters = parameters.get('params')

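        # Each parameter gets three accumulators: the momentum buffer, the moving
        # average of squared gradients, and the moving average of gradients
        # (the last one is only used when centered=True).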
        for p in parameters:
            self._add_accumulator(self._momentum_acc_str, p)
            self._add_accumulator(self._mean_square_acc_str, p)
            self._add_accumulator(self._mean_grad_acc_str, p)

    def _append_optimize_op(self, block, param_and_grad):
        if not isinstance(block, framework.Block):
            raise TypeError("block is not instance of framework.Block.")

        if isinstance(param_and_grad, dict):
            param_and_grad = self._update_param_group(param_and_grad)

        momentum_acc = self._get_accumulator(self._momentum_acc_str,
                                             param_and_grad[0])
        mean_square_acc = self._get_accumulator(self._mean_square_acc_str,
                                                param_and_grad[0])
        mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,
                                              param_and_grad[0])
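        # Emit the rmsprop operator; it updates the parameter and all three
        # accumulators in place through the corresponding *Out outputs.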
        rmsprop_op = block.append_op(
            type=self.type,
            inputs={
                "Param": param_and_grad[0],
                "Grad": param_and_grad[1],
                "Moment": momentum_acc,
                "MeanSquare": mean_square_acc,
                "MeanGrad": mean_grad_acc,
                "LearningRate": self._create_param_lr(param_and_grad),
            },
            outputs={
                "ParamOut": param_and_grad[0],
                "MomentOut": momentum_acc,
                "MeanSquareOut": mean_square_acc,
                "MeanGradOut": mean_grad_acc
            },
            attrs={
                "epsilon": self._epsilon,
                "decay": self._rho,
                "momentum": self._momentum,
                "centered": self._centered
            },
            stop_gradient=True)

        return rmsprop_op

    def _update_param_group(self, parameters):
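        # Pull this group's hyperparameters, falling back to the defaults
        # recorded in __init__ for any key the group does not override.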
        self._epsilon = parameters.get('epsilon', self._default_dict['epsilon'])
        self._rho = parameters.get('rho', self._default_dict['rho'])
        self._momentum = parameters.get('momentum',
                                        self._default_dict['momentum'])
        self._centered = parameters.get('centered',
                                        self._default_dict['centered'])
        parameters = parameters.get('params')
        return parameters