#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import framework
from . import core

__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']


def append_regularization_ops(parameters_and_grads, regularization=None):
    """Create and add backward regularization Operators

    Creates and adds backward regularization operators in the BlockDesc.
    This will add gradients of the regularizer function to the gradients
    of the parameters and return these modified gradients. This is the
    same as implementing weight decay in optimizers for regularization.

    Args:
        parameters_and_grads: A list of (parameters, gradients) pairs
                              that need to be regularized.
        regularization: A global regularizer. It is applied to every
                        parameter whose own regularizer is not set.

    Returns:
        list[(Variable, Variable)]: list of (parameters, gradients) \
        pair with the regularized gradient

    Raises:
        Exception: Unknown regularization type
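
    Examples:
        A minimal usage sketch; ``optimizer`` and ``avg_cost`` are
        assumed to exist, and ``Optimizer.backward`` is assumed to
        return the (parameter, gradient) list this function expects:

        .. code-block:: python

            params_grads = optimizer.backward(avg_cost)
            params_grads = append_regularization_ops(
                params_grads,
                regularization=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=0.1))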
    """
    params_and_grads = []
    for param, grad in parameters_and_grads:
        # If no gradient then we don't need to do anything
        if grad is None:
            params_and_grads.append((param, grad))
            continue
        with param.block.program._optimized_guard(
            [param, grad]), framework.name_scope('regularization'):
            regularization_term = None
            if param.regularizer is not None:
                # Add variable for regularization term in grad block
                regularization_term = param.regularizer(param, grad, grad.block)
            elif regularization is not None:
                regularization_term = regularization(param, grad, grad.block)

            # If no regularization specified, then we don't need to do anything
            if regularization_term is None:
                params_and_grads.append((param, grad))
                continue

            new_grad = grad
            if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
                # FIXME(zcd): If the grad is SELECTED_ROWS, regularization
                # changes the grad's type and name. Since the gradient's name
                # is used by ParallelExecutor in Reduce mode, create a new
                # output variable with a distinct name for the regularized
                # gradient here.
                new_grad = grad.block.create_var(
                    name=grad.name + core.kNewGradSuffix(),
                    dtype=param.dtype,
                    shape=param.shape,
                    lod_level=param.lod_level,
                    type=core.VarDesc.VarType.LOD_TENSOR)

            grad.block.append_op(
                type='sum',
                inputs={"X": [grad, regularization_term]},
                outputs={"Out": new_grad})

            params_and_grads.append((param, new_grad))

    return params_and_grads


class WeightDecayRegularizer(object):
    """Base class for weight decay regularizers

    Defines the common interface of weight-decay regularizers.
    Weight-decay regularizers are added only during the backward
    pass for faster regularization. They add operations to the network
    that correspond to gradient of the regularization function.
    Users should not use this class directly; instead, use one of its
    implementations, or subclass it as sketched below.
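
    Examples:
        A minimal sketch of a custom regularizer; the name ``MyDecay``
        and its ``coeff`` argument are illustrative, not part of the
        API. It reproduces a plain L2-style decay term:

        .. code-block:: python

            class MyDecay(WeightDecayRegularizer):
                def __init__(self, coeff=0.0):
                    super(MyDecay, self).__init__()
                    self._coeff = coeff

                def __call__(self, param, grad, block):
                    # Create a variable holding coeff * param and
                    # return it as the decay term.
                    decay = block.create_var(
                        dtype=param.dtype,
                        shape=param.shape,
                        lod_level=param.lod_level)
                    block.append_op(
                        type='scale',
                        inputs={"X": param},
                        outputs={"Out": decay},
                        attrs={"scale": self._coeff})
                    return decay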
    """

    def __init__(self):
        pass

    def __call__(self, param, grad, block):
        """Add corresponding weight decay operations to the network
        """
        raise NotImplementedError()

    def __str__(self):
        """Debug string
        """
        raise NotImplementedError()


class L2DecayRegularizer(WeightDecayRegularizer):
    """Implements the L2 Weight Decay Regularization

    Small values of the L2 regularization coefficient can help prevent
    overfitting the training data.

    .. math::

        L2WeightDecay = reg\_coeff * parameter
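
    The decay term is added to the parameter's gradient, so it is
    equivalent to adding the penalty
    :math:`\frac{reg\_coeff}{2} \|parameter\|_2^2` to the loss.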

    Args:
        regularization_coeff(float): regularization coeff

    Examples:
        .. code-block:: python

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=1e-4,
                regularization=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=0.1))
            optimizer.minimize(avg_cost)
    """

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L2DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Add L2 weight decay ops to network

        Adds L2 weight decay ops.
        L2WeightDecay = reg_coeff * parameter

        Args:
            param: parameter variable for which regularization is applied
            grad: gradient variable of the parameter
            block: block in which variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)

        decay = block.create_var(
            dtype=param.dtype, shape=param.shape, lod_level=param.lod_level)

        # Append Op to calculate decay
        block.append_op(
            type='scale',
            inputs={"X": param},
            outputs={"Out": decay},
            attrs={"scale": self._regularization_coeff})

        return decay

    def __str__(self):
        return "L2Decay, regularization_coeff=%f" % self._regularization_coeff


class L1DecayRegularizer(WeightDecayRegularizer):
    """Implements the L1 Weight Decay Regularization

    L1 regularization encourages sparsity.

    .. math::

        L1WeightDecay = reg\_coeff * sign(parameter)
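
    The decay term is added to the parameter's gradient; it is the
    (sub)gradient of the penalty :math:`reg\_coeff \|parameter\|_1`
    contributed to the loss.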

    Args:
        regularization_coeff(float): regularization coeff

    Examples:
        .. code-block:: python

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=1e-4,
                regularization=fluid.regularizer.L1DecayRegularizer(
                    regularization_coeff=0.1))
            optimizer.minimize(avg_cost)
    """

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L1DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Add L1 weight decay ops to network

        Adds L1 weight decay ops.
        L1WeightDecay = reg_coeff * sign(parameter)

        Args:
            param: parameter variable for which regularization is applied
            grad: gradient variable of the parameter
            block: block in which variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)

        decay = block.create_var(
            dtype=param.dtype, shape=param.shape, lod_level=param.lod_level)

        # Append sign op
        block.append_op(
            type='sign', inputs={"X": param}, outputs={"Out": decay})

        # Append scale op to the output of sign op
        block.append_op(
            type='scale',
            inputs={"X": decay},
            outputs={"Out": decay},
            attrs={"scale": self._regularization_coeff})

        return decay

    def __str__(self):
        return "L1Decay, regularization_coeff=%f" % self._regularization_coeff


# We shorten the class name, since users will use the regularizer with the
# name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
#                          param_attr=fluid.ParamAttr(
#                              regularizer=fluid.regularizer.L2Decay(1e-4)))
#
# There is no need to add a `Regularizer` suffix to the class name.
L1Decay = L1DecayRegularizer
L2Decay = L2DecayRegularizer