#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from . import framework
from . import core

__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']


def append_regularization_ops(parameters_and_grads, regularization=None):
    """Create and add backward regularization Operators

    Creates and adds backward regularization operators in the BlockDesc.
    This will add gradients of the regularizer function to the gradients
    of the parameters and return these modified gradients. This is the
    same as implementing weight decay in optimizers for regularization.

    Args:
        parameters_and_grads: A list of (parameters, gradients) pairs
                              that need to be regularized.
        regularization: A global regularizer. It is applied to a parameter's
                        gradient only when that parameter does not specify
                        its own regularizer.

    Returns:
        list[(Variable, Variable)]: list of (parameters, gradients) \
        pair with the regularized gradient

    Raises:
        Exception: Unknown regularization type
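
    Examples:
        A minimal usage sketch (normally the optimizer calls this helper
        internally during ``minimize``); ``avg_cost`` is assumed to be a
        scalar loss Variable built in the default program:

        .. code-block:: python

            params_grads = fluid.backward.append_backward(avg_cost)
            params_grads = append_regularization_ops(
                params_grads,
                regularization=fluid.regularizer.L2Decay(
                    regularization_coeff=1e-4))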
    """
    params_and_grads = []
    for param, grad in parameters_and_grads:
        # If no gradient then we don't need to do anything
        if grad is None:
            params_and_grads.append((param, grad))
            continue
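        # _optimized_guard marks the ops appended below as optimization ops
        # associated with this (param, grad) pair.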
        with param.block.program._optimized_guard([param, grad]):
            regularization_term = None
            if param.regularizer is not None:
                # Add variable for regularization term in grad block
                regularization_term = param.regularizer(param, grad, grad.block)
            elif regularization is not None:
                regularization_term = regularization(param, grad, grad.block)

            # If no regularization specified, then we don't need to do anything
            if regularization_term is None:
                params_and_grads.append((param, grad))
                continue

            assert grad.shape == regularization_term.shape

            grad.block.append_op(
                type='elementwise_add',
                inputs={"X": grad,
                        "Y": regularization_term},
                outputs={"Out": grad})
            params_and_grads.append((param, grad))

    return params_and_grads


class WeightDecayRegularizer(object):
    """Base class for weight decay regularizers

    Defines the common interface of weight-decay regularizers.
    Weight-decay regularizers are added only during the backward
    pass for faster regularization. They add operations to the network
    that correspond to the gradient of the regularization function.
    Users should not use this class directly, but should use one of its
    implementations instead; a minimal sketch of such an implementation
    is given in the example below.
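
    Examples:
        A hypothetical custom regularizer would subclass this base class and
        implement both methods. The sketch below mirrors the dense path of
        ``L2DecayRegularizer`` defined later in this module; the class name
        and the fixed coefficient 0.01 are illustrative only:

        .. code-block:: python

            class CustomDecayRegularizer(WeightDecayRegularizer):
                def __call__(self, param, grad, block):
                    # Variable that will hold the regularization gradient.
                    decay = block.create_var(
                        dtype="float32",
                        shape=param.shape,
                        lod_level=param.lod_level)
                    # decay = 0.01 * param
                    block.append_op(
                        type='scale',
                        inputs={"X": param},
                        outputs={"Out": decay},
                        attrs={"scale": 0.01})
                    return decay

                def __str__(self):
                    return "CustomDecay, coeff=0.01"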
    """

    def __init__(self):
        pass

    def __call__(self, param, grad, block):
        """Add corresponding weight decay operations to the network
        """
        raise NotImplementedError()

    def __str__(self):
        """Debug string
        """
        raise NotImplementedError()


class L2DecayRegularizer(WeightDecayRegularizer):
    """Implements the L2 Weight Decay Regularization

    Small values of the L2 regularization coefficient can help prevent
    overfitting the training data.

    .. math::

        L2WeightDecay = reg\_coeff * parameter

    Args:
        regularization_coeff(float): regularization coeff

    Examples:
        .. code-block:: python

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=1e-4,
                regularization=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=0.1))
            optimizer.minimize(avg_cost)
    """

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L2DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Add L2 weight decay ops to network

        Adds L2 weight decay ops.
        L2WeightDecay = reg_coeff * parameter

        Args:
            param: parameter variable for which regularization is applied
            grad: gradient variable of the parameter
            block: block in which variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)

        decay = block.create_var(
            dtype="float32", shape=param.shape, lod_level=param.lod_level)

        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
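            # Sparse gradient (SELECTED_ROWS): regularize only the parameter
            # rows that actually received updates. Extract the updated row
            # indices from the gradient and gather the matching parameter
            # rows with a sparse lookup_table op.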
            idx = block.create_var(
                dtype="int64",
                shape=param.shape,
                type=core.VarDesc.VarType.LOD_TENSOR)
            decay = block.create_var(
                dtype="float32",
                shape=param.shape,
                type=core.VarDesc.VarType.SELECTED_ROWS)
            block.append_op(
                type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
            block.append_op(
                type='lookup_table',
                inputs={'W': param,
                        'Ids': idx},
                outputs={'Out': decay},
                attrs={'is_sparse': True})
            param = decay

        # Append Op to calculate decay
        block.append_op(
            type='scale',
            inputs={"X": param},
            outputs={"Out": decay},
            attrs={"scale": self._regularization_coeff})

        return decay

    def __str__(self):
        return "L2Decay, regularization_coeff=%f" % self._regularization_coeff


class L1DecayRegularizer(WeightDecayRegularizer):
    """Implements the L1 Weight Decay Regularization

    L1 regularization encourages sparsity.

    .. math::

        L1WeightDecay = reg\_coeff * sign(parameter)

    Args:
        regularization_coeff(float): regularization coeff

    Examples:
        .. code-block:: python

            optimizer = fluid.optimizer.Adagrad(
                learning_rate=1e-4,
                regularization=fluid.regularizer.L1DecayRegularizer(
                    regularization_coeff=0.1))
            optimizer.minimize(avg_cost)
    """

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L1DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Add L1 weight decay ops to network

        Adds L1 weight decay ops.
        L1WeightDecay = reg_coeff * sign(parameter)

        Args:
            param: parameter variable for which regularization is applied
            grad: gradient variable of the parameter
            block: block in which variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)
        decay = block.create_var(
            dtype="float32", shape=param.shape, lod_level=param.lod_level)

        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
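            # Same sparse-gradient handling as in L2DecayRegularizer: only
            # the parameter rows present in the SELECTED_ROWS gradient are
            # regularized.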
            idx = block.create_var(
                dtype="int64",
                shape=param.shape,
                type=core.VarDesc.VarType.LOD_TENSOR)
            decay = block.create_var(
                dtype="float32",
                shape=param.shape,
                type=core.VarDesc.VarType.SELECTED_ROWS)
            block.append_op(
                type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
            block.append_op(
                type='lookup_table',
                inputs={'W': param,
                        'Ids': idx},
                outputs={'Out': decay},
                attrs={'is_sparse': True})

        # Append sign op
        block.append_op(
            type='sign', inputs={"X": param}, outputs={"Out": decay})

        # Append scale op to the output of sign op
        block.append_op(
            type='scale',
            inputs={"X": decay},
            outputs={"Out": decay},
            attrs={"scale": self._regularization_coeff})

        return decay

    def __str__(self):
        return "L1Decay, regularization_coeff=%f" % self._regularization_coeff


# We shorten the class names, since users will use the regularizer with the
# package name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
#                          param_attr=fluid.ParamAttr(
#                              regularizer=fluid.regularizer.L2Decay(0.1)))
#
# There is no need to add `Regularizer` as the class suffix.
L1Decay = L1DecayRegularizer
L2Decay = L2DecayRegularizer