#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import framework
from . import core

__all__ = [
    'append_regularization_ops', 'WeightDecayRegularizer', 'L1Decay', 'L2Decay',
    'L1DecayRegularizer', 'L2DecayRegularizer'
]


def append_regularization_ops(parameters_and_grads, regularization=None):
    """Create and add backward regularization Operators

    Creates and adds backward regularization operators in the BlockDesc.
    This will add gradients of the regularizer function to the gradients
    of the parameters and return these modified gradients. This is the
    same as implementing weight decay in optimizers for regularization.

    Args:
        parameters_and_grads: A list of (parameters, gradients) pairs
                              that need to be regularized.
        regularization: A global regularizer. It is applied to every
                        parameter whose own ``regularizer`` attribute is
                        not set.

    Returns:
        list of (parameters, gradients) pair with the regularized gradient

    Raises:
        Exception: Unknown regularization type
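
    Examples:
        A minimal sketch of typical use (the loss variable ``avg_cost`` and
        the surrounding program are assumed to be defined already):

        .. code-block:: python

            import paddle.fluid as fluid

            # Build gradients for all trainable parameters.
            params_grads = fluid.backward.append_backward(avg_cost)
            # Fold regularization gradients into the parameter gradients,
            # with a global L2 decay as the fallback regularizer.
            params_grads = fluid.regularizer.append_regularization_ops(
                params_grads, fluid.regularizer.L2Decay(1e-4))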
    """
    params_and_grads = []
    for param, grad in parameters_and_grads:
        # If no gradient then we don't need to do anything
        if grad is None:
            params_and_grads.append((param, grad))
            continue

        regularization_term = None
        if param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)

        # If no regularization specified, then we don't need to do anything
        if regularization_term is None:
            params_and_grads.append((param, grad))
            continue

        assert grad.shape == regularization_term.shape

        grad.block.append_op(
            type='elementwise_add',
            inputs={"X": grad,
                    "Y": regularization_term},
            outputs={"Out": grad})
        params_and_grads.append((param, grad))

    return params_and_grads


class WeightDecayRegularizer(object):
    """Base class for weight decay regularizers

    Defines the common interface of weight-decay regularizers.
    Weight-decay regularizers are added only during the backward
    pass for faster regularization. They add operations to the network
    that correspond to gradient of the regularization function.
    Users should not use this class directly; instead, use one of its
    concrete subclasses such as ``L1DecayRegularizer`` or
    ``L2DecayRegularizer``.
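
    A custom regularizer only needs to implement ``__call__`` (and,
    optionally, ``__str__``). A minimal sketch, using a hypothetical no-op
    regularizer whose decay term is all zeros:

    .. code-block:: python

        class ZeroDecay(WeightDecayRegularizer):
            def __call__(self, param, grad, block):
                decay = block.create_var(
                    dtype="float32",
                    shape=param.shape,
                    lod_level=param.lod_level)
                # Fill the decay variable with zeros, so the gradient is
                # left unchanged when the term is added to it.
                block.append_op(
                    type='fill_constant',
                    outputs={'Out': decay},
                    attrs={
                        'shape': param.shape,
                        'value': 0.0,
                        'dtype': core.VarDesc.VarType.FP32
                    })
                return decay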
    """

    def __init__(self):
        pass

    def __call__(self, param, grad, block):
        """Add corresponding weight decay operations to the network
        """
        raise NotImplementedError()

    def __str__(self):
        """Debug string
        """
        raise NotImplementedError()


class L2DecayRegularizer(WeightDecayRegularizer):
    """Implements the L2 Weight Decay Regularization
    """

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L2DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Add L2 weight decay ops to network

        Adds L2 weight decay ops.
        L2WeightDecay = reg_coeff * parameter

        Args:
            param: parameter variable for which regularization is applied
            grad: gradient variable of the parameter
            block: block in which the decay variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)

        decay = block.create_var(
            dtype="float32", shape=param.shape, lod_level=param.lod_level)

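        # For sparse (SELECTED_ROWS) gradients, decay only the parameter
        # rows that actually received a gradient: gather those rows from
        # the parameter with a sparse lookup_table op and scale the result.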
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            decay = block.create_var(
                dtype="float32",
                shape=param.shape,
                type=core.VarDesc.VarType.SELECTED_ROWS)
            block.append_op(
                type='lookup_table',
                inputs={'W': param,
                        'Ids': grad},
                outputs={'Out': decay},
                attrs={'is_sparse': True})
            param = decay

        # Append Op to calculate decay
        block.append_op(
            type='scale',
            inputs={"X": param},
            outputs={"Out": decay},
            attrs={"scale": self._regularization_coeff})

        return decay

    def __str__(self):
        return "L2Decay, regularization_coeff=%f" % self._regularization_coeff


class L1DecayRegularizer(WeightDecayRegularizer):
    """Implements the L1 Weight Decay Regularization
    """

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L1DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Add L1 weight decay ops to network

        Adds L1 weight decay ops.
        L1WeightDecay = reg_coeff * sign(parameter)

        Args:
            param: parameter variable for which regularization is applied
            grad: gradient variable of the parameter
            block: block in which the decay variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)
        decay = block.create_var(
            dtype="float32", shape=param.shape, lod_level=param.lod_level)

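        # For sparse (SELECTED_ROWS) gradients, decay only the parameter
        # rows that actually received a gradient: gather those rows from
        # the parameter with a sparse lookup_table op before taking signs.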
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            decay = block.create_var(
                dtype="float32",
                shape=param.shape,
                type=core.VarDesc.VarType.SELECTED_ROWS)
            block.append_op(
                type='lookup_table',
                inputs={'W': param,
                        'Ids': grad},
                outputs={'Out': decay},
                attrs={'is_sparse': True})
            param = decay

        # Append sign op
        block.append_op(
            type='sign', inputs={"X": param}, outputs={"Out": decay})

        # Append scale op to the output of sign op
        block.append_op(
            type='scale',
            inputs={"X": decay},
            outputs={"Out": decay},
            attrs={"scale": self._regularization_coeff})

        return decay

    def __str__(self):
        return "L1Decay, regularization_coeff=%f" % self._regularization_coeff


# We shorten the class names, since users will refer to the regularizer
# through the package name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
#                          param_attr=fluid.ParamAttr(
#                              regularizer=fluid.regularizer.L2Decay(1e-4)))
#
# There is no need to add a `Regularizer` suffix to the class name.
L1Decay = L1DecayRegularizer
L2Decay = L2DecayRegularizer