#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = ['L1Decay', 'L2Decay']

import paddle.fluid as fluid


class L1Decay(fluid.regularizer.L1Decay):
    r"""
    Implement the L1 Weight Decay Regularization, which encourages the weights to be sparse.

    It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
    When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
    ``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
    higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
    in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
    in Optimizer will be used.

    In the implementation, the loss function of L1 Weight Decay Regularization is as follows:

    .. math::

        loss = coeff * reduce\_sum(abs(x))

    Args:
        coeff(float, optional): regularization coefficient. Default: 0.0.

    Examples:
        .. code-block:: python

            # Example1: set Regularizer in optimizer
            import paddle
            from paddle.regularizer import L1Decay

            linear = paddle.nn.Linear(10, 10)
            inp = paddle.rand(shape=[10, 10], dtype="float32")
            out = linear(inp)
            loss = paddle.mean(out)
            momentum = paddle.optimizer.Momentum(
                learning_rate=0.1,
                parameters=linear.parameters(),
                weight_decay=L1Decay(0.0001))
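            # With weight_decay=L1Decay(0.0001), every trainable parameter w of
            # linear effectively gets an extra 0.0001 * sum(|w|) penalty when the
            # optimizer updates it (unless its own ParamAttr sets a regularizer).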
            loss.backward()
            momentum.step()
            momentum.clear_grad()

            # Example2: set Regularizer in parameters
            # Set L1 regularization in parameters.
            # The global regularizer set in the optimizer does not take effect on my_conv2d in this case.
            from paddle.nn import Conv2D
            from paddle import ParamAttr
            from paddle.regularizer import L1Decay

            my_conv2d = Conv2D(
                    in_channels=10,
                    out_channels=10,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=ParamAttr(regularizer=L1Decay(coeff=0.01)),
                    bias_attr=False)
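            # A regularizer passed to the optimizer would act as the global one
            # for my_conv2d's parameters. Its weight already carries L1Decay in
            # its ParamAttr and the bias is disabled, so the global setting
            # leaves my_conv2d untouched. For example:
            optimizer = paddle.optimizer.Momentum(
                learning_rate=0.1,
                parameters=my_conv2d.parameters(),
                weight_decay=L1Decay(coeff=0.0001))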
    """

    def __init__(self, coeff=0.0):
        super(L1Decay, self).__init__(coeff)


class L2Decay(fluid.regularizer.L2Decay):
    r"""
    Implement the L2 Weight Decay Regularization, which helps to prevent the model from over-fitting.

    It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
    When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
    ``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
    higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
    in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
    in Optimizer will be used.

    In the implementation, the loss function of L2 Weight Decay Regularization is as follows:

    .. math::

        loss = 0.5 * coeff * reduce\_sum(square(x))

    Args:
        coeff(float, optional): regularization coefficient. Default: 0.0.

    Examples:
        .. code-block:: python

            # Example1: set Regularizer in optimizer
            import paddle
            from paddle.regularizer import L2Decay
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.rand(shape=[10, 10], dtype="float32")
            out = linear(inp)
            loss = paddle.mean(out)
            momentum = paddle.optimizer.Momentum(
                learning_rate=0.1,
                parameters=linear.parameters(),
                weight_decay=L2Decay(0.0001))
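            # With weight_decay=L2Decay(0.0001), every trainable parameter w of
            # linear effectively gets an extra 0.5 * 0.0001 * sum(square(w))
            # penalty when the optimizer updates it (unless its own ParamAttr
            # sets a regularizer).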
            loss.backward()
            momentum.step()
            momentum.clear_grad()

            # Example2: set Regularizer in parameters
            # Set L2 regularization in parameters.
            # The global regularizer set in the optimizer does not take effect on my_conv2d in this case.
            from paddle.nn import Conv2D
            from paddle import ParamAttr
            from paddle.regularizer import L2Decay

            my_conv2d = Conv2D(
                    in_channels=10,
                    out_channels=10,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
                    bias_attr=False)
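            # A regularizer passed to the optimizer would act as the global one
            # for my_conv2d's parameters. Its weight already carries L2Decay in
            # its ParamAttr and the bias is disabled, so the global setting
            # leaves my_conv2d untouched. For example:
            optimizer = paddle.optimizer.Momentum(
                learning_rate=0.1,
                parameters=my_conv2d.parameters(),
                weight_decay=L2Decay(coeff=0.0001))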
    """

    def __init__(self, coeff=0.0):
        super(L2Decay, self).__init__(coeff)