#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid.dygraph.amp import AmpScaler

__all__ = []


class GradScaler(AmpScaler):
    """
    GradScaler is used for Auto-Mixed-Precision training in dynamic graph mode.
    It controls the scaling of the loss, which helps avoid numerical overflow.
    An object of this class has two methods, `scale()` and `minimize()`.

    `scale()` is used to multiply the loss by a scale ratio.
    `minimize()` is similar to `optimizer.minimize()`; it performs the parameter update.

    Commonly, it is used together with `paddle.amp.auto_cast` to achieve Auto-Mixed-Precision in 
    dynamic graph mode.

    Args:
        enable(bool, optional): Enable loss scaling or not. Default is True.
        init_loss_scaling(float, optional): The initial loss scaling factor. Default is 2**15.
        incr_ratio(float, optional): The multiplier to use when increasing the loss 
                        scaling. Default is 2.0.
        decr_ratio(float, optional): The less-than-one multiplier to use when decreasing
                        the loss scaling. Default is 0.5.
        incr_every_n_steps(int, optional): Increases loss scaling every n consecutive 
                                steps with finite gradients. Default is 1000.
        decr_every_n_nan_or_inf(int, optional): Decreases loss scaling every n 
                                    accumulated steps with nan or inf gradients. Default is 2.
        use_dynamic_loss_scaling(bool, optional): Whether to use dynamic loss scaling. If False, a fixed loss scaling factor is used. If True, the loss scaling is updated dynamically (see the sketch after the example below). Default is True.
    Returns:
        A GradScaler object.

    Examples:

        .. code-block:: python

            import paddle

            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
            optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
            scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
            data = paddle.rand([10, 3, 32, 32])

            with paddle.amp.auto_cast():
                conv = model(data)
                loss = paddle.mean(conv)
                
            scaled = scaler.scale(loss)  # scale the loss 
            scaled.backward()            # do backward
            scaler.minimize(optimizer, scaled)  # update parameters     
            optimizer.clear_grad()
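
        The arguments ``incr_ratio``, ``decr_ratio``, ``incr_every_n_steps`` and
        ``decr_every_n_nan_or_inf`` describe the dynamic loss scaling policy.
        The sketch below is illustrative only (the actual update happens inside
        `minimize()`), but shows how the scale factor evolves:

        .. code-block:: python

            # Illustrative sketch of dynamic loss scaling; not the
            # internal implementation.
            loss_scaling = 2.**15  # init_loss_scaling
            good_steps = 0         # consecutive steps with finite gradients
            bad_steps = 0          # accumulated steps with nan/inf gradients

            def update_loss_scaling(grads_are_finite):
                global loss_scaling, good_steps, bad_steps
                if grads_are_finite:
                    good_steps += 1
                    if good_steps >= 1000:      # incr_every_n_steps
                        loss_scaling *= 2.0     # incr_ratio
                        good_steps = 0
                else:
                    good_steps = 0
                    bad_steps += 1
                    if bad_steps >= 2:          # decr_every_n_nan_or_inf
                        loss_scaling *= 0.5     # decr_ratio
                        bad_steps = 0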
    """

    def __init__(self,
                 enable=True,
                 init_loss_scaling=2.**15,
                 incr_ratio=2.0,
                 decr_ratio=0.5,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 use_dynamic_loss_scaling=True):
        super(GradScaler, self).__init__(enable, init_loss_scaling, incr_ratio,
                                         decr_ratio, incr_every_n_steps,
                                         decr_every_n_nan_or_inf,
                                         use_dynamic_loss_scaling)

    def scale(self, var):
        """
        Multiplies a Tensor by the scale factor and returns the scaled output.
        If this instance of :class:`GradScaler` is not enabled, the output is returned unmodified.

        Args:
            var (Tensor):  The tensor to scale.
        Returns:
            The scaled tensor, or the original tensor if scaling is not enabled.
        
        Examples:

            .. code-block:: python

                import paddle

                model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
                optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
                scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
                data = paddle.rand([10, 3, 32, 32])

                with paddle.amp.auto_cast():
                    conv = model(data)
                    loss = paddle.mean(conv)

                scaled = scaler.scale(loss)  # scale the loss 
                scaled.backward()            # do backward
                scaler.minimize(optimizer, scaled)  # update parameters  
                optimizer.clear_grad()
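
            With ``init_loss_scaling=1024`` above, ``scaled`` initially equals
            ``1024 * loss``; the scale factor may later be adjusted by dynamic
            loss scaling.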
        """
        return super(GradScaler, self).scale(var)

    def minimize(self, optimizer, *args, **kwargs):
        """
        This function is similar to `optimizer.minimize()`; it performs the parameter update.
        
        If the scaled gradients of the parameters contain NaN or Inf, the parameter update is skipped.
        Otherwise, it first unscales the scaled gradients of the parameters, then updates the parameters.

        Finally, the loss scaling ratio is updated.
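
        The sketch below illustrates this control flow, using hypothetical
        helper names (`grads_contain_nan_or_inf`, `unscale_grads`,
        `update_loss_scaling`); the actual logic is implemented by the
        underlying `AmpScaler`:

        .. code-block:: python

            # Illustrative only; the helpers here are hypothetical, not
            # part of the paddle API.
            def minimize_sketch(optimizer, *args, **kwargs):
                if grads_contain_nan_or_inf(optimizer):
                    pass  # skip the parameter update this step
                else:
                    unscale_grads(optimizer)             # divide grads by the scale factor
                    optimizer.minimize(*args, **kwargs)  # normal parameter update
                update_loss_scaling()                    # adjust the loss scaling factor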

        Args:
            optimizer(Optimizer):  The optimizer used to update parameters.
            args:  Arguments, which will be forwarded to `optimizer.minimize()`.
            kwargs: Keyword arguments, which will be forwarded to `optimizer.minimize()`.

        Examples:

            .. code-block:: python

                import paddle

                model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
                optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
                scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
                data = paddle.rand([10, 3, 32, 32])

                with paddle.amp.auto_cast():
                    conv = model(data)
                    loss = paddle.mean(conv)

                scaled = scaler.scale(loss)  # scale the loss 
                scaled.backward()            # do backward
                scaler.minimize(optimizer, scaled)  # update parameters  
                optimizer.clear_grad()
        """
        return super(GradScaler, self).minimize(optimizer, *args, **kwargs)