# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ... import default_main_program
from ... import default_startup_program
from ... import layers
from ... import unique_name
from . import fp16_utils
from .fp16_utils import update_loss_scaling, rewrite_program
from .fp16_utils import update_role_var_grad
from .fp16_lists import AutoMixedPrecisionLists

__all__ = ["decorate"]


class OptimizerWithMixedPrecision(object):
    """
    Optimizer with mixed-precision (MP) training. This is a wrapper of a common 
Z
Zhen Wang 已提交
30
    optimizer, plus the support of mixed-precision pre-training. The object
31 32 33 34 35 36 37
    of this class almost has the same behavior as the common optimizer, with the 
    methods `minimize()`, `backward()`, `apply_gradients()` implemented. 
    Additionally, it enables the MP training automatically, i.e, the creation 
    and maintenance of master parameters, scaling of loss, etc.

    Args:
        optimizer (Optimizer): A common Optimizer object.
        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
39 40
        init_loss_scaling (float): The initial loss scaling factor.
        use_dynamic_loss_scaling (bool): Whether to use dynamic loss scaling.
        incr_every_n_steps(int): Increases loss scaling every n consecutive 
                                 steps with finite gradients.
        decr_every_n_nan_or_inf(int): Decreases loss scaling every n 
                                      accumulated steps with nan or 
                                      inf gradients.
        incr_ratio(float): The multiplier to use when increasing the loss 
                           scaling.
        decr_ratio(float): The less-than-one multiplier to use when decreasing
                           the loss scaling.

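    Examples:
        A minimal usage sketch; `network()` stands for user code that builds
        the model and returns the loss Variable, and in practice the wrapper
        is obtained through `decorate()` rather than constructed directly:

        .. code-block:: python

            loss = network()
            optimizer = fluid.optimizer.SGD(learning_rate=0.01)
            mp_optimizer = fluid.contrib.mixed_precision.decorate(
                optimizer=optimizer, init_loss_scaling=128.0)
            optimize_ops, scaled_params_grads = mp_optimizer.minimize(loss)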
    """

    def __init__(self, optimizer, amp_lists, init_loss_scaling,
                 use_dynamic_loss_scaling, incr_every_n_steps,
                 decr_every_n_nan_or_inf, incr_ratio, decr_ratio):
        self._optimizer = optimizer
        self._amp_lists = amp_lists
        self._param_grads = None
        self._train_program = default_main_program()
        self._startup_prog = default_startup_program()
        self._scaled_loss = None
        self._loss_scaling = layers.create_global_var(
            name=unique_name.generate("loss_scaling"),
            shape=[1],
            value=init_loss_scaling,
            dtype='float32',
            persistable=True)
        self._use_dynamic_loss_scaling = use_dynamic_loss_scaling
        if self._use_dynamic_loss_scaling:
            self._incr_every_n_steps = layers.fill_constant(
                shape=[1], dtype='int32', value=incr_every_n_steps)
            self._decr_every_n_nan_or_inf = layers.fill_constant(
                shape=[1], dtype='int32', value=decr_every_n_nan_or_inf)
            self._incr_ratio = incr_ratio
            self._decr_ratio = decr_ratio
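            # Counters of consecutive steps with all-finite gradients and of
            # steps that produced inf/nan gradients; update_loss_scaling() uses
            # them to decide when to grow or shrink the loss scaling factor.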
            self._num_good_steps = layers.create_global_var(
                name=unique_name.generate("num_good_steps"),
                shape=[1],
                value=0,
                dtype='int32',
                persistable=True)
            self._num_bad_steps = layers.create_global_var(
                name=unique_name.generate("num_bad_steps"),
                shape=[1],
                value=0,
                dtype='int32',
                persistable=True)

        # Ensure the data type of learning rate vars is float32 (same as the 
        # master parameter dtype)
        if isinstance(optimizer._learning_rate, float):
            optimizer._learning_rate_map[default_main_program()] = \
                        layers.create_global_var(
                        name=unique_name.generate("learning_rate"),
                        shape=[1],
                        value=float(optimizer._learning_rate),
                        dtype='float32',
                        persistable=True)

    def get_loss_scaling(self):
        """Return the real-time loss scaling factor.
        """
        return self._loss_scaling

    def get_scaled_loss(self):
        """Return the scaled loss.
        It's useful when you feed a customized loss into the executor.
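        For example, a sketch of fetching the scaled loss at run time
        (`mp_optimizer` is the decorated optimizer and `exe` an assumed
        `fluid.Executor` running the default main program):

        .. code-block:: python

            scaled_loss = mp_optimizer.get_scaled_loss()
            loss_value, = exe.run(fetch_list=[scaled_loss])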
        """

        return self._scaled_loss

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        Backward propagation or auto differentiation for gradients' computation.

        Args:
            loss (Variable): The loss Variable to minimize.
            startup_program (Program|None): The startup Program for initializing 
                                       parameters in `parameter_list`.
            parameter_list (list|None): A list of Variables to update.
            no_grad_set (set|None): A set of Variables that should be ignored.
            callbacks (list|None): A list of callable objects to run when
                                   appending the backward operator for one
                                   parameter.

        Returns:
            A list of (param, grad) pairs, each a tuple of a parameter and its
            gradient. The scaled loss itself can be retrieved through
            `get_scaled_loss()`.
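
        Examples:
            A minimal sketch of pairing `backward()` with `apply_gradients()`;
            `mp_optimizer` and `loss` are assumed to be built as in the
            `decorate()` example:

            .. code-block:: python

                scaled_params_grads = mp_optimizer.backward(loss)
                optimize_ops = mp_optimizer.apply_gradients(scaled_params_grads)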
        """
        rewrite_program(self._train_program, self._amp_lists)
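        # Scale the loss so that small gradient values stay representable in
        # float16; the gradients are divided by the same factor further below.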
        self._scaled_loss = loss * self._loss_scaling
        self._params_grads = self._optimizer.backward(
            self._scaled_loss, startup_program, parameter_list, no_grad_set,
            callbacks)
        # Change the op_role_var attr for some ops, so that gradients
        # transferred across GPUs can be FP16.
        update_role_var_grad(self._train_program, self._params_grads)
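        # Divide each gradient of the scaled loss by the loss scaling factor to
        # recover the gradient w.r.t. the original loss.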
        scaled_params_grads = []
        for p, g in self._params_grads:
            with self._train_program._optimized_guard([p, g]):
                scaled_g = g / self._loss_scaling
                scaled_params_grads.append([p, scaled_g])

        return scaled_params_grads

    def apply_gradients(self, scaled_params_grads):
        """
        Check the scaled gradients to determine whether to update the loss
        scaling factor, then update the parameters with their scaled gradients.

        Args:
            scaled_params_grads (list): A list of params and scaled grads.

        Returns:
            A list of optimize operators.
        """

        if self._use_dynamic_loss_scaling:

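            # Reduce every scaled gradient to a scalar and sum them all; if any
            # gradient contains inf or nan, the overall sum is non-finite.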
            grads = [layers.reduce_sum(g) for [_, g] in scaled_params_grads]
            all_grads = layers.concat(grads)
            all_grads_sum = layers.reduce_sum(all_grads)
            is_overall_finite = layers.isfinite(all_grads_sum)

            update_loss_scaling(is_overall_finite, self._loss_scaling,
                                self._num_good_steps, self._num_bad_steps,
                                self._incr_every_n_steps,
                                self._decr_every_n_nan_or_inf, self._incr_ratio,
                                self._decr_ratio)

            # apply_gradients appends all ops to the global block, so we should
            # not apply the gradients inside the switch branch.
            with layers.Switch() as switch:
                with switch.case(is_overall_finite):
                    pass
                with switch.default():
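                    # Gradients overflowed in this step: zero them so that the
                    # apply_gradients call below does not consume inf/nan values.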
                    for _, g in scaled_params_grads:
                        layers.assign(layers.zeros_like(g), g)

        optimize_ops = self._optimizer.apply_gradients(scaled_params_grads)

        return optimize_ops

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Perform optimization by minimizing the given loss.

        Args:
            loss (Variable): The loss Variable.
            startup_program (Program): The startup Program for initializing
                                       parameters in `parameter_list`.
            parameter_list (list): A list of Variables to update.
            no_grad_set (set|None): A set of Variables that should be ignored.

        Returns:
            The list of optimize ops and a list of (param, grad) pairs, where
            each grad is the scaled gradient of its parameter.
        """
        scaled_params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
            no_grad_set=no_grad_set)

        optimize_ops = self.apply_gradients(scaled_params_grads)

        return optimize_ops, scaled_params_grads


def decorate(optimizer,
             amp_lists=None,
             init_loss_scaling=1.0,
             incr_every_n_steps=1000,
             decr_every_n_nan_or_inf=2,
             incr_ratio=2.0,
             decr_ratio=0.8,
             use_dynamic_loss_scaling=True):
    """ 
    Decorate the given optimizer to adapt to the mixed-precision training.

    Args:
        optimizer(Optimizer): A common Optimizer.
        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
        init_loss_scaling(float): The initial loss scaling factor.
        incr_every_n_steps(int): Increases loss scaling every n consecutive 
                                 steps with finite gradients.
        decr_every_n_nan_or_inf(int): Decreases loss scaling every n 
                                      accumulated steps with nan or 
                                      inf gradients.
        incr_ratio(float): The multiplier to use when increasing the loss 
                           scaling.
        decr_ratio(float): The less-than-one multiplier to use when decreasing
                           the loss scaling.
        use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling.

    Returns:
        An optimizer acting like a normal one but with mixed-precision training 
        enabled.

    Examples:
        .. code-block:: python

            loss = network()
            optimizer = fluid.optimizer.Adam(learning_rate=0.001)

            mp_optimizer = fluid.contrib.mixed_precision.decorate(
                optimizer=optimizer, init_loss_scaling=8.0)

            ops, param_grads = mp_optimizer.minimize(loss)
            scaled_loss = mp_optimizer.get_scaled_loss()
    """
    if amp_lists is None:
        amp_lists = AutoMixedPrecisionLists()
    mp_optimizer = OptimizerWithMixedPrecision(
        optimizer, amp_lists, init_loss_scaling, use_dynamic_loss_scaling,
        incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio)

    return mp_optimizer