#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager, wrap_decorator
from paddle.fluid import core
import contextlib
from paddle.fluid.framework import Variable, _non_static_mode, OpProtoHolder, Parameter, _dygraph_tracer, dygraph_only, set_flags, get_flags
import warnings
import copy
import functools
import paddle
import operator
import types

AMP_LEVEL = core.AmpLevel

__all__ = ['amp_guard', 'amp_decorate']

# The set of ops that support fp16 calculation and are considered numerically-
# safe and performance-critical. These ops are always converted to fp16.
WHITE_LIST = {
    'conv2d',
    'matmul',
    'matmul_v2',
    'mul',
    'fake_quantize_dequantize_abs_max',
    'fake_quantize_dequantize_moving_average_abs_max',
}

# The set of ops that support fp16 calculation and are considered numerically-
# dangerous and whose effects may also be observed in downstream ops.
BLACK_LIST = {
    'exp',
    'square',
    'log',
    'mean',
    'sum',
    'cos_sim',
    'softmax',
    'softmax_with_cross_entropy',
    'sigmoid_cross_entropy_with_logits',
54
    'c_softmax_with_cross_entropy',
55 56
    'cross_entropy',
    'cross_entropy2',
57 58
    # defaulting to fp32 avoids returning inf when the summed value exceeds 65504 (fp16 max)
    'reduce_sum',
}
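
# Note on how these lists are used under O1: inputs of white-list ops are cast
# to fp16 before execution and inputs of black-list ops are cast back to fp32;
# ops in neither list are generally kept in fp16 only when all of their inputs
# are already fp16.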

AMP_RELATED_FLAGS = [
    'FLAGS_cudnn_exhaustive_search',
    'FLAGS_conv_workspace_size_limit',
    'FLAGS_cudnn_batchnorm_spatial_persistent',
]

AMP_RELATED_FLAGS_SETTING = {
    'FLAGS_cudnn_exhaustive_search': 1,
    'FLAGS_conv_workspace_size_limit': 1000,
    'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
}
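
# Illustrative: these flags are not applied automatically by this module (see
# the commented-out calls in amp_guard below); applying them would look like:
#
#     original_flags = get_flags(AMP_RELATED_FLAGS)
#     set_flags(AMP_RELATED_FLAGS_SETTING)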

PURE_FP16_WHITE_LIST = set()
PURE_FP16_BLACK_LIST = {
    'lookup_table', 'lookup_table_v2', 'scatter', 'scatter_grad'
}

BF16_WHITE_LIST = {'conv2d', 'matmul_v2'}
BF16_BLACK_LIST = set()

_g_amp_state_ = None


def amp_state():
    """
    Return the argument state (local variables) captured by the innermost
    active `amp_guard`, or None if no guard is active.
    """
    global _g_amp_state_
    return _g_amp_state_


# NOTE(zhiqiu): similar to paddle.fluid.contrib.mixed_precision.fp16_lists.AutoMixedPrecisionLists._update_list.
# We do not reuse AutoMixedPrecisionLists because its custom_black_varnames is not suitable for imperative mode.
def _update_list(custom_white_list,
                 custom_black_list,
                 level='O1',
                 dtype='float16'):
    """
    Update black and white list according to users' custom list.
    """
    if dtype == 'float16':
        if level == 'O1':
            _white_list = copy.copy(WHITE_LIST)
            _black_list = copy.copy(BLACK_LIST)
        else:
            _white_list = copy.copy(PURE_FP16_WHITE_LIST)
            _black_list = copy.copy(PURE_FP16_BLACK_LIST)
    else:
        _white_list = copy.copy(BF16_WHITE_LIST)
        _black_list = copy.copy(BF16_BLACK_LIST)
    if custom_white_list and custom_black_list:
        for op_name in custom_white_list:
            if op_name in custom_black_list:
                raise ValueError("Custom white list overlaps "
                                 "custom black list")
    if custom_white_list:
        for op_name in custom_white_list:
            if op_name in _black_list:
                _black_list.remove(op_name)
            _white_list.add(op_name)
    if custom_black_list:
        for op_name in custom_black_list:
            if op_name in _white_list:
                _white_list.remove(op_name)
            _black_list.add(op_name)
    return _white_list, _black_list
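
# Illustrative behavior of _update_list (op names are made-up examples):
#
#     w, b = _update_list({'elementwise_add'}, {'conv2d'}, level='O1')
#     # 'elementwise_add' joins the white list (and leaves the black list if
#     # present); 'conv2d' moves from the white list to the black list.
#     # Overlapping custom white/black lists raise a ValueError.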


def _in_amp_guard():
    """
    Judge whether current code block is in `amp_guard` context.
    """
    tracer = _dygraph_tracer()
    if tracer:
L
Leo Chen 已提交
132
        if tracer._amp_level == core.AmpLevel.O1:
133 134 135
            return True
        else:
            return False
136 137 138 139
    else:
        return False


def _in_pure_fp16_guard():
    """
    Check whether the current code block is inside an `amp_guard` (level O2, pure fp16) context.
    """
    tracer = _dygraph_tracer()
    return tracer and tracer._amp_level == core.AmpLevel.O2


def _is_gpu_float16_supported():
    """
    Check whether the current GPU supports float16 AMP (Compute Capability >= 7.0).
    """
    prop = paddle.device.cuda.get_device_capability()
    return prop[0] >= 7


def _is_gpu_bfloat16_supported():
    """
    Check whether the current GPU supports bfloat16 AMP (Compute Capability >= 8.0 and CUDA >= 11).
    """
    prop = paddle.device.cuda.get_device_capability()
    cuda_version = paddle.version.cuda()
    if cuda_version is not None and cuda_version != 'False':
        cuda_version_check = int(cuda_version.split('.')[0]) >= 11
    else:
        cuda_version_check = False
    return prop[0] >= 8 and cuda_version_check


@dygraph_only
def pure_fp16_initialize(models):
    """
    Cast the parameters of ``models`` to float16 in place, skipping layers
    already in float16 and normalization layers (BatchNorm/LayerNorm/SyncBatchNorm),
    which are kept in float32 for numerical stability.
    """
    for idx in range(len(models)):
        for layer in models[idx].sublayers(include_self=True):
            layer._casted_by_pure_fp16 = True
            if (layer._dtype == 'float16') or isinstance(
                    layer, (paddle.nn.BatchNorm, paddle.nn.BatchNorm1D,
                            paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D,
                            paddle.nn.LayerNorm, paddle.nn.SyncBatchNorm)):
                continue
            layer._to_impl(dtype='float16', include_sublayers=False)
    return models
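
# Illustrative effect (hypothetical model): after
#
#     net = paddle.nn.Sequential(paddle.nn.Conv2D(3, 2, 3), paddle.nn.BatchNorm2D(2))
#     net = pure_fp16_initialize([net])[0]
#
# the Conv2D parameters are float16 while the BatchNorm2D parameters stay float32.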


def check_models(models):
    for model in models:
        if not isinstance(model, paddle.nn.Layer):
            raise RuntimeError(
                "Current train mode is pure fp16, models should be paddle.nn.Layer, but received {}."
                .format(type(model)))
        if isinstance(model, paddle.DataParallel):
            raise RuntimeError(
                "For distributed AMP training, you should first use paddle.amp.decorate() to decorate the original model, and then call paddle.DataParallel to get the distributed model."
            )


def check_optimizers(optimizers):
    for optimizer in optimizers:
        if not isinstance(
                optimizer,
            (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)):
            raise RuntimeError(
                "Current train mode is pure fp16, optimizers should be paddle.optimizer.Optimizer or paddle.fluid.optimizer.Optimizer, but received {}."
                .format(type(optimizer)))


@signature_safe_contextmanager
@dygraph_only
def amp_guard(enable=True,
              custom_white_list=None,
              custom_black_list=None,
              level='O1',
              dtype='float16'):
    """
    :api_attr: imperative

    Create a context which enables auto-mixed-precision(AMP) of operators executed in dynamic graph mode.
    If enabled, the input data type (float32 or float16) of each operator is decided
    by the autocast algorithm for better performance.

    Commonly, it is used together with `GradScaler` to achieve Auto-Mixed-Precision in
    imperative mode, and together with `amp_decorate` to achieve pure fp16 in imperative mode.

    Args:
        enable(bool, optional): Enable auto-mixed-precision or not. Default is True.
        custom_white_list(set|list|tuple, optional): The custom white_list. It's the set of ops that support
             fp16 calculation and are considered numerically-safe and performance-critical. These ops 
             will be converted to fp16.
        custom_black_list(set|list|tuple, optional): The custom black_list. The set of ops that support fp16
             calculation and are considered numerically-dangerous and whose effects may also be 
             observed in downstream ops. These ops will not be converted to fp16.
        level(str, optional): Auto mixed precision level. Accepted values are "O1" and "O2": O1 represents mixed precision, where the input data type of each operator is cast according to the white_list and black_list;
             O2 represents pure fp16, where all operator parameters and input data are cast to fp16, except for operators in the black_list, operators without fp16 kernels, and batchnorm. Default is O1 (amp).
        dtype(str, optional): Whether to use 'float16' or 'bfloat16'. Default is 'float16'.

        
    Examples:

     .. code-block:: python

        import numpy as np
        import paddle

        data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
        with paddle.fluid.dygraph.guard():
            conv2d = paddle.fluid.dygraph.Conv2D(3, 2, 3)
            data = paddle.fluid.dygraph.to_variable(data)
            with paddle.fluid.dygraph.amp_guard():
                conv = conv2d(data)
                print(conv.dtype) # FP16
            with paddle.fluid.dygraph.amp_guard(enable=False):
                conv = conv2d(data)
                print(conv.dtype) # FP32

    """
    # record the arguments of this guard so that amp_state() can report them
    amp_state = locals()
    global _g_amp_state_
    original_state = _g_amp_state_
    _g_amp_state_ = amp_state

    # check amp_level: O0-O2
    level = level.upper()
    if level not in ['O0', 'O1', 'O2']:
        raise ValueError(
            "level should be O0, O1 or O2. O0 represents fp32 train mode, O1 represents AMP train mode, O2 represents pure fp16/bf16 train mode."
        )

    # check amp_dtype: float16 or bfloat16
    dtype = dtype.lower()
    if dtype not in ['float16', 'bfloat16']:
        raise ValueError("dtype should be 'float16' or 'bfloat16'.")

    # check tracer
    tracer = _dygraph_tracer()
    if not tracer:
        raise ValueError(
            "current_tracer is None, maybe it is not in imperative mode.")

    # check device_type:
    # NOTE: For now, AMP only supports GPU for float16 and bfloat16, and XPU/MLU/NPU for float16.
    # CPU support for bfloat16 may be added later.
    if enable and not (tracer._expected_place.is_gpu_place()
                       or tracer._expected_place.is_xpu_place()
                       or tracer._expected_place.is_mlu_place()
                       or tracer._expected_place.is_npu_place()
                       or tracer._expected_place.is_custom_place()):
        warnings.warn(
            'amp_guard can only be enabled on CUDAPlace, XPUPlace, MLUPlace, NPUPlace, and CustomPlace, current place is %s, so it has no effect.'
            % tracer._expected_place)
        enable = False
    # For npu:
    if tracer._expected_place.is_npu_place() and (dtype == 'bfloat16'):
        warnings.warn('NPUPlace only supports float16 amp.')
        enable = False
    # For xpu:
    if tracer._expected_place.is_xpu_place() and (dtype == 'bfloat16'):
        warnings.warn('XPUPlace only supports float16 amp.')
        enable = False
    # For mlu:
    if tracer._expected_place.is_mlu_place() and (dtype == 'bfloat16'):
        warnings.warn('MLUPlace only supports float16 amp.')
        enable = False
    # For custom device:
    if tracer._expected_place.is_custom_place() and (dtype == 'bfloat16'):
        warnings.warn('CustomPlace only supports float16 amp.')
        enable = False
    # For gpu float16: Compute Capability should >= 7.
    # For gpu bfloat16: Compute Capability should >= 8 & CUDA Version should >= 11.
    if tracer._expected_place.is_gpu_place():
        if (dtype == 'float16') and not _is_gpu_float16_supported():
            prop = paddle.device.cuda.get_device_capability()
            warnings.warn(
                "For float16, amp only support NVIDIA GPU with Compute Capability 7.0 or higher, current GPU is: %s, with Compute Capability: %d.%d."
                % (paddle.device.cuda.get_device_name(), prop[0], prop[1]))
        elif (dtype == 'bfloat16') and not _is_gpu_bfloat16_supported():
            prop = paddle.device.cuda.get_device_capability()
            cuda_version = paddle.version.cuda()
            warnings.warn(
                "For bfloat16, amp only support NVIDIA GPU with Compute Capability 8.0 or higher and CUDA Version 11.0 or higher, current GPU is: %s, with Compute Capability: %d.%d, current CUDA Version is: %s."
                % (paddle.device.cuda.get_device_name(), prop[0], prop[1],
                   cuda_version))

    amp_dtype = dtype

    if level == 'O1':
        amp_level = AMP_LEVEL.O1
        if dtype == 'float16':
            _white_list = WHITE_LIST
            _black_list = BLACK_LIST
        elif dtype == 'bfloat16':
            _white_list = BF16_WHITE_LIST
            _black_list = BF16_BLACK_LIST

    elif level == 'O2':
        amp_level = AMP_LEVEL.O2
        if dtype == 'float16':
            _white_list = PURE_FP16_WHITE_LIST
            _black_list = PURE_FP16_BLACK_LIST
        elif dtype == 'bfloat16':
            _white_list = BF16_WHITE_LIST
            _black_list = BF16_BLACK_LIST
    elif level == 'O0':
        amp_level = AMP_LEVEL.O0
        if dtype == 'float16':
            _white_list = WHITE_LIST
            _black_list = BLACK_LIST
        elif dtype == 'bfloat16':
            _white_list = BF16_WHITE_LIST
            _black_list = BF16_BLACK_LIST

    if custom_white_list or custom_black_list:
        _white_list, _black_list = _update_list(custom_white_list,
                                                custom_black_list, level, dtype)

    if not enable:
        amp_level = AMP_LEVEL.O0
        amp_dtype = "float32"

    if tracer:
        # enable auto_cast
        original_amp_level = tracer._amp_level
        tracer._amp_level = amp_level

        # set amp op list
        original_white_list, original_black_list = tracer._get_amp_op_list()
        tracer._set_amp_op_list(_white_list, _black_list)

        # TODO(zhiqiu): set amp related flags automatically in this guard
        # Currently, if FLAGS_cudnn_batchnorm_spatial_persistent is set to True in amp_guard,
        # batch_norm can run in fast mode, but batch_norm_grad cannot if backward is not executed inside amp_guard.
        # So, users need to set the related flags manually.

        # original_flags = get_flags(AMP_RELATED_FLAGS)
        # set_flags(AMP_RELATED_FLAGS_SETTING)

        # set amp dtype
        original_amp_dtype = tracer._amp_dtype
        tracer._amp_dtype = amp_dtype

    # restore status
    try:
        yield
    finally:
        if tracer:
            _g_amp_state_ = original_state
            tracer._amp_level = original_amp_level
            tracer._set_amp_op_list(original_white_list, original_black_list)
            # set_flags(original_flags)
            tracer._amp_dtype = original_amp_dtype
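
# Illustrative usage of amp_guard with bfloat16 (assumes an NVIDIA GPU with
# Compute Capability >= 8.0 and CUDA >= 11; ``model`` and ``data`` are
# hypothetical):
#
#     with amp_guard(level='O1', dtype='bfloat16'):
#         out = model(data)  # white-list ops such as conv2d run in bfloat16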


class StateDictHook(object):
    """
    A state_dict hook that casts each parameter in the state_dict to
    ``save_dtype`` before saving. ``amp_decorate`` registers it on every
    sublayer when ``save_dtype`` is given.
    """

    def __init__(self, save_dtype):
        self._save_dtype = save_dtype

    def __call__(self, state_dict):
        for key in state_dict:
            param = state_dict[key]
            with paddle.fluid.dygraph.guard():
                param_applied = paddle.cast(param, self._save_dtype)
                param_applied.name = param.name
                state_dict[key] = param_applied
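
# Illustrative: with amp_decorate(..., save_dtype='float32'), this hook is
# registered on every sublayer, so paddle.save(model.state_dict(), path)
# writes float32 tensors even for a pure-fp16 model.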


@dygraph_only
def amp_decorate(models,
                 optimizers=None,
                 level='O1',
                 master_weight=None,
                 save_dtype=None):
    """
    Decorate models and optimizers for auto-mixed-precision. When level is O1 (amp), the decorator does nothing.
    When level is O2 (pure fp16), the decorator casts all parameters of the models to FP16, except those of BatchNorm and LayerNorm layers.

    Commonly, it is used together with `amp_guard` to achieve pure fp16 training in imperative mode.

    Args:
        models(Layer|list of Layer, optional): The models defined by the user; must be either a single model or a list of models. Default is None.
        optimizers(Optimizer|list of Optimizer, optional): The optimizers defined by the user; must be either a single optimizer or a list of optimizers. Default is None.
        level(str, optional): Auto mixed precision level. Accepted values are "O1" and "O2": with O1 (mixed precision) the decorator does nothing;
             with O2 (pure fp16) the decorator casts all parameters of the models to FP16, except those of BatchNorm and LayerNorm layers. Default is O1 (amp).
        master_weight(bool, optional): For level='O2', whether to use multi-precision during weight updating. If master_weight is None, the optimizer uses multi-precision in O2 level. Default is None.
        save_dtype(str, optional): The dtype used when saving model parameters via `paddle.save` or `paddle.jit.save`; it should be 'float16', 'float32', 'float64' or None.
             save_dtype does not change the parameters' dtype in the model, it only changes the state_dict dtype. When save_dtype is None, the save dtype is the same as the model dtype. Default is None.

    Examples:

     .. code-block:: python   
        
        # required: gpu
        # Demo1: single model and optimizer:
        import paddle

        model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
        optimizer = paddle.optimizer.SGD(parameters=model.parameters())

        model, optimizer = paddle.fluid.dygraph.amp_decorate(models=model, optimizers=optimizer, level='O2')

        data = paddle.rand([10, 3, 32, 32])

        with paddle.fluid.dygraph.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
            output = model(data)
            print(output.dtype) # FP16

        # required: gpu
        # Demo2: multi models and optimizers:
        model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
        optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())

        models, optimizers = paddle.fluid.dygraph.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')

        data = paddle.rand([10, 3, 32, 32])

        with paddle.fluid.dygraph.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
            output = models[0](data)
            output2 = models[1](data)
            print(output.dtype) # FP16
            print(output2.dtype) # FP16
        
        # required: gpu
        # Demo3: optimizers is None:
        model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
        optimizer3 = paddle.optimizer.Adam(parameters=model3.parameters())

        model = paddle.fluid.dygraph.amp_decorate(models=model3, level='O2')

        data = paddle.rand([10, 3, 32, 32])

        with paddle.fluid.dygraph.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
            output = model(data)
            print(output.dtype) # FP16
    """
    if level not in ['O1', 'O2']:
        raise ValueError(
            "level should be O1 or O2, O1 represents AMP train mode, O2 represents pure fp16 train mode."
        )

    if level == 'O1':
        if optimizers is None:
            return models
        else:
            return models, optimizers

    models_is_list = False
    if isinstance(models, paddle.nn.Layer):
        models_is_list = False
        models = [models]
        check_models(models)
    elif isinstance(models, list):
        check_models(models)
        models_is_list = True
    else:
        raise TypeError(
            "models must be either a single model or a list of models.")

    models = pure_fp16_initialize(models=models)

    if optimizers is not None:
        # check optimizers
        optimizers_is_list = False
        if isinstance(
                optimizers,
            (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)):
            optimizers_is_list = False
            optimizers = [optimizers]
            check_optimizers(optimizers)
        elif isinstance(optimizers, list):
            check_optimizers(optimizers)
            optimizers_is_list = True
        else:
            raise TypeError(
                "optimizers must be either a single optimizer or a list of optimizers."
            )
        # support master_weight
        for idx_opt in range(len(optimizers)):
            if hasattr(optimizers[idx_opt], '_multi_precision'):
                if master_weight is False:
                    optimizers[idx_opt]._multi_precision = False
                else:
                    optimizers[idx_opt]._multi_precision = True

    if save_dtype is not None:
        if save_dtype not in ['float16', 'float32', 'float64']:
            raise ValueError(
                "save_dtype can only be float16, float32 or float64, but your input save_dtype is %s."
                % save_dtype)
        for idx in range(len(models)):
            for layer in models[idx].sublayers(include_self=True):
                layer.register_state_dict_hook(StateDictHook(save_dtype))

    if models_is_list:
        if optimizers is not None:
            if optimizers_is_list:
                return models, optimizers
            else:
                return models, optimizers[0]
        else:
            return models
    else:
        if optimizers is not None:
            if optimizers_is_list:
                return models[0], optimizers
            else:
                return models[0], optimizers[0]
        else:
            return models[0]