#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import logging

import numpy as np

from paddle.fluid import core, framework, global_scope
from paddle.fluid.log_helper import get_logger
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager

from .fp16_lists import AutoMixedPrecisionLists

_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'
)

_valid_types = [
    core.VarDesc.VarType.LOD_TENSOR,
    core.VarDesc.VarType.SELECTED_ROWS,
    core.VarDesc.VarType.LOD_TENSOR_ARRAY,
]

_fp16_guard_pattern = "__use_fp16__"


def _rename_arg(op, old_name, new_name):
    """
    If an op has an input or output argument named old_name, rename it to
    new_name.

    Args:
        op (Operator): Current operator.
        old_name (str): The old name of input args.
        new_name (str): The new name of input args.
    """
    op_desc = op.desc
    if isinstance(op_desc, tuple):
        op_desc = op_desc[0]
    op_desc._rename_input(old_name, new_name)
    op_desc._rename_output(old_name, new_name)


def _rename_op_input(program, op_var_rename_map, origin_ops, keep_fp32_ops):
    for block in program.blocks:
        ops = block.ops
        block_id = block.idx
        for op in ops:
            if op not in origin_ops or op in keep_fp32_ops:
                continue
            for name in op.input_arg_names:
                if name in op_var_rename_map[block_id]:
                    op._rename_input(name, op_var_rename_map[block_id][name])


def _dtype_to_str(dtype):
    """
    Convert specific variable type to its corresponding string.

    Args:
        dtype (VarType): Variable type.
    """
    if dtype == core.VarDesc.VarType.FP16:
        return 'fp16'
    else:
        return 'fp32'


_keep_layer_norm_scale_bias_to_fp32_flag = True


def _keep_layer_norm_scale_bias_to_fp32(*args):
    global _keep_layer_norm_scale_bias_to_fp32_flag
    if len(args) == 0:
        return _keep_layer_norm_scale_bias_to_fp32_flag
    else:
        assert len(args) == 1 and isinstance(args[0], bool)
        old_value = _keep_layer_norm_scale_bias_to_fp32_flag
        _keep_layer_norm_scale_bias_to_fp32_flag = args[0]
        return old_value
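
# A minimal usage sketch of the flag helper above (illustrative, not part of
# the original module):
#
#     old = _keep_layer_norm_scale_bias_to_fp32()    # query the flag
#     _keep_layer_norm_scale_bias_to_fp32(False)     # allow fp16 scale/bias
#     _keep_layer_norm_scale_bias_to_fp32(old)       # restore the old value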


def _keep_fp32_input(op, in_name):
    op_type = op.type
    if op_type == 'batch_norm':
        # Scale, Bias, Mean, Variance should be float32.
        return in_name != 'X'
    if op_type == 'layer_norm' and _keep_layer_norm_scale_bias_to_fp32():
        return in_name != 'X'
    if op_type == 'instance_norm':
        return in_name != 'X'
    if op_type == 'fused_bn_add_activation':
        return in_name not in {'X', 'Z'}
    if op_type == 'resnet_unit':
        return in_name not in {'X', 'FilterX', 'Z', 'FilterZ'}
    if op_type in ['fused_attention', 'fused_feedforward']:
        return in_name in {
            'LnScale',
            'LnBias',
            'Ln2Scale',
            'Ln2Bias',
            "Ln1Scale",
            "Ln1Bias",
        }
    if op_type == 'fused_multi_transformer':
        return in_name in {'LnScale', 'LnBias', 'FFNLnScale', 'FFNLnBias'}
    return False


def _keep_fp32_output(op, out_name):
    op_type = op.type
    if op_type in ['batch_norm', 'fused_bn_add_activation']:
        return out_name != 'Y'
    if op_type == 'layer_norm' and _keep_layer_norm_scale_bias_to_fp32():
        return out_name != 'Y'
    if op_type == 'resnet_unit':
        return out_name not in {'Y', 'ConvX', 'ConvZ'}
    if op_type in ['fused_attention', 'fused_feedforward']:
        return out_name in {
            'LnMean',
            'LnVariance',
            'Ln2Mean',
            'Ln2Variance',
            'Ln1Mean',
            'Ln1Variance',
        }
    return False
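
# Illustrative behaviour of the two helpers above (`bn_op` is a hypothetical
# batch_norm operator):
#
#     _keep_fp32_input(bn_op, 'Scale')   # -> True: statistics/affine params stay fp32
#     _keep_fp32_input(bn_op, 'X')       # -> False: the data input may be cast to fp16
#     _keep_fp32_output(bn_op, 'Mean')   # -> True: only the 'Y' output may become fp16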


def _insert_cast_op(block, op, idx, src_dtype, dest_dtype):
    """
    Insert cast op and rename args of input and output.

    Args:
        block (Block): The block in which the operator is.
        op (Operator): The operator to insert cast op.
        idx (int): The index of current operator.
        src_dtype (VarType): The input variable dtype of cast op.
        dest_dtype (VarType): The output variable dtype of cast op.

    Returns:
        num_cast_ops (int): The number of cast ops that have been inserted.
    """
    num_cast_ops = 0

    for in_name in op.input_names:
        if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input(
            op, in_name
        ):
            continue
        for in_var_name in op.input(in_name):
            in_var = block._find_var_recursive(in_var_name)
            if in_var.type not in _valid_types or in_var.dtype == dest_dtype:
                continue
            if in_var.dtype == src_dtype:
                cast_name = in_var.name + '.cast_' + _dtype_to_str(dest_dtype)
                out_var = block.vars.get(cast_name)
                if out_var is None or out_var.dtype != dest_dtype:
                    op_device = op.attr('op_device')
                    # NOTE(wangxi): pipeline optimization that saves one send.
                    # If in_var is stop_gradient and the prev_op device is
                    # `all`, set the cast op device to `all` too, so the cast
                    # variable does not need a separate send.
                    # TODO: remove this after the dynamic and static pipeline
                    # interfaces are unified.
                    if (
                        src_dtype == core.VarDesc.VarType.FP32
                        and in_var.stop_gradient
                    ):
                        prev_op = None
                        if in_var.op is op:
                            prev_op = find_true_prev_op(
                                block.ops, op, in_var_name
                            )
                        elif in_var.op is not None:
                            prev_op = in_var.op

                        prev_op_device = None
                        if prev_op is not None:
                            prev_op_device = prev_op.attr('op_device')

                        if (
                            prev_op_device is not None
                            and 'all' in prev_op_device
                        ):
                            op_device = prev_op_device

                    out_var = block.create_var(
                        name=cast_name,
                        dtype=dest_dtype,
                        persistable=False,
                        stop_gradient=in_var.stop_gradient,
                    )

                    block._insert_op_without_sync(
                        idx,
                        type="cast",
                        inputs={"X": in_var},
                        outputs={"Out": out_var},
                        attrs={
                            "in_dtype": in_var.dtype,
                            "out_dtype": out_var.dtype,
                            "op_device": op_device,
                            "op_role": op.attr("op_role"),
                        },
                    )
                    num_cast_ops += 1
                _rename_arg(op, in_var.name, out_var.name)
            else:
                if op.has_attr('in_dtype'):
                    op._set_attr('in_dtype', dest_dtype)
    if (
        src_dtype == core.VarDesc.VarType.FP32
        and dest_dtype == core.VarDesc.VarType.FP16
    ):
        for out_name in op.output_names:
            if _keep_fp32_output(op, out_name):
                continue
            for out_var_name in op.output(out_name):
                out_var = block.var(out_var_name)
                if out_var.type not in _valid_types:
                    continue
                if out_var.dtype == core.VarDesc.VarType.FP32:
                    out_var.desc.set_dtype(core.VarDesc.VarType.FP16)
                    if op.has_attr('out_dtype'):
                        op._set_attr('out_dtype', core.VarDesc.VarType.FP16)
    return num_cast_ops
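
# A sketch of what _insert_cast_op does for a white-list op with an fp32
# input `x` (names are illustrative):
#
#     before:  x (fp32) ----------------------------> matmul
#     after:   x (fp32) --> cast --> x.cast_fp16 ----> matmul
#
# The matmul's input argument is renamed from `x` to `x.cast_fp16`, while the
# original fp32 variable stays available for any remaining fp32 consumers.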


def _insert_cast_post_op(
    block, op, idx, src_dtype, dest_dtype, target_name, op_var_rename_map
):
    num_cast_ops = 0

    target_var = block.var(target_name)
    if target_var.type not in _valid_types or target_var.dtype == dest_dtype:
        return num_cast_ops

    assert (
        target_var.dtype == src_dtype
    ), "The real dtype({}) is not equal to the src dtype({})".format(
        _dtype_to_str(target_var.dtype), _dtype_to_str(src_dtype)
    )

    cast_name = target_var.name + '.cast_' + _dtype_to_str(dest_dtype)
    cast_var = block.vars.get(cast_name)
    if cast_var is None or cast_var.dtype != dest_dtype:
        cast_var = block.create_var(
            name=cast_name,
            dtype=dest_dtype,
            persistable=False,
            stop_gradient=target_var.stop_gradient,
        )
        block._insert_op(
            idx,
            type="cast",
            inputs={"X": target_var},
            outputs={"Out": cast_var},
            attrs={
                "in_dtype": target_var.dtype,
                "out_dtype": cast_var.dtype,
                "op_device": op.attr("op_device"),
                "op_role": op.attr("op_role"),
            },
        )
        num_cast_ops += 1
        op_var_rename_map[block.idx][target_var.name] = cast_var.name

    return num_cast_ops


def find_true_prev_op(ops, cur_op, var_name):
    """
    Find the true prev op that outputs var_name variable.

    Args:
        ops (list): A list of ops.
        cur_op (Operator): Current operator which has var_name variable.
        var_name (string): Variable name.
    """
    prev_op = []
    for op in ops:
        if op == cur_op:
            break
        for out_name in op.output_names:
            for out_var_name in op.output(out_name):
                if out_var_name == var_name:
                    prev_op.append(op)
    if prev_op:
        if len(prev_op) != 1:
            raise ValueError(
                "There must be only one previous op "
304
                f"that outputs {var_name} variable"
305
            )
306 307 308
        else:
            return prev_op[0]
    return None
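
# Illustrative usage (hypothetical op list): if only `op_a` in `ops` outputs
# the variable "tmp_0" and `op_c` consumes it, then
# find_true_prev_op(ops, op_c, "tmp_0") returns `op_a`; it returns None when
# no earlier op outputs the variable and raises ValueError if several do.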


def find_true_post_op(ops, cur_op, var_name, search_all=False):
    """
    If there are post ops that take var_name as input, return them; otherwise
    return an empty list.
    Args:
        ops (list): A list of ops.
        cur_op (Operator): Current operator which has var_name variable.
        var_name (string): Variable name.
        search_all (bool): Whether to search the whole "ops" list. Set it to
                           True when "cur_op" is not in "ops".
    """
    post_op = []
    if search_all:
        """
        "cur_op" does not have to be in the "ops" list. E.g. "cur_op" can come
        from the startup_prog block while the "ops" list comes from the
        main_prog block. By setting idx to -1, we start looking for post ops
        from the top of the list. If search_all is False, "cur_op" is assumed
        to be in "ops", so the search can start from the index of "cur_op" to
        reduce the search time.
        """
        idx = -1
    else:
        for idx, op in enumerate(ops):
            if op == cur_op:
                break

    for i in range(idx + 1, len(ops)):
        op = ops[i]
        for in_name in op.input_names:
            for in_var_name in op.input(in_name):
                if in_var_name == var_name:
                    post_op.append(op)

    return post_op


def find_op_index(block_desc, cur_op_desc):
    """Return the index of cur_op_desc in block_desc, or -1 if not found."""
    for idx in range(block_desc.op_size()):
        if cur_op_desc == block_desc.op(idx):
            return idx
    return -1


def _is_in_black_varnames(op, amp_lists):
    for in_name in op.input_arg_names:
        if in_name in amp_lists.black_varnames:
            return True

    for out_name in op.output_arg_names:
        if out_name in amp_lists.black_varnames:
            return True

    return False


def _need_keep_fp32(op, unsupported_op_list, use_fp16_guard):
    if op.type in unsupported_op_list:
        # the highest priority condition: If ops don't have fp16 computing kernels,
        # they must be executed in fp32 calculation pattern.
        return True

    # process ops about learning rate
    in_out_arg_names = []
    in_out_arg_names.extend(list(op.input_arg_names))
    in_out_arg_names.extend(list(op.output_arg_names))
    for name in in_out_arg_names:
        if "learning_rate" in name:
            return True

    if use_fp16_guard:
        if op.has_attr("op_namescope") and (
            _fp16_guard_pattern in op.attr("op_namescope")
        ):
            # op in fp16 guard
            return False
        else:
            # op not in fp16 guard
            return True
    else:
        return False


@signature_safe_contextmanager
def fp16_guard():
    """
    As for the pure fp16 training, if users set `use_fp16_guard` to True,
    only those ops created in the context manager `fp16_guard` will be
    transformed as float16 type.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F
            paddle.enable_static()
            data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)

            with paddle.static.amp.fp16_guard():
                bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
                pool = F.max_pool2d(bn, kernel_size=2, stride=2)
                hidden = paddle.static.nn.fc(pool, size=10)
                loss = paddle.mean(hidden)
    """
    with framework.name_scope(prefix=_fp16_guard_pattern):
        yield


def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True):
    """
    Traverse all ops in the whole model and set their inputs and outputs
    to the fp16 data type. This function handles batch normalization
    specially, keeping the computation of batchnorm ops in FP32.
    Args:
        program (Program): The used program.
        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
        use_fp16_guard(bool): Determine whether to use `fp16_guard` when
                              constructing the program. Default True.
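
    Returns:
        set: The set of input variable names whose dtype was changed to fp16.
             It can be passed to `cast_parameters_to_fp16` afterwards.

    Examples:
        .. code-block:: python

            # An illustrative usage sketch (not from the original docstring):
            # build a small static-graph network and cast it to fp16.
            import paddle
            paddle.enable_static()

            main_prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            with paddle.static.program_guard(main_prog, startup_prog):
                data = paddle.static.data(
                    name='X', shape=[None, 1, 28, 28], dtype='float32')
                conv2d = paddle.static.nn.conv2d(
                    input=data, num_filters=6, filter_size=3)
                loss = paddle.mean(conv2d)

            to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
                main_prog, use_fp16_guard=False)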
    """

    if amp_lists is None:
        amp_lists = AutoMixedPrecisionLists()
    amp_lists.unsupported_list -= {
        "conditional_block_grad",
        "conditional_block",
        "conditional_block_infer",
        "select_input",
        "while",
        "while_grad",
        "cast",
        "tensor_array_to_tensor",
        "lod_array_length",
        "write_to_array",
    }
    global_block = program.global_block()
    keep_fp32_ops = set()
    to_fp16_var_names = set()
    origin_ops = []
    for block in program.blocks:
        origin_ops.extend(block.ops)

    for block in program.blocks:
        ops = block.ops
        for op in ops:
            if op.type == 'create_py_reader' or op.type == 'read':
                continue
            if _need_keep_fp32(op, amp_lists.unsupported_list, use_fp16_guard):
                keep_fp32_ops.add(op)
                continue  # processed below
            for in_name in op.input_names:
                # for ipu, all inputs must be converted to fp16
                if not core.is_compiled_with_ipu() and _keep_fp32_input(
                    op, in_name
                ):
                    continue
                for in_var_name in op.input(in_name):
                    in_var = None
                    try:
                        in_var = block._var_recursive(in_var_name)
                    except ValueError as e:
                        _logger.debug(
                            "-- {}, try to get it in the global block --".format(
                                e
                            )
                        )
                        in_var = global_block.var(in_var_name)
                        if in_var is not None:
                            _logger.debug(
                                "-- var {} is found in the global block --".format(
                                    in_var_name
                                )
                            )

                    if in_var is None or in_var.type not in _valid_types:
                        continue

                    if in_var.dtype == core.VarDesc.VarType.FP32:
                        in_var.desc.set_dtype(core.VarDesc.VarType.FP16)
                        to_fp16_var_names.add(in_var_name)

                    _logger.debug(
                        "-- op type: {}, in var name: {}, in var dtype: {} --".format(
                            op.type, in_var_name, in_var.dtype
                        )
                    )

            for out_name in op.output_names:
                # for ipu, all outputs must be converted to fp16
                if not core.is_compiled_with_ipu() and _keep_fp32_output(
                    op, out_name
                ):
                    continue
                for out_var_name in op.output(out_name):
                    out_var = None
                    try:
                        out_var = block._var_recursive(out_var_name)
                    except ValueError as e:
                        _logger.debug(
                            "-- {}, try to get it in the global block --".format(
                                e
                            )
                        )
                        out_var = global_block.var(out_var_name)
                        if out_var is not None:
                            _logger.debug(
                                "-- var {} is found in the global block --".format(
                                    out_var_name
                                )
                            )

                    if out_var is None or out_var.type not in _valid_types:
                        continue

                    if out_var.dtype == core.VarDesc.VarType.FP32:
                        out_var.desc.set_dtype(core.VarDesc.VarType.FP16)

                    _logger.debug(
                        "-- op type: {}, out var name: {}, out var dtype: {} --".format(
                            op.type, out_var_name, out_var.dtype
                        )
                    )
            if (
                op.has_attr('in_dtype')
                and op.attr('in_dtype') == core.VarDesc.VarType.FP32
            ):
                op._set_attr('in_dtype', core.VarDesc.VarType.FP16)
            if (
                op.has_attr('out_dtype')
                and op.attr('out_dtype') == core.VarDesc.VarType.FP32
            ):
                op._set_attr('out_dtype', core.VarDesc.VarType.FP16)
            if (
                op.has_attr('dtype')
                and op.attr('dtype') == core.VarDesc.VarType.FP32
            ):
                op._set_attr('dtype', core.VarDesc.VarType.FP16)

    # process ops in keep_fp32_ops
    op_var_rename_map = [
        collections.OrderedDict() for _ in range(len(program.blocks))
    ]
    for block in program.blocks:
        ops = block.ops
        idx = 0
        while idx < len(ops):
            op = ops[idx]
            num_cast_ops = 0
            if op in keep_fp32_ops:
                pre_cast_num = _insert_cast_op(
                    block,
                    op,
                    idx,
                    core.VarDesc.VarType.FP16,
                    core.VarDesc.VarType.FP32,
                )
                num_cast_ops += pre_cast_num
                for out_var_name in op.output_arg_names:
                    out_var = block.vars.get(out_var_name)
                    if out_var is None or out_var.type not in _valid_types:
                        continue
                    if out_var.dtype == core.VarDesc.VarType.FP16:
                        out_var.desc.set_dtype(core.VarDesc.VarType.FP32)
                        post_ops = find_true_post_op(ops, op, out_var_name)
                        for post_op in post_ops:
                            if post_op in keep_fp32_ops:
                                continue
                            post_cast_num = _insert_cast_post_op(
                                block,
                                op,
                                idx + pre_cast_num + 1,
                                core.VarDesc.VarType.FP32,
                                core.VarDesc.VarType.FP16,
                                out_var_name,
                                op_var_rename_map,
                            )
                            num_cast_ops += post_cast_num
            idx += num_cast_ops + 1

    _rename_op_input(program, op_var_rename_map, origin_ops, keep_fp32_ops)
    return to_fp16_var_names


def cast_parameters_to_fp16(place, program, scope=None, to_fp16_var_names=None):
    """
    Traverse all parameters in the whole model and set them to the FP16 data type.
    However, this function keeps the parameters of batchnorms in FP32.
    Args:
        place(fluid.CPUPlace|fluid.CUDAPlace): `place` is used to restore the FP16 weight tensors.
        program (Program): The used program.
        scope(fluid.Scope, optional): `scope` is used to get the FP32 weight tensor values.
                                      Default is None.
        to_fp16_var_names(set|list, optional): The data types of vars in `to_fp16_var_names`
                                               will be set to FP16. Usually, it is the returned
                                               value of `cast_model_to_fp16` API.
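
    Examples:
        .. code-block:: python

            # An illustrative usage sketch (not from the original docstring).
            # It assumes `main_prog`, `startup_prog` and `to_fp16_var_names`
            # were produced as in the `cast_model_to_fp16` example above.
            import paddle
            paddle.enable_static()

            if paddle.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            paddle.static.amp.cast_parameters_to_fp16(
                place, main_prog, to_fp16_var_names=to_fp16_var_names)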
    """
    all_parameters = []
    for block in program.blocks:
        all_parameters.extend(block.all_parameters())

    fp16_var_names = to_fp16_var_names if to_fp16_var_names else set()
    var_scope = scope if scope else global_scope()
    for param in all_parameters:
        if param.name in fp16_var_names:
            _logger.debug(f"---- cast {param.name} to fp16 dtype ----")
            param_t = var_scope.find_var(param.name).get_tensor()
            data = np.array(param_t)
            param_t.set(np.float16(data), place)


def rewrite_program(main_prog, amp_lists):
    """
    Traverse all ops in the current block and insert cast ops according to
    the set (black or white) each op belongs to.

    1. When an op belongs to the black list, add it to black set
    2. When an op belongs to the white list, add it to white set
    3. When an op belongs to the gray list. If one
       of its inputs is the output of black set op or black list op,
       add it to black set. If all of its previous ops are not black
       op and one of its inputs is the output of white set op or
       white list op, add it to white set.
    4. When an op isn't in the lists, add it to black op set.
    5. Add necessary cast ops to make sure that black set op will be
       computed in fp32 mode, while white set op will be computed in
       fp16 mode.

    Args:
        main_prog (Program): The main program for training.
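        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object
            describing the black, white and gray op lists.

    Examples:
        .. code-block:: python

            # An illustrative usage sketch (not from the original docstring);
            # `AutoMixedPrecisionLists` and `rewrite_program` are the names
            # defined at the top level of this module.
            import paddle
            paddle.enable_static()

            main_prog = paddle.static.Program()
            with paddle.static.program_guard(main_prog):
                x = paddle.static.data(name='x', shape=[None, 16], dtype='float32')
                hidden = paddle.static.nn.fc(x, size=4)
                loss = paddle.mean(hidden)

            rewrite_program(main_prog, AutoMixedPrecisionLists())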
    """
    block = main_prog.global_block()
    block._sync_with_cpp()
    ops = block.ops
    white_op_set = set()
    black_op_set = set()
    for op in ops:

        # NOTE(zhiqiu): 'create_py_reader' and 'read' are used in the
        # non-iterable DataLoader. We don't need to handle reader ops, and
        # the input of 'create_py_reader' is not
        # in block, which may result in errors.
        # See GeneratorLoader._init_non_iterable() for details.
        if op.type == 'create_py_reader' or op.type == 'read':
            continue

        if amp_lists.black_varnames is not None and _is_in_black_varnames(
            op, amp_lists
        ):
            black_op_set.add(op)
            continue

        if op.type in amp_lists.black_list:
            black_op_set.add(op)
        elif op.type in amp_lists.white_list:
            white_op_set.add(op)
        elif op.type in amp_lists.gray_list:
            is_black_op = False
            is_white_op = False
            for in_name in op.input_names:
                # if this op has inputs
                if in_name:
                    for in_var_name in op.input(in_name):
                        in_var = block.var(in_var_name)
                        # this in_var isn't the output of other op
                        if in_var.op is None:
                            continue
                        elif in_var.op is op:
                            prev_op = find_true_prev_op(ops, op, in_var_name)
                            if prev_op is None:
                                continue
                        else:
                            prev_op = in_var.op
                        # if it's one of inputs
                        if (
                            prev_op in black_op_set
                            or prev_op.type in amp_lists.black_list
                        ):
                            is_black_op = True
                        elif (
                            prev_op in white_op_set
                            or prev_op.type in amp_lists.white_list
                        ):
                            is_white_op = True
            if is_black_op:
                black_op_set.add(op)
            elif is_white_op:
                white_op_set.add(op)
            else:
                pass
        else:
            # For numerical safe, we apply fp32 computation on ops that
            # are not determined which list they should stay.
            black_op_set.add(op)

    idx = 0
    while idx < len(ops):
        op = ops[idx]
        num_cast_ops = 0
        if op in black_op_set:
            num_cast_ops = _insert_cast_op(
                block,
                op,
                idx,
                core.VarDesc.VarType.FP16,
                core.VarDesc.VarType.FP32,
            )
        elif op in white_op_set:
            num_cast_ops = _insert_cast_op(
                block,
                op,
                idx,
                core.VarDesc.VarType.FP32,
                core.VarDesc.VarType.FP16,
            )
        else:
            pass

        idx += num_cast_ops + 1


def update_role_var_grad(main_prog, params_grads):
    """
    Update op_role_var attr for some ops to make sure the gradients
    transferred across GPUs is FP16.
    1. Check whether the op that outputs the gradient is a cast op.
    2. If the op is a cast op and the gradient is FP32, remove its op_role_var
       attr and find the prev op which outputs the FP16 gradient.
    3. Update the op_role_var of that prev op.

    Args:
        main_prog (Program): The main program for training.
        params_grads (list): A list of params and grads.
    """
    block = main_prog.global_block()
    block._sync_with_cpp()
    BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward
    OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize
    for p, g in params_grads:
        op = g.op
        if g.dtype == core.VarDesc.VarType.FP32 and op.type == 'cast':
            role = op.attr('op_role')
            if role & int(BACKWARD) and op.has_attr('op_role_var'):
                op._remove_attr("op_role_var")
            else:
                raise ValueError(
                    f"The cast op {op} must be in BACKWARD role "
                    "and have op_role_var attr."
                )

            fp16_grad_name = op.input(op.input_names[0])[0]
            op_for_fp16_grad = find_true_prev_op(block.ops, op, fp16_grad_name)
            op_role_var_attr_name = (
                core.op_proto_and_checker_maker.kOpRoleVarAttrName()
            )
            attr_val = [p.name, fp16_grad_name]
            if op_for_fp16_grad.has_attr(op_role_var_attr_name):
                attr_val.extend(op_for_fp16_grad.attr(op_role_var_attr_name))
            op_for_fp16_grad._set_attr(op_role_var_attr_name, attr_val)

            # Maximize the all_reduce overlap, and perform the cast
            # operation after gradients transfer.
            op._set_attr('op_role', OPTIMIZE)
            # optimize op should stay behind forward and backward ops
            if op == block.ops[-1]:
                continue
            post_ops = find_true_post_op(block.ops, op, g.name)
            if post_ops:
                raise ValueError(
                    f"The cast op {op}'s output should not be "
                    "used by a non-optimize op, however, it "
                    f"is used by {post_ops[0]}"
                )
            # add new op in the python and cpp at the same time
            new_op_desc = block.desc.append_op()
            new_op_desc.copy_from(op.desc)
            new_op = framework.Operator(
                block=block,
                desc=new_op_desc,
                type=None,
                inputs=None,
                outputs=None,
                attrs=None,
            )
            block.ops.append(new_op)
            op_idx = find_op_index(block.desc, op.desc)
            if op_idx == -1:
                raise ValueError(f"The op {op} is not in program")
            block._remove_op(op_idx, sync=False)
    block._sync_with_cpp()