# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.framework import core
from paddle.fluid import unique_name
from .pass_base import PassBase, register_pass
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.fluid.data_feeder import check_variable_and_dtype, check_type
from paddle.distributed.auto_parallel.utils import (
    get_loss_op,
    set_var_dist_attr,
)
from paddle.distributed.auto_parallel.utils import (
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
)
from paddle.distributed.auto_parallel.process_group import (
    get_world_process_group,
)
from paddle.fluid.contrib.mixed_precision.fp16_utils import (
    AutoMixedPrecisionLists,
)
from paddle.fluid.contrib.mixed_precision.fp16_utils import (
    _keep_fp32_input,
    _keep_fp32_output,
    find_op_index,
)
from paddle.fluid.contrib.mixed_precision.fp16_utils import (
    _valid_types,
    find_true_post_op,
    find_true_prev_op,
)
from paddle.fluid.contrib.mixed_precision.fp16_utils import (
    _is_in_black_varnames,
    _dtype_to_str,
    _rename_arg,
)
from paddle.distributed.auto_parallel.dist_attribute import (
    OperatorDistributedAttribute,
)
from ..auto_parallel.utils import is_forward_op, is_backward_op, is_loss_op

world_process_group = get_world_process_group()


class AMPState:
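    """Per-block state used by the auto-parallel AMP pass.

    Records, for every forward op, whether it runs in fp16 or fp32 as well as
    the mapping from original input names to their casted names, and rewrites
    the forward and backward programs accordingly.
    """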
    def __init__(self, block):
        self._block = block
        # op_id --> True/False. True means that the op runs in fp16 mode.
        self._op_fp16_dict = {}
        self._var_name_dict = {}  # fwd_op_id --> {old_name: cast_name}
        self.is_train = False

    def _is_fp16_op(self, op_id):
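        # True -> fp16, False -> fp32, None -> the op was never classified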
        return self._op_fp16_dict.get(op_id, None)

    def _build_state(self, amp_lists, dist_context):
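        """
        Mark each forward op as fp16 or fp32 according to `amp_lists`, propagate
        the decision to the corresponding grad ops, and return whether this is a
        training block (i.e. it contains a loss-grad op).
        """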
        ops = self._block.ops
        dist_op_context = dist_context.dist_op_context
        for op in ops:
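            # op_role 257 == OpRole.Backward | OpRole.Loss, i.e. the op that
            # produces the initial loss gradient; its presence implies training.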
            if int(op.attr('op_role')) == 257:
                self.is_train = True

            if int(op.attr('op_role')) == int(OpRole.Forward):
                self._mark_black_white_ops(amp_lists)
            elif int(op.attr('op_role')) == int(OpRole.Backward):
                if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id:
                    fwd_op_id = dist_op_context.grad_op_id_to_op_id[
                        op.desc.original_id()
                    ]
                    if self._is_fp16_op(fwd_op_id) is True:
                        self._op_fp16_dict[op.desc.original_id()] = True
                    elif self._is_fp16_op(fwd_op_id) is False:
                        self._op_fp16_dict[op.desc.original_id()] = False
            elif int(op.attr('op_role')) == int(OpRole.Optimize):
                break

        return self.is_train

    def _mark_black_white_ops(self, amp_lists):
        """
        Mark each forward op as fp16 (white) or fp32 (black) according to amp_lists.
        This function is modified from paddle.fluid.contrib.mixed_precision.
        """
        self._block._sync_with_cpp()
        ops = self._block.ops

        for op in ops:
            if int(op.attr('op_role')) == int(OpRole.Backward):
                break
            if op.type == 'create_py_reader' or op.type == 'read':
                continue
            if amp_lists.black_varnames is not None and _is_in_black_varnames(
                op, amp_lists
            ):
                self._op_fp16_dict[op.desc.original_id()] = False
                continue
            if op.type in amp_lists.black_list:
                self._op_fp16_dict[op.desc.original_id()] = False
            elif op.type in amp_lists.white_list:
                self._op_fp16_dict[op.desc.original_id()] = True
            elif op.type in amp_lists.gray_list:
                is_black_op = False
                is_white_op = False
                for in_name in op.input_names:
                    # if this op has inputs
                    if in_name:
                        for in_var_name in op.input(in_name):
                            in_var = self._block.var(in_var_name)
                            # skip this in_var if it is not produced by any op
                            if in_var.op is None:
                                continue
                            elif in_var.op is op:
                                prev_op = find_true_prev_op(
                                    ops, op, in_var_name
                                )
                                if prev_op is None:
                                    continue
                            else:
                                prev_op = in_var.op
                            # inherit black/white from the op that produced this input
                            if (
                                self._is_fp16_op(prev_op.desc.original_id())
                                is False
                                or prev_op.type in amp_lists.black_list
                            ):
                                is_black_op = True
                            elif (
                                self._is_fp16_op(prev_op.desc.original_id())
                                is True
                                or prev_op.type in amp_lists.white_list
                            ):
                                is_white_op = True
                if is_black_op:
                    self._op_fp16_dict[op.desc.original_id()] = False
                elif is_white_op:
                    self._op_fp16_dict[op.desc.original_id()] = True
                else:
                    pass
            else:
                # For numerical safety, keep fp32 computation for ops that do
                # not clearly belong to any list.
                self._op_fp16_dict[op.desc.original_id()] = False

    def cast_forward_program(self, dist_context):
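        """Insert cast ops into the forward program according to the fp16/fp32
        decision recorded for each forward op."""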
        ops = self._block.ops
        idx = 0
        while idx < len(ops):
            op = ops[idx]
            num_cast_ops = 0
            if int(op.attr('op_role')) == int(OpRole.Backward):
                break
            if self._is_fp16_op(op.desc.original_id()) is False:
                num_cast_ops = self._insert_cast_op_forward(
                    op,
                    idx,
                    core.VarDesc.VarType.FP16,
                    core.VarDesc.VarType.FP32,
                    dist_context,
                )
            elif self._is_fp16_op(op.desc.original_id()) is True:
                num_cast_ops = self._insert_cast_op_forward(
                    op,
                    idx,
                    core.VarDesc.VarType.FP32,
                    core.VarDesc.VarType.FP16,
                    dist_context,
                )
            else:
                pass
            idx += num_cast_ops + 1
        self._block._sync_with_cpp()

    def _insert_cast_op_forward(
        self, op, idx, src_dtype, dst_dtype, dist_context
    ):
        """
        Insert cast ops for the inputs/outputs of a single forward op.
        Modified from paddle.fluid.contrib.mixed_precision.
        """
        num_cast_ops = 0
        var_name_dict = {}
        for in_name in op.input_names:
            if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input(
                op, in_name
            ):
                continue
            for in_var_name in op.input(in_name):
                in_var = self._block._find_var_recursive(in_var_name)
                if in_var.type not in _valid_types or in_var.dtype == dst_dtype:
                    continue
                if in_var.dtype == src_dtype:
                    cast_name = (
                        in_var.name + '.cast_' + _dtype_to_str(dst_dtype)
                    )
                    out_var = self._block.vars.get(cast_name)
                    var_name_dict[in_var.name] = cast_name
                    consume_op_attr = dist_context.get_op_dist_attr_for_program(
                        op
                    )
                    assert consume_op_attr is not None
                    if out_var is None or out_var.dtype != dst_dtype:
                        # NOTE: the dist attr of the cast op and the cast var is taken
                        # from the op that consumes the cast var, not from the op that
                        # produces it.
                        in_var_dist_attr = consume_op_attr.get_input_dist_attr(
                            in_var.name
                        )
                        assert in_var_dist_attr is not None
                        ref_mesh = in_var_dist_attr.process_mesh
                        ref_mapping = in_var_dist_attr.dims_mapping
                        consume_op_attr.set_input_dist_attr(
                            cast_name, in_var_dist_attr
                        )

                        out_var = self._block.create_var(
                            name=cast_name,
                            dtype=dst_dtype,
                            persistable=False,
                            stop_gradient=in_var.stop_gradient,
                        )
                        set_var_dist_attr(
                            dist_context, out_var, ref_mapping, ref_mesh
                        )

                        cast_op = self._block._insert_op_without_sync(
                            idx,
                            type="cast",
                            inputs={"X": in_var},
                            outputs={"Out": out_var},
                            attrs={
                                "in_dtype": in_var.dtype,
                                "out_dtype": out_var.dtype,
                            },
                        )
                        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                            cast_op, ref_mesh, ref_mapping, dist_context
                        )
                        num_cast_ops += 1
                    else:
                        in_var_dist_attr = consume_op_attr.get_input_dist_attr(
                            in_var.name
                        )
                        consume_op_attr.set_input_dist_attr(
                            cast_name, in_var_dist_attr
                        )
                    _rename_arg(op, in_var.name, cast_name)
                else:
                    if op.has_attr('in_dtype'):
                        op._set_attr('in_dtype', dst_dtype)
        self._var_name_dict[op.desc.original_id()] = var_name_dict

        if (
            src_dtype == core.VarDesc.VarType.FP32
            and dst_dtype == core.VarDesc.VarType.FP16
        ):
            for out_name in op.output_names:
                if _keep_fp32_output(op, out_name):
                    continue
                for out_var_name in op.output(out_name):
                    out_var = self._block.var(out_var_name)
                    if out_var.type not in _valid_types:
                        continue
                    if out_var.dtype == core.VarDesc.VarType.FP32:
                        out_var.desc.set_dtype(core.VarDesc.VarType.FP16)
                        if op.has_attr('out_dtype'):
                            op._set_attr('out_dtype', core.VarDesc.VarType.FP16)
        return num_cast_ops

    def cast_backward_program(self, params_grads, dist_context):
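        """Insert cast ops into the backward program so that grad dtypes stay
        consistent with the (possibly casted) forward vars."""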
        self._block._sync_with_cpp()
        ops = self._block.ops

        loss_op = get_loss_op(self._block)
        loss_op_index = find_op_index(self._block.desc, loss_op.desc)

        appended_grad_times = 0
        idx = loss_op_index + 1
        while idx < len(ops):
            num_cast_ops = 0
            grad_op = ops[idx]

            # NOTE: the mapping in `grad_var_to_var` may change when a var is cast,
            # which affects how the dist_op inserts the allreduce_sum op.
            op_dist_attr = dist_context.get_op_dist_attr_for_program(grad_op)
            if is_backward_op(grad_op) and (
                is_forward_op(ops[idx - 1]) or is_loss_op(ops[idx - 1])
            ):
                if not op_dist_attr.is_recompute:
                    appended_grad_times += 1

            grad_op_orig_id = grad_op.desc.original_id()
            dist_op_context = dist_context.dist_op_context
            if grad_op_orig_id in dist_op_context.grad_op_id_to_op_id:
                if self._is_fp16_op(grad_op_orig_id) is False:  # fp32
                    num_cast_ops = self._insert_cast_op_backward(
                        grad_op,
                        idx,
                        core.VarDesc.VarType.FP16,
                        core.VarDesc.VarType.FP32,
                        dist_context,
                        appended_grad_times,
                    )
                elif self._is_fp16_op(grad_op_orig_id) is True:  # fp16
                    num_cast_ops = self._insert_cast_op_backward(
                        grad_op,
                        idx,
                        core.VarDesc.VarType.FP32,
                        core.VarDesc.VarType.FP16,
                        dist_context,
                        appended_grad_times,
                    )
            elif grad_op.type == "sum":
                in_var_name = grad_op.desc.input_arg_names()[0]
                src_dtype = self._block.var(in_var_name).dtype
                for in_var_name in grad_op.desc.input_arg_names():
                    assert src_dtype == self._block.var(in_var_name).dtype
                out_var_name = grad_op.desc.output_arg_names()[0]
                out_var = self._block.var(out_var_name)
                if out_var.dtype != src_dtype:
                    out_var.desc.set_dtype(src_dtype)
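            # op_role 257 == OpRole.Backward | OpRole.Loss: the loss-grad op
            # needs no cast here.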
            elif int(grad_op.attr('op_role')) == 257:
                pass
            else:
                raise ValueError(
                    "'{}' op is not supported in the complete amp pass.".format(
                        grad_op.type
                    )
                )
            idx += num_cast_ops + 1

        self._block._sync_with_cpp()
        _update_backward_cast_ops(params_grads, dist_context)

    def _insert_cast_op_backward(
        self,
        grad_op,
        idx,
        src_dtype,
        dst_dtype,
        dist_context,
        appended_grad_times,
    ):
        """only for backward cast"""

        def _keep_fp32_input(op, in_name):
            op_type = op.type
            if op_type in ['layer_norm_grad']:
                return in_name not in {'X', 'Y@GRAD'}
            return False

        def _keep_fp32_output(op, out_name):
            op_type = op.type
            if op_type in ['layer_norm_grad']:
                return out_name != 'X@GRAD'
            return False

        num_cast_ops = 0
        original_id = grad_op.desc.original_id()
        dist_op_context = dist_context.dist_op_context
        fwd_op_id = dist_op_context.grad_op_id_to_op_id[original_id]

        for in_name in grad_op.input_names:
            if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input(
                grad_op, in_name
            ):
                for in_var_name in grad_op.input(in_name):
                    in_var = self._block._find_var_recursive(in_var_name)
                    assert in_var.dtype == core.VarDesc.VarType.FP32
                continue

            for in_var_name in grad_op.input(in_name):
                in_var = self._block._find_var_recursive(in_var_name)
                if in_var.dtype == src_dtype:
                    consume_op_attr = dist_context.get_op_dist_attr_for_program(
                        grad_op
                    )
                    if in_var_name in self._var_name_dict[fwd_op_id]:
                        # NOTE: if the in_var consumed by grad_op was cast in the
                        # forward pass, rename it here and reset its dist_attr.
                        cast_name = self._var_name_dict[fwd_op_id][in_var_name]
                        grad_op.desc._rename_input(in_var_name, cast_name)
                        in_var_dist_attr = consume_op_attr.get_input_dist_attr(
                            in_var_name
                        )
                        consume_op_attr.set_input_dist_attr(
                            cast_name, in_var_dist_attr
                        )
                    else:
                        assert (
                            in_var.dtype == dst_dtype
                        ), "op [{}] expect input [{}] to be dtype [{}] BUT got [{}]. {}".format(
                            grad_op.type,
                            in_name,
                            dst_dtype,
                            in_var.dtype,
                            str(grad_op),
                        )

        for out_name in grad_op.output_names:
            if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_output(
                grad_op, out_name
            ):
                for out_var_name in grad_op.output(out_name):
                    out_var = self._block._find_var_recursive(out_var_name)
                    assert out_var.dtype == core.VarDesc.VarType.FP32
                continue

            for out_var_name in grad_op.output(out_name):
                out_var = self._block._find_var_recursive(out_var_name)
                out_var_name_prefix = out_var_name[: out_var_name.find("@")]
                fwd_var = self._block._find_var_recursive(out_var_name_prefix)
                # NOTE: the dtype of the out_var consumed by grad_op should be
                # equal to the fwd_var's dtype.
                if out_var.dtype != fwd_var.dtype:
                    out_var.desc.set_dtype(fwd_var.dtype)

                if out_var.dtype == src_dtype:
                    if out_var_name_prefix in self._var_name_dict[fwd_op_id]:
                        # NOTE: if the out_var consumed by grad_op was cast in the
                        # forward pass, rename it, reset its dist_attr, and insert a
                        # cast op to convert the cast var back to the original dtype.
                        consume_op_attr = (
                            dist_context.get_op_dist_attr_for_program(grad_op)
                        )
                        fwd_cast_name = self._var_name_dict[fwd_op_id][
                            out_var_name_prefix
                        ]
                        suffix = ""
                        if "@RENAME" in out_var_name:
                            suffix = out_var_name[
                                out_var_name.find("@RENAME") :
                            ]
                        cast_name = fwd_cast_name + "@GRAD" + suffix
                        cast_var = self._block.vars.get(cast_name)
                        if cast_var is None or cast_var.dtype != dst_dtype:
                            grad_op.desc._rename_output(out_var_name, cast_name)
                            out_var_dist_attr = (
                                consume_op_attr.get_output_dist_attr(
                                    out_var_name
                                )
                            )
                            ref_mesh = out_var_dist_attr.process_mesh
                            ref_mapping = out_var_dist_attr.dims_mapping
                            consume_op_attr.set_output_dist_attr(
                                cast_name, out_var_dist_attr
                            )
                            assert ref_mapping is not None
                            cast_var = self._block.create_var(
                                name=cast_name,
                                shape=out_var.shape,
                                dtype=dst_dtype,
                                persistable=False,
                                stop_gradient=out_var.stop_gradient,
                            )
                            set_var_dist_attr(
                                dist_context, cast_var, ref_mapping, ref_mesh
                            )
                            dist_op_context.grad_var_to_var[
                                appended_grad_times
                            ][cast_name] = fwd_cast_name

                            cast_op = self._block._insert_op(
                                idx + 1,
                                type="cast",
                                inputs={"X": cast_var},
                                outputs={"Out": out_var},
                                attrs={
                                    "in_dtype": cast_var.dtype,
                                    "out_dtype": out_var.dtype,
                                    "op_role": OpRole.Backward,
                                },
                            )
                            cast_op._remove_attr("op_role_var")
                            cast_op._remove_attr("op_namescope")
                            cast_op._remove_attr("with_quant_attr")
                            naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                                cast_op, ref_mesh, ref_mapping, dist_context
                            )
                            num_cast_ops += 1
                else:
                    assert out_var.dtype == dst_dtype

        return num_cast_ops


def _update_backward_cast_ops(params_grads, dist_context):
    """
    Move the param-grad cast ops to the end of the backward segment
    in order to enable fp16 allreduce.
    """
    # TODO filter optimize ops in future

    main_block = paddle.static.default_main_program().global_block()
    main_block._sync_with_cpp()

    for p, g in params_grads:
        op = g.op
        if g.dtype == core.VarDesc.VarType.FP32 and op.type == 'cast':
            if int(op.attr('op_role')) == int(OpRole.Backward) and op.has_attr(
                'op_role_var'
            ):
                op._remove_attr("op_role_var")

            post_ops = find_true_post_op(main_block.ops, op, g.name)
            if post_ops:
                raise ValueError(
                    "The cast op {0}'s output should not be "
                    "used by a non-optimize op, however, it "
                    "is used by {1}".format(op, post_ops[0])
                )

            if op == main_block.ops[-1]:
                continue

            # add new op in the python and cpp at the same time
            new_op_desc = main_block.desc.append_op()
            new_op_desc.copy_from(op.desc)
            new_op = paddle.fluid.framework.Operator(
                block=main_block,
                desc=new_op_desc,
                type=None,
                inputs=None,
                outputs=None,
                attrs=None,
            )
            main_block.ops.append(new_op)

            # dist attr
            param_dist_attr = dist_context.get_tensor_dist_attr_for_program(p)
            output_dist_attr = dist_context.get_tensor_dist_attr_for_program(
                main_block.var(op.output_arg_names[0])
            )
            assert param_dist_attr is not None
            assert output_dist_attr is not None
            naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                new_op,
                param_dist_attr.process_mesh,
                param_dist_attr.dims_mapping,
                dist_context,
            )
J
            output_dist_attr.process_mesh = param_dist_attr.process_mesh
            output_dist_attr.dims_mapping = param_dist_attr.dims_mapping

            op_idx = find_op_index(main_block.desc, op.desc)
            if op_idx == -1:
                raise ValueError("The op {0} is not in program".format(op))
            main_block._remove_op(op_idx, sync=False)

    main_block._sync_with_cpp()


def _check_and_update_gradient(params_grads, loss_scaling, dist_context):
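    """Append a check_finite_and_unscale op that unscales the gradients by
    `loss_scaling` and reports whether any of them contains inf/nan."""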

    main_block = paddle.static.default_main_program().global_block()
    main_block._sync_with_cpp()

    grads = [g for _, g in params_grads]
    check_type(grads, 'x', (tuple, list), 'check_finite_and_unscale')
    for e in grads:
        check_variable_and_dtype(
            e,
            "x",
            ['float16', 'float32', 'float64'],
            'check_finite_and_unscale',
        )

    found_inf = main_block.create_var(
        name=unique_name.generate_with_ignorable_key(
            ".".join(['find_infinite_scale', 'tmp'])
        ),
        shape=[1],
        dtype='bool',
        type=core.VarDesc.VarType.LOD_TENSOR,
        persistable=False,
        stop_gradient=False,
    )
    set_var_dist_attr(dist_context, found_inf, [-1], world_process_group.ranks)

    inputs = {'X': grads, 'Scale': loss_scaling}
    outputs = {'Out': grads, 'FoundInfinite': found_inf}
    attrs = {'op_role': OpRole.Optimize}
    new_op = main_block.append_op(
        type='check_finite_and_unscale',
        inputs=inputs,
        outputs=outputs,
        attrs=attrs,
    )

    new_op_dist_attr = OperatorDistributedAttribute()
    new_op_dist_attr.process_mesh = world_process_group.ranks
    new_op_dist_attr.impl_idx = 0
    if len(world_process_group.ranks) > 1:
        new_op_dist_attr.impl_type = "check_finite_and_unscale"
    for g in grads:
        g_dist_attr = dist_context.get_tensor_dist_attr_for_program(g)
        assert g_dist_attr is not None
        new_op_dist_attr.set_input_dims_mapping(
            g.name, g_dist_attr.dims_mapping
        )
        new_op_dist_attr.set_output_dims_mapping(
            g.name, g_dist_attr.dims_mapping
        )
    dist_context.set_op_dist_attr_for_program(new_op, new_op_dist_attr)
    return grads, found_inf


@register_pass("auto_parallel_amp")
class AMPPass(PassBase):
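    """Auto-parallel automatic mixed precision (AMP) pass.

    Rewrites the serial program so that white-listed ops run in fp16, scales
    the loss, and optionally maintains dynamic loss scaling state across steps.

    A rough usage sketch (the `new_pass` / `PassContext` wiring below is an
    assumption; the real driver lives in the auto-parallel parallelizer and
    may differ):

        from paddle.distributed.passes import new_pass, PassContext

        amp_pass = new_pass(
            "auto_parallel_amp",
            {
                "dist_context": dist_context,  # assumed prepared by the caller
                "params_grads": params_grads,
                "loss": loss,
                "custom_white_list": [],
                "custom_black_list": [],
                "custom_black_varnames": [],
            },
        )
        amp_pass.apply([main_program], [startup_program], PassContext())
    """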
    def __init__(self):
        super().__init__()
        self.set_attr("loss", None)
        self.set_attr("dist_context", None)
        self.set_attr("custom_white_list", None)
        self.set_attr("custom_black_list", None)
        self.set_attr("custom_black_varnames", None)
        self.set_attr("init_loss_scaling", 32768.0)
        self.set_attr("incr_every_n_steps", 1000)
        self.set_attr("decr_every_n_nan_or_inf", 2)
        self.set_attr("incr_ratio", 2.0)
        self.set_attr("decr_ratio", 0.8)
        self.set_attr("use_dynamic_loss_scaling", False)
        self.set_attr("input_data", [])
        self.set_attr("params_grads", [])
        self._loss = None
        self._loss_scaling = None
        self._num_good_steps = None
        self._num_bad_steps = None
        self._loss = None

    def _check_self(self):
        if self.get_attr("init_loss_scaling") < 0:
            return False
        if self.get_attr("incr_every_n_steps") < 0:
            return False
        if self.get_attr("decr_every_n_nan_or_inf") < 0:
            return False
        if self.get_attr("incr_ratio") < 0:
            return False
        if self.get_attr("decr_ratio") < 0:
            return False
        if self.get_attr("dist_context") is None:
            return False
        return True

    def _check_conflict(self, other_pass):

        return True

    # NOTE: AMPPass overrides _apply_single_impl instead of _apply_impl because
    # AMP is an optimization pass for the serial program; in the distributed
    # scenario, every rank must apply exactly the same modification.
    def _apply_single_impl(self, main_program, startup_program, context):
        self.dist_context = self.get_attr("dist_context")
        params_grads = self.get_attr("params_grads")

        amp_lists = AutoMixedPrecisionLists(
            set(self.get_attr("custom_white_list")),
            set(self.get_attr("custom_black_list")),
            set(self.get_attr("custom_black_varnames")),
        )

        with paddle.static.program_guard(main_program, startup_program):
            amp_state = AMPState(main_program.global_block())
            is_train = amp_state._build_state(amp_lists, self.dist_context)

            amp_state.cast_forward_program(self.dist_context)

        if is_train:
            with paddle.static.program_guard(main_program, startup_program):
                amp_state.cast_backward_program(params_grads, self.dist_context)
                self._init_amp_var()
                self._scale_loss()

                if (
                    self.get_attr("use_dynamic_loss_scaling")
                    or self.get_attr("init_loss_scaling") != 1.0
                ):
                    grads, found_inf = _check_and_update_gradient(
                        params_grads, self._loss_scaling, self.dist_context
                    )

                if self.get_attr("use_dynamic_loss_scaling"):
                    self._update_loss_scaling(grads, found_inf)

    def _init_amp_var(self):
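        """Create the global loss_scaling / num_good_steps / num_bad_steps vars
        and register their dist attrs on the world process group."""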
        self._loss_scaling = paddle.static.create_global_var(
            name=unique_name.generate("loss_scaling"),
            shape=[1],
            value=self.get_attr("init_loss_scaling"),
            dtype='float32',
            persistable=True,
        )
        set_var_dist_attr(
            self.dist_context,
            self._loss_scaling,
            [-1],
            world_process_group.ranks,
        )

        if self.get_attr("use_dynamic_loss_scaling"):
            self._num_good_steps = paddle.static.create_global_var(
                name=unique_name.generate("num_good_steps"),
                shape=[1],
                value=0,
                dtype='int32',
                persistable=True,
            )
            set_var_dist_attr(
                self.dist_context,
                self._num_good_steps,
                [-1],
                world_process_group.ranks,
            )

            self._num_bad_steps = paddle.static.create_global_var(
                name=unique_name.generate("num_bad_steps"),
                shape=[1],
                value=0,
                dtype='int32',
                persistable=True,
            )
            set_var_dist_attr(
                self.dist_context,
                self._num_bad_steps,
                [-1],
                world_process_group.ranks,
            )

    def _scale_loss(self):
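        """Cast the loss to fp32 if necessary and multiply it by loss_scaling,
        inserting the matching cast / elementwise_mul_grad ops for backward."""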

        main_block = paddle.static.default_main_program().global_block()
        main_block._sync_with_cpp()
        OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()

        loss = self.get_attr("loss")
        assert loss is not None
        loss_op = loss.op
        loss_op_dist_attr = self.dist_context.get_op_dist_attr_for_program(
            loss_op
        )

        if loss.dtype != core.VarDesc.VarType.FP32:

            tmp_name = unique_name.generate(loss.name + ".cast_fp32")
            cast_loss = main_block.create_var(
                name=tmp_name, dtype=core.VarDesc.VarType.FP32
            )
            loss_dist_attr = self.dist_context.get_tensor_dist_attr_for_program(
                loss
            )
            ref_mesh = loss_op_dist_attr.process_mesh
            self.dist_context.set_tensor_dist_attr_for_program(
                cast_loss, loss_dist_attr
            )

            # forward
            loss_op_idx = find_op_index(main_block.desc, loss_op.desc)
            cast_op = main_block._insert_op(
                loss_op_idx + 1,
                type='cast',
                inputs={'X': [loss]},
                outputs={'Out': [cast_loss]},
                attrs={
                    "in_dtype": loss.dtype,
                    "out_dtype": core.VarDesc.VarType.FP32,
                    'op_role': loss_op.all_attrs()[OP_ROLE_KEY],
                },
            )

            loss_op._set_attr(
                OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Forward
            )
            naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                cast_op, ref_mesh, [-1], self.dist_context
            )

            # backward
            first_backward_op = main_block.ops[loss_op_idx + 2]
            assert (
                first_backward_op.type == "fill_constant"
                and int(first_backward_op.all_attrs()[OP_ROLE_KEY]) == 257
            )
            cast_loss_grad = main_block.create_var(
                name=unique_name.generate(tmp_name + "@GRAD"),
                shape=loss.shape,
                dtype=core.VarDesc.VarType.FP32,
                persistable=loss.persistable,
            )
            set_var_dist_attr(self.dist_context, cast_loss_grad, [-1], ref_mesh)

            pre_grad_name = first_backward_op.output_arg_names[0]
            first_backward_op._rename_output(pre_grad_name, cast_loss_grad.name)
            cast_grad_op = main_block._insert_op(
                loss_op_idx + 3,
                type='cast',
                inputs={'X': [cast_loss_grad]},
                outputs={'Out': [pre_grad_name]},
                attrs={
                    "in_dtype": core.VarDesc.VarType.FP32,
                    "out_dtype": core.VarDesc.VarType.FP16,
                    'op_role': core.op_proto_and_checker_maker.OpRole.Backward,
                },
            )
            naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                cast_grad_op, ref_mesh, [-1], self.dist_context
            )
            loss_op = cast_op
            loss = cast_loss

        if (
            self.get_attr("use_dynamic_loss_scaling")
            or self.get_attr("init_loss_scaling") != 1.0
        ):

            loss_op_idx = find_op_index(main_block.desc, loss_op.desc)

            # forward
            ref_mesh = loss_op_dist_attr.process_mesh
            self._scaled_loss = main_block.create_var(
                name=unique_name.generate("scaled_loss"),
                shape=loss.shape,
                dtype=loss.dtype,
                persistable=loss.persistable,
            )
            set_var_dist_attr(
                self.dist_context, self._scaled_loss, [-1], ref_mesh
            )

            elementwise_mul_op = main_block._insert_op(
                loss_op_idx + 1,
                type='elementwise_mul',
                inputs={'X': [loss], 'Y': [self._loss_scaling]},
                outputs={'Out': [self._scaled_loss]},
                attrs={
                    'op_role': loss_op.all_attrs()[OP_ROLE_KEY],
                },
            )
            loss_op._set_attr(
                OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Forward
            )
            naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                elementwise_mul_op, ref_mesh, [-1], self.dist_context
            )

            # backward
            first_backward_op = main_block.ops[loss_op_idx + 2]
            assert (
                first_backward_op.type == "fill_constant"
                and int(first_backward_op.all_attrs()[OP_ROLE_KEY]) == 257
            )
            self._scaled_loss_grad = main_block.create_var(
                name=unique_name.generate("scaled_loss") + "@GRAD",
                shape=loss.shape,
                dtype=loss.dtype,
                persistable=loss.persistable,
            )
            set_var_dist_attr(
                self.dist_context, self._scaled_loss_grad, [-1], ref_mesh
            )
            pre_grad_name = first_backward_op.output_arg_names[0]
            first_backward_op._rename_output(
                pre_grad_name, self._scaled_loss_grad.name
            )
            # FIXME(JZ-LIANG) a trick to insert backward op
            main_block._sync_with_cpp()
            elementwise_mul_grad_op_desc = main_block.desc._insert_op(
                loss_op_idx + 3
            )
            elementwise_mul_grad_op_desc.set_type("elementwise_mul_grad")
            elementwise_mul_grad_op_desc.set_input(
                'Out@GRAD', [self._scaled_loss_grad.name]
            )
            elementwise_mul_grad_op_desc.set_input('X', [loss.name])
            elementwise_mul_grad_op_desc.set_input(
                'Y', [self._loss_scaling.name]
            )
            elementwise_mul_grad_op_desc.set_output('X@GRAD', [pre_grad_name])
            elementwise_mul_grad_op_desc.set_output('Y@GRAD', [])
            elementwise_mul_grad_op_desc._set_attr(
                OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Backward
            )
            elementwise_mul_grad_op_desc._set_attr('axis', -1)
            elementwise_mul_grad_op = paddle.fluid.framework.Operator(
                main_block, elementwise_mul_grad_op_desc
            )
            main_block.ops.insert(loss_op_idx + 3, elementwise_mul_grad_op)
            main_block._sync_with_cpp()
            elementwise_mul_grad_op = main_block.ops[loss_op_idx + 3]
            assert elementwise_mul_grad_op.type == "elementwise_mul_grad"
            naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                elementwise_mul_grad_op, ref_mesh, [-1], self.dist_context
            )

        else:
            self._scaled_loss = loss
        self._loss = loss
        main_block._sync_with_cpp()

    def _update_loss_scaling(self, grads, found_inf):
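        """Append an update_loss_scaling op that adjusts loss_scaling (and the
        good/bad step counters) according to whether inf/nan was found."""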

        main_block = paddle.static.default_main_program().global_block()
        main_block._sync_with_cpp()

        check_variable_and_dtype(
            self._loss_scaling,
            "prev_loss_scaling",
            ['float32', 'float64'],
            "update_loss_scaling",
        )
        check_type(grads, 'x', (tuple, list), 'update_loss_scaling')
        for e in grads:
            check_variable_and_dtype(
                e, "x", ['float16', 'float32', 'float64'], 'update_loss_scaling'
            )
            if e.dtype == core.VarDesc.VarType.FP16:
                assert (
                    self._loss_scaling.dtype == core.VarDesc.VarType.FP32
                ), "The dtype of prev_loss_scaling should be float32 when the dtype of x is float16."
            else:
                assert (
                    self._loss_scaling.dtype == e.dtype
                ), "The dtype of prev_loss_scaling should be equal to the dtype of x."

        inputs = {
            'X': grads,
            'FoundInfinite': found_inf,
            'PrevLossScaling': self._loss_scaling,
            'InGoodSteps': self._num_good_steps,
            'InBadSteps': self._num_bad_steps,
        }

        outputs = {
            'Out': grads,
            'LossScaling': self._loss_scaling,
            'OutGoodSteps': self._num_good_steps,
            'OutBadSteps': self._num_bad_steps,
        }

        attrs = {
            'incr_every_n_steps': self.get_attr("incr_every_n_steps"),
            'decr_every_n_nan_or_inf': self.get_attr("decr_every_n_nan_or_inf"),
            'incr_ratio': self.get_attr("incr_ratio"),
            'decr_ratio': self.get_attr("decr_ratio"),
            'stop_update': self.get_attr("stop_update"),
            'op_role': OpRole.Optimize,
        }

        new_op = main_block.append_op(
            type='update_loss_scaling',
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
        )

        new_op_dist_attr = OperatorDistributedAttribute()
        new_op_dist_attr.process_mesh = world_process_group.ranks
        new_op_dist_attr.impl_idx = 0
        if len(world_process_group.ranks) > 1:
            new_op_dist_attr.impl_type = "update_loss_scaling"
        for g in grads:
            g_dist_attr = self.dist_context.get_tensor_dist_attr_for_program(g)
            assert g_dist_attr is not None
            new_op_dist_attr.set_input_dims_mapping(
                g.name, g_dist_attr.dims_mapping
            )
            new_op_dist_attr.set_output_dims_mapping(
                g.name, g_dist_attr.dims_mapping
            )
        self.dist_context.set_op_dist_attr_for_program(new_op, new_op_dist_attr)

        main_block._sync_with_cpp()

    def get_loss(self):
        # The AMP / FP16 pass may change the effective loss variable of the
        # network, which affects subsequent passes that rely on the loss.
        # Return the effective loss after the AMP / FP16 pass.

        if self._loss:
            return self._loss
        else:
            return self.get_attr("loss")