# auto_parallel_sharding.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
#
J
JZ-LIANG 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
6
#
J
JZ-LIANG 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
8
#
J
JZ-LIANG 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import logging
16
from functools import reduce
17 18

import paddle
19
from paddle.distributed.auto_parallel.operators.common import (
20
    ParallelMode,
21
    is_data_parallel_reduce_op,
22
    is_parameter_related,
23
)
24
from paddle.distributed.auto_parallel.process_group import new_process_group
25 26
from paddle.distributed.auto_parallel.utils import (
    _get_comm_group,
27 28
    get_logger,
    get_var_numel,
29 30 31 32 33
    insert_dependencies_for_vars,
    is_backward_op,
    is_dep_skip_op,
    is_loss_grad_op,
    is_optimize_op,
34 35
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
    set_var_dist_attr,
36 37
)
from paddle.distributed.fleet.meta_optimizers.sharding.utils import get_var_size
38
from paddle.fluid.executor import _is_enable_standalone_executor
39
from paddle.framework import core
40 41
from paddle.static import default_main_program, default_startup_program
from paddle.utils import unique_name
42 43

from .pass_base import PassBase, register_pass
J
JZ-LIANG 已提交
44 45 46

OpRole = core.op_proto_and_checker_maker.OpRole
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
47
_skip_ops = [
48 49 50 51 52 53 54
    'create_py_reader',
    'create_double_buffer_reader',
    'read',
    'slice',
    'split',
    'assign',
    "send_v2",
55
]
J
JZ-LIANG 已提交
56 57
# update here to support new optimizers
_supported_optimizer_type = [
58 59 60 61 62 63 64 65 66 67
    "adam",
    "adamax",
    "adamw",
    "decayed_adagrad",
    "momentum",
    "dgc_momentum",
    "lars_momentum",
    "merged_momentum",
    "lamb",
    "sgd",
J
JZ-LIANG 已提交
68 69
]

70 71
_logger = get_logger(logging.INFO)

J
JZ-LIANG 已提交
72

73
def _is_reshard_op(op):
74 75 76
    return op.desc.has_attr(
        "op_namescope"
    ) and "/auto_parallel/reshard" in op.desc.attr('op_namescope')
77 78


J
JZ-LIANG 已提交
79 80 81
# NOTE we add the "auto_parallel" prefix to the pass in order to
# indicate that this pass should obey some constrains by auto_parallel
# for example all ops and vars should has dist attr before and after pass
82
# should use dist op instead of custom comm op
J
JZ-LIANG 已提交
83 84 85
@register_pass("auto_parallel_sharding")
class ShardingPass(PassBase):
    def __init__(self):
        """Register every attribute the sharding pass consumes.

        All knobs default to None / empty so that ``_check_self`` can verify
        the caller has filled them in before the pass is applied.
        """
        super().__init__()
        # (attribute, default) pairs the caller is expected to configure.
        for attr_name, default in [
            ("dist_context", None),
            ("stage", None),
            ("sharding_degree", None),  # for parallelizer
            ("degree", None),  # for parallelizer_v2
            ("enable_overlap", None),
            ("param_comm_stream_num", None),
            ("grad_comm_stream_num", None),
            ("param_bucket_size_numel", None),
            ("grad_bucket_size_numel", None),
            ("partition_algor", None),
            ("enable_hierarchical_comm", None),
            ("params_grads", []),
            ("global_rank", -1),
        ]:
            self.set_attr(attr_name, default)
        # Internal bookkeeping populated while the pass runs.
        self.dp_groups = set()
        self.sharding_infos = []
        self.varname_to_sharding_info = {}
        self.sharding_hybrid_dp = False
        self.outer_dp_group = None
        self.shared_params_grads = []
J
JZ-LIANG 已提交
106 107 108 109 110 111 112

    def _check_self(self):
        """Return True only when every attribute the pass needs is well formed."""
        if self.get_attr("dist_context") is None:
            return False

        if self.get_attr("stage") not in [1, 2, 3]:
            return False

        # Exactly one of "sharding_degree" (parallelizer) / "degree"
        # (parallelizer_v2) must be an int greater than 1; "sharding_degree"
        # wins when both are set.
        degree = self.get_attr("sharding_degree")
        if degree is None:
            degree = self.get_attr("degree")
            if degree is None:
                return False
        if (not isinstance(degree, int)) or degree <= 1:
            return False

        if len(self.get_attr("params_grads")) <= 0:
            return False

        global_rank = self.get_attr("global_rank")
        if (not isinstance(global_rank, int)) or global_rank < 0:
            return False

        # The remaining knobs merely need to be provided (non-None).
        for required_attr in [
            "enable_overlap",
            "param_comm_stream_num",
            "grad_comm_stream_num",
            "param_bucket_size_numel",
            "grad_bucket_size_numel",
            "partition_algor",
            "enable_hierarchical_comm",
        ]:
            if self.get_attr(required_attr) is None:
                return False
        return True

    def _check_conflict(self, other_pass):
        # This pass declares no conflicts: it composes with any other pass.
        return True

    def _apply_single_impl(self, main_program, startup_program, context):
        """Entry point of the pass: read attributes, then shard the program.

        Reads every configured attribute into instance state, builds the
        sharding groups/infos from the main block, and applies optimizer /
        gradient / parameter sharding to every block of *main_program*.
        """
        self._dist_context = self.get_attr("dist_context")
        # "sharding_degree" (parallelizer) takes precedence over "degree"
        # (parallelizer_v2); _check_self guarantees one of them is set.
        self.sharding_world_size = int(
            self.get_attr("sharding_degree") or self.get_attr("degree")
        )
        self.stage = int(self.get_attr("stage"))
        self.global_rank = int(self.get_attr("global_rank"))
        self.enable_overlap = self.get_attr("enable_overlap")
        self.param_comm_stream_num = int(self.get_attr("param_comm_stream_num"))
        self.grad_comm_stream_num = int(self.get_attr("grad_comm_stream_num"))
        self.enable_hierarchical_comm = self.get_attr(
            "enable_hierarchical_comm"
        )
        if self.param_comm_stream_num > 1 or self.grad_comm_stream_num > 1:
            assert (
                self.enable_overlap
            ), "multiple comm stream need enable_overlap to be True"
        self.param_bucket_size_numel = int(
            self.get_attr("param_bucket_size_numel")
        )
        self.grad_bucket_size_numel = int(
            self.get_attr("grad_bucket_size_numel")
        )
        self.partition_algor = self.get_attr("partition_algor")

        params_grads = self.get_attr("params_grads")
        main_block, startup_block = (
            main_program.global_block(),
            startup_program.global_block(),
        )

        # NOTE Multi / Sub-Block Support
        # we assume that only parameter are present and partitioned in main_block,
        # there is NO new param in sub_block, and all params in sub_block follows the same
        # partition as main_block. the above contraint fullfill the 3 most common use-cases in Paddle sub_block:
        # 1. subblock for lr scheduler
        # 2. sub-block uses the same or partial network of main-block, e.g. GPT3 generation model
        # 3. sub-block used for double backward
        self._build_sharding_groups(main_block, params_grads)
        for block in main_program.blocks:
            self._shard_optimizer(block, startup_block, params_grads, context)
            self._shard_gradient_synchronization(block)
            self._shard_parameter(block, startup_block)

        # Expose the (param, grad) pairs kept on this rank to later passes.
        context.set_attr("params_grads", self.shared_params_grads)
        self._optimization_pass(main_program, startup_program)
197

J
JZ-LIANG 已提交
198 199
    def _build_sharding_groups(self, main_block, params_grads):
        """Discover the data parallel groups, then derive sharding infos from them."""
        self._collective_data_parallel_groups(main_block)
        self._build_sharding_infos(main_block, params_grads)
J
JZ-LIANG 已提交
201 202 203

    def _collective_data_parallel_groups(self, main_block):
        """Scan forward ops and collect their data parallel groups into self.dp_groups.

        Raises NotImplementedError unless exactly one dp group is found.
        """
        for op in main_block.ops:
            # Only forward ops carry the dp information we need; reader/comm
            # helper ops in _skip_ops are ignored.
            if not _is_forward_op(op) or op.type in _skip_ops:
                continue
            # NOTE: there aren't dist_attr in the ops which reshard insert,
            # and should be skip in sharding.
            if _is_reshard_op(op):
                continue
            group = _inference_data_parallel_group_for_operator(
                self.global_rank, op, self._dist_context
            )
            if group is not None:
                self.dp_groups.add(group)

        # TODO(JZ-LIANG) allow more than one dp groups in network, support more general distribution
        # genetated by auto search
        if len(self.dp_groups) != 1:
            raise NotImplementedError(
                "So far Only and Exactly one data parallel group in network are supported, but got [{}] different data parallel groups".format(
                    len(self.dp_groups)
                )
            )
J
JZ-LIANG 已提交
224

225 226 227 228 229 230
    def _build_sharding_infos(self, main_block, params_grads):
        """Partition *params_grads* among the ranks of each data parallel group.

        Builds one ShardingInfo per dp group (exactly one is supported today)
        and records, for every parameter, which ShardingInfo owns it.  When
        the dp group is larger than the sharding world size, the dp group is
        split into an outer pure-dp group and an inner sharding group
        ("sharding hybrid dp").
        """
        # order params
        params_grads = re_order_program(
            main_block, params_grads, self._dist_context
        )

        # partition
        for dp_group in self.dp_groups:
            assert (
                dp_group.nranks >= self.sharding_world_size
            ), "sharding world size [{}] should not be larger than dp world size [{}]".format(
                self.sharding_world_size, dp_group.nranks
            )
            # FIX: original message had the divisibility relation inverted —
            # the condition requires dp nranks to be divisible by the
            # sharding world size, not the other way around.
            assert (
                dp_group.nranks % self.sharding_world_size == 0
            ), "dp world size [{}] should be divisible by sharding world size [{}]".format(
                dp_group.nranks, self.sharding_world_size
            )
            assert (
                self.global_rank in dp_group.ranks
            ), "current ranks [{}] does NOT belong to the data parallel group [{}]".format(
                self.global_rank, dp_group.ranks
            )
            assert (
                len(params_grads) >= self.sharding_world_size
            ), "number of parameters [{}] is not enough to be shard among [{}] ranks".format(
                len(params_grads), self.sharding_world_size
            )

            # sharding hybrid data parallel: partial sharding param within
            if dp_group.nranks > self.sharding_world_size:
                self.sharding_hybrid_dp = True
                # multi-stream comm overlap is not supported under hybrid dp
                assert self.param_comm_stream_num < 2
                assert self.grad_comm_stream_num < 2
                assert (
                    len(self.dp_groups) == 1
                ), "hybrid sharding and data parallelism are supported only when there is exactly one data parallel group in the network"
                outer_dp_group, sharding_group = _get_dp_and_sharding_groups(
                    dp_group.ranks, self.sharding_world_size, self.global_rank
                )
                sharding_group = new_process_group(sharding_group)
                self.outer_dp_group = new_process_group(outer_dp_group)
            else:
                # sharding spans the whole dp group
                sharding_group = dp_group

            self._dist_context._sharding_group = sharding_group
            # TODO(JZ-LIANG) when support multiple dp groups in future, should group param and bind them to corresponding dp group
            sharding_info = ShardingInfo(
                sharding_group,
                self.global_rank,
                params_grads,
                self.partition_algor,
            )
            self.sharding_infos.append(sharding_info)
            for param in sharding_info.params:
                self.varname_to_sharding_info[param.name] = sharding_info

284 285 286
    def _shard_optimizer(
        self, main_block, startup_block, params_grads, pass_context
    ):
        """
        sharding all optimizer related ops and vars, include:
        gradient clip ops & vars
        weight decay ops & vars
        optimizer ops and states
        """
        self._shard_amp_related_op_and_vars(main_block, pass_context)
        self._shard_weight_decay(main_block)
        # NOTE(review): gradient clip sharding is left disabled here —
        # confirm whether _shard_gradient_clip should be re-enabled.
        # self._shard_gradient_clip(main_block)
        self._shard_optimizer_ops_and_states(main_block, startup_block)
        self._insert_optimizer_broadcasts(main_block, startup_block)

    def _shard_amp_related_op_and_vars(self, main_block, pass_context):
        """Restrict AMP bookkeeping ops to locally sharded parameters (stage >= 2).

        Removes fp32 grad-cast ops of non-local parameters and shrinks the
        input list of check_finite_and_unscale / update_loss_scaling to the
        local shard; an emptied check_finite_and_unscale is replaced with a
        fill_constant writing 0 to its output.
        """
        if self.stage < 2:
            return

        # reversed iteration so op removal does not shift unprocessed indices
        for idx, op in reversed(list(enumerate(main_block.ops))):
            # shard amp related param_grad cast
            if _is_param_grad_fp32_cast_op(main_block, op):
                output_name = op.output_arg_names[0]
                # strip the "@..." gradient suffix to recover the param name
                param_name = output_name[: output_name.find("@")]
                if not self._is_parameter_in_local_shard(param_name):
                    main_block._remove_op(idx, sync=False)
                    main_block._remove_var(output_name, sync=False)

            # shard check nan inf
            elif op.type in ["check_finite_and_unscale", "update_loss_scaling"]:
                reversed_x = []
                for input_name in op.desc.input('X'):
                    param_name = input_name[: input_name.find("@")]

                    if self._is_parameter_in_local_shard(param_name):
                        reversed_x.append(input_name)

                # NOTE: When `reversed_x` is [], check_finite_and_unscale will be replaced by `fill_constant` op.
                # The output of check_finite_and_unscale is be set False
                if reversed_x:
                    op.desc.set_input('X', reversed_x)
                    op.desc.set_output('Out', reversed_x)
                else:
                    if op.type == "check_finite_and_unscale":
                        op_role = op.attr('op_role')
                        out_name = op.output_arg_names[0]
                        out_var = main_block.vars[out_name]
                        main_block._remove_op(idx, sync=False)
                        main_block._insert_op_without_sync(
                            idx,
                            type="fill_constant",
                            outputs={"Out": out_var},
                            attrs={
                                "shape": out_var.shape,
                                "dtype": out_var.dtype,
                                "value": 0,
                                OP_ROLE_KEY: op_role,
                            },
                        )
                    else:
                        main_block._remove_op(idx, sync=False)

        main_block._sync_with_cpp()

    def _shard_gradient_clip(self, main_block):
        """Prune per-param gradient-clip ops for non-local params (stage >= 2).

        Pass 1 marks clip ops (and their temp outputs) belonging to params
        outside the local shard; pass 2 removes them; pass 3 shrinks the
        global-norm `sum` to the surviving inputs and allreduces its output
        across each sharding group so every rank sees the same global norm.
        """
        if self.stage < 2:
            return

        # TODO (JZ-LIANG) support calculate global norm with tensor parallelism
        removed_op_type = ['elementwise_mul', 'squared_l2_norm', 'clip_by_norm']
        removed_op_idx = set()
        removed_tmp_var = set()

        # pass 1: mark ops / temp vars of parameters not in the local shard
        for idx, op in list(enumerate(main_block.ops)):
            if not _is_gradient_clip_op(op):
                continue

            if op.type in removed_op_type:
                input_name = op.input("X")[0]
                param_name = input_name[: input_name.find("@GRAD")]
                if not self._is_parameter_in_local_shard(param_name):
                    removed_op_idx.add(idx)
                    if op.type in ['squared_l2_norm', 'clip_by_norm']:
                        for output_name in op.output_arg_names:
                            removed_tmp_var.add(output_name)

        # pass 2: remove marked ops (reversed keeps earlier indices valid)
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if not _is_gradient_clip_op(op):
                continue
            if idx in removed_op_idx:
                main_block._remove_op(idx, sync=False)

        for varname in removed_tmp_var:
            main_block._remove_var(varname, sync=False)

        # pass 3: fix up the global-norm sum and add an allreduce behind it
        for idx, op in list(enumerate(main_block.ops)):
            if not _is_gradient_clip_op(op):
                continue
            if op.type == 'sum':
                reserved_vars = []
                for input_name in op.input_arg_names:
                    if input_name not in removed_tmp_var:
                        reserved_vars.append(input_name)
                op.desc.set_input("X", reserved_vars)

                sum_op_output = op.output_arg_names[0]
                for i, sharding_info in enumerate(self.sharding_infos):
                    new_op = main_block._insert_op(
                        idx + i + 1,
                        type='c_allreduce_sum',
                        inputs={'X': [sum_op_output]},
                        outputs={'Out': [sum_op_output]},
                        attrs={
                            'ring_id': sharding_info.group.id,
                            'op_namescope': "/gradient_clip_model_parallelism",
                            'use_calc_stream': True,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )
                    dist_attr = (
                        self._dist_context.get_tensor_dist_attr_for_program(
                            main_block.var(sum_op_output)
                        )
                    )
                    # assert dist_attr is not None
                    # naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                    #     new_op, dist_attr.process_mesh, dist_attr.dims_mapping,
                    #     self._dist_context)
                break

        main_block._sync_with_cpp()

    def _shard_weight_decay(self, main_block):
        """Reject programs that contain standalone weight-decay ops.

        Sharding stage >= 2 would have to restrict weight decay to locally
        owned parameters, which is not implemented, so the first
        weight-decay op found raises NotImplementedError.
        """
        if self.stage < 2:
            return

        for op in reversed(list(main_block.ops)):
            if _is_weight_decay_op(op):
                raise NotImplementedError(
                    "weight decay is NOT supported by now"
                )
        main_block._sync_with_cpp()

    def _shard_optimizer_ops_and_states(self, main_block, startup_block):
        """Keep optimizer ops/states only for parameters owned by this rank.

        Optimizer ops updating non-local parameters are removed together with
        their state variables (moments, etc.) and the startup ops that
        initialize those states; locally owned (param, grad) pairs are
        recorded in self.shared_params_grads.
        """
        should_removed_optimizer_states = []
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if not is_optimize_op(op):
                # ops are ordered; the first non-optimize op (scanning from
                # the end) marks the start of the optimizer region
                break

            if op.type in _supported_optimizer_type:
                assert "Param" in op.input_names
                assert len(op.input("Param")) == 1
                param_name = op.input("Param")[0]
                if not self._is_parameter_in_local_shard(param_name):
                    # every optimizer output except the parameter itself is a
                    # state variable that can be dropped
                    should_removed_optimizer_states.extend(
                        [
                            varname
                            for varname in op.output_arg_names
                            if varname != param_name
                        ]
                    )
                    main_block._remove_op(idx, sync=False)
                else:
                    self.shared_params_grads.append(
                        self._get_param_grad(param_name)
                    )

        # drop the startup ops that initialize the removed optimizer states
        for idx, op in reversed(list(enumerate(startup_block.ops))):
            if (
                len(op.output_arg_names) == 1
                and op.output_arg_names[0] in should_removed_optimizer_states
            ):
                startup_block._remove_op(idx, sync=False)

        for varname in should_removed_optimizer_states:
            if main_block.has_var(varname):
                main_block._remove_var(varname, sync=False)
            if startup_block.has_var(varname):
                startup_block._remove_var(varname, sync=False)

        main_block._sync_with_cpp()
        startup_block._sync_with_cpp()

    def _insert_optimizer_broadcasts(self, main_block, startup_block):
        """Broadcast each updated parameter from its owning rank to the others.

        Only applies to stage <= 2 without parameter-bucket fusion; stage 3
        (or fused parameter comm) synchronizes parameters elsewhere.
        """
        if self.stage > 2 or self.param_bucket_size_numel > 1:
            return

        for sharding_info in self.sharding_infos:
            for param in sharding_info.params:
                assert main_block.has_var(param.name)
                assert startup_block.has_var(param.name)

                new_op = main_block.append_op(
                    type='c_broadcast',
                    inputs={'X': param},
                    outputs={'Out': param},
                    attrs={
                        'ring_id': sharding_info.group.id,
                        'root': sharding_info.get_var_rank(param.name),
                        'use_calc_stream': True,
                        OP_ROLE_KEY: OpRole.Optimize,
                    },
                )
                new_op._set_attr(
                    'op_namescope', '/' + ParallelMode.DataParallel
                )
                # propagate the parameter's dist attr to the new broadcast op
                param_dist_attr = (
                    self._dist_context.get_tensor_dist_attr_for_program(param)
                )
                assert param_dist_attr is not None
                naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                    new_op,
                    param_dist_attr.process_mesh,
                    param_dist_attr.dims_mapping,
                    self._dist_context,
                )
        main_block._sync_with_cpp()

    def _is_parameter_in_local_shard(self, param_name):
        """Return True when *param_name* is assigned to this rank's shard."""
        info = self.varname_to_sharding_info.get(param_name)
        assert info is not None
        return info.is_in_local_shard(param_name)

514 515 516 517 518 519 520
    def _get_param_grad(self, param_name):
        """Return the (param, grad) pair tracked for *param_name*; it must exist."""
        assert param_name in self.varname_to_sharding_info
        pair = self.varname_to_sharding_info[param_name].get_param_grad(
            param_name
        )
        assert pair is not None
        return pair

J
JZ-LIANG 已提交
521 522 523 524 525 526 527
    def _shard_gradient_synchronization(self, main_block):
        """Rewrite dp grad allreduce into reduce-to-owner (stage >= 2).

        Each gradient only needs to reach the rank owning the parameter
        shard, so the data parallel c_allreduce is replaced by a reduce whose
        root is the owning rank.  Under sharding + dp hybrid, the original
        allreduce is kept (retargeted to the outer dp group) for gradients in
        the local shard; otherwise it is removed.  Redundant grad `sum` ops
        producing non-local gradients are removed as well.
        """
        if self.stage < 2:
            return

        # FIX: dropped unused locals from the original (`dp_ring_ids` and the
        # unused `reduce_op` binding); behavior is unchanged.
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if _is_param_grad_allreduce_op(op, main_block):
                input_name = op.input_arg_names[0]
                base_name = _get_base_name_from_grad_name(input_name)
                sharding_info = self.varname_to_sharding_info[base_name]
                # reduce the gradient to the rank that owns the parameter
                _insert_reduce_op(
                    main_block,
                    idx,
                    input_name,
                    sharding_info.group.id,
                    sharding_info.get_var_rank(base_name),
                    self._dist_context,
                )
                if (
                    not self.sharding_hybrid_dp
                    or not sharding_info.is_in_local_shard(base_name)
                ):
                    # the original allreduce (now at idx + 1) is redundant
                    main_block._remove_op(idx + 1, sync=False)
                else:
                    # hybrid dp: keep the allreduce on the outer dp group
                    op._set_attr("ring_id", self.outer_dp_group.id)
                    op._set_attr(
                        'op_namescope', '/' + ParallelMode.DataParallel
                    )

            # NOTE:
            # var@GRAD = sum(var@GRAD@RENAME@0, var@GRAD@RENAME@1)
            # If the var is not in local rank and it is output of many ops, or the var is renamed in another words,
            # the sum op should be removed.
            if _is_param_grad_sum_op(op, main_block):
                out_name = op.output_arg_names[0]
                base_name = _get_base_name_from_grad_name(out_name)
                sharding_info = self.varname_to_sharding_info[base_name]
                if not sharding_info.is_in_local_shard(base_name):
                    main_block._remove_op(idx, sync=False)

        main_block._sync_with_cpp()

    def _shard_parameter(self, main_block, startup_block):
        """Stage-3 sharding: keep only locally owned parameters.

        For every parameter a rank uses without owning it, a temporary
        variable is created and filled by a broadcast from the owning rank
        right before use.  Non-local parameters — and their fp16 casts,
        init ops and startup broadcasts — are then removed from both blocks.
        """
        if self.stage < 3:
            return

        dp_ring_ids = [group.id for group in self.dp_groups]
        for sharding_info in self.sharding_infos:
            (
                need_broadcast_vars,
                param_usage,
            ) = sharding_info.get_broadcast_vars_and_param_usage(main_block)
            # FIX: renamed from the original typo `not_used_param_nane`.
            # Parameters never read by this rank and not owned locally.
            not_used_param_name = []
            for param_name in param_usage:
                if (
                    param_usage[param_name] == 0
                    and sharding_info.get_var_rank(param_name)
                    != sharding_info.local_rank
                ):
                    not_used_param_name.append(param_name)

            for idx, op in reversed(list(enumerate(main_block.ops))):
                if is_optimize_op(op):
                    continue

                for input_name in op.input_arg_names:
                    # NOTE hack for embedding op when AMP 02-3
                    # paddle amp force embedding (lookup table) to be run on fp32
                    if _is_param_fp16_cast_op(
                        main_block, op, sharding_info.param_names
                    ):
                        continue
                    if input_name not in need_broadcast_vars:
                        continue
                    root_rank = sharding_info.get_var_rank(input_name)
                    if root_rank == sharding_info.local_rank:
                        # owner broadcasts its own variable in place
                        broadcast_varname = input_name
                    else:
                        # non-owner: receive into a fresh temporary variable
                        broadcast_varname = unique_name.generate(
                            input_name + "@BroadCast"
                        )
                        input_var = main_block.var(input_name)
                        new_var = main_block.create_var(
                            name=broadcast_varname,
                            shape=input_var.shape,
                            dtype=input_var.dtype,
                            persistable=False,
                        )
                        ref_dist_attr = (
                            self._dist_context.get_tensor_dist_attr_for_program(
                                input_var
                            )
                        )
                        # register the buffer's dist attr (side effect on the
                        # dist context; the return value was never used)
                        set_var_dist_attr(
                            self._dist_context,
                            new_var,
                            ref_dist_attr.dims_mapping,
                            ref_dist_attr.process_mesh,
                        )
                        op._rename_input(input_name, broadcast_varname)

                    _insert_init_and_broadcast_op(
                        main_block,
                        idx,
                        broadcast_varname,
                        sharding_info.local_rank,
                        root_rank,
                        sharding_info.group.id,
                        op.attr('op_role'),
                        self._dist_context,
                    )

            # drop fp16 casts of parameters this rank never uses
            for idx, op in reversed(list(enumerate(main_block.ops))):
                if op.type != "cast":
                    continue
                input_name = op.input_arg_names[0]
                output_name = op.output_arg_names[0]
                if input_name in not_used_param_name:
                    main_block._remove_op(idx, sync=False)
                    main_block._remove_var(output_name, sync=False)

            # prune startup init/broadcast ops of non-local parameters
            for idx, op in reversed(list(enumerate(startup_block.ops))):
                assert len(op.output_arg_names) == 1
                output_name = op.output_arg_names[0]

                if (
                    op.type == "c_broadcast"
                    and op.attr("ring_id") in dp_ring_ids
                ):
                    if (
                        self.outer_dp_group
                        and sharding_info.get_var_rank(output_name)
                        == sharding_info.local_rank
                    ):
                        op._set_attr("ring_id", self.outer_dp_group.id)
                    else:
                        startup_block._remove_op(idx, sync=False)
                    continue

                if (
                    op.type != "c_broadcast"
                    and output_name in param_usage
                    and sharding_info.get_var_rank(output_name)
                    != sharding_info.local_rank
                ):
                    startup_block._remove_op(idx, sync=False)

            for param_name in param_usage:
                if (
                    sharding_info.get_var_rank(param_name)
                    != sharding_info.local_rank
                ):
                    main_block._remove_var(param_name, sync=False)
                    startup_block._remove_var(param_name, sync=False)

        main_block._sync_with_cpp()
        startup_block._sync_with_cpp()

681 682
    def _optimization_pass(self, main_program, startup_program):
        """Apply fuse/overlap communication optimizations (stage >= 2 only)."""
        if self.stage <= 1:
            return

        self.grad_coalesce_prefix = 'sharding_coalesce_grad_'
        self.param_coalesce_prefix = 'sharding_coalesce_param_'
        # NOTE PR#49275 for detail
        self.comm_op_scheduling_priority = -1

        # TODO support multiple sub_blocks
        assert (
            len(self.sharding_infos) == 1
        ), "gradient synchronization optimization only support one sharding group right now, but got [{}].".format(
            len(self.sharding_infos)
        )
        sharding_info = self.sharding_infos[0]

        with paddle.static.program_guard(main_program, startup_program):
            self._gradient_sync_optimization(sharding_info)
            # TODO independent the logic of fuse and overlap
            # support overlap when no fuse
            if self.param_bucket_size_numel > 1:
                if self.stage == 2:
                    self._fuse_overlap_parameter_comm_stage_two(sharding_info)
                elif self.stage == 3:
                    self._fuse_overlap_parameter_comm_stage_three(sharding_info)

    def _gradient_sync_optimization(self, sharding_info):
        """Group gradients into coalesced buckets and overlap their
        communication with computation, when either optimization is on."""
        # Proceed only if fusing (bucket size > 1) or overlap is requested.
        if not (self.grad_bucket_size_numel > 1 or self.enable_overlap):
            return

        block = default_main_program().global_block()
        startup_block = default_startup_program().global_block()
        coalesce_map, grad_map = self._group_grads(block, sharding_info)
        self._overlap_grad_comm(block, sharding_info, coalesce_map, grad_map)

    def _fuse_overlap_parameter_comm_stage_two(self, sharding_info):
        """Fuse sharded-parameter broadcasts into buckets (sharding stage 2).

        Parameters owned by other ranks are re-broadcast after the optimizer
        step. This pass coalesces them into bucket tensors, optionally spreads
        the broadcasts over several communication streams (when overlap is
        enabled), and inserts dependency ops so the coalesced buffers keep a
        correct data-flow order.
        """
        main_block = default_main_program().global_block()
        startup_block = default_startup_program().global_block()

        group_to_param_map, param_to_group_map = group_param(
            sharding_info, self.param_bucket_size_numel
        )
        _logger.info("Sharding Stage2 Optimization:")
        _logger.info(
            "Param Bucket size is [{}], [{}] Parameters are fused into [{}] Buckets".format(
                self.param_bucket_size_numel,
                len(param_to_group_map.keys()),
                len(group_to_param_map.keys()),
            )
        )
        broadcast_var_to_group_map = {}

        if self.enable_overlap:
            # if the communication is cross node, comm will be slow and calc will therefore
            # wait for comm. enable multi-comm-stream
            # TODO revise me in future
            # 1. manage the comm and corresponding stream
            # 2. allow more than two streams and open to be config
            self.param_comm_group_stream_pairs = []
            ranks = sharding_info.group.ranks
            for i in range(self.param_comm_stream_num):
                if i == 0:
                    group = sharding_info.group
                else:
                    group = new_process_group(ranks, force_new_group=True)
                # NOTE here stream is just a presentation with different name,
                # it is up to executor to create the exact streams given the name.
                stream = "sharding_param_comm_stream{}".format(i)
                self.param_comm_group_stream_pairs.append(
                    {
                        "comm_group": group,
                        "comm_stream": stream,
                    }
                )
            _logger.info(
                "Parameter Communication would use [{}] streams.".format(
                    self.param_comm_stream_num
                )
            )
            self.op_to_stream_idx = {}
        # NOTE(review): param_comm_group_stream_pairs and op_to_stream_idx are
        # only initialized when enable_overlap is on, yet they are used
        # unconditionally below -- confirm this path is never reached with
        # overlap disabled.

        for i, param_group in enumerate(group_to_param_map.keys()):
            assert len(param_group) >= 1
            if len(param_group) > 1:
                # multi-parameter bucket: create the fused buffer in both
                # programs and coalesce into it at startup
                coalesce_var_name = unique_name.generate(
                    self.param_coalesce_prefix + str(i)
                )
                startup_block.create_var(
                    name=coalesce_var_name,
                    dtype=param_group.dtype,
                    persistable=True,
                    stop_gradient=True,
                )
                param_group.coalesce_var = main_block.create_var(
                    name=coalesce_var_name,
                    dtype=param_group.dtype,
                    persistable=True,
                    stop_gradient=True,
                )
                startup_block.append_op(
                    type="coalesce_tensor",
                    inputs={"Input": param_group.vars},
                    outputs={
                        "Output": param_group.vars,
                        "FusedOutput": param_group.coalesce_var,
                    },
                    attrs={
                        "copy_data": True,
                        "use_align": True,
                        "dtype": param_group.dtype,
                        OP_ROLE_KEY: OpRole.Forward,
                    },
                )
            else:
                # single-parameter bucket: broadcast the parameter directly
                param_group.coalesce_var = param_group.vars[0]
            _logger.info(
                "Bucket[{}] size [{}]MB.".format(
                    i,
                    sum([get_var_size(p) for p in param_group.vars]),
                )
            )
            _logger.debug(
                "Bucket[{}] parameters: {}.".format(
                    i,
                    [p.name for p in param_group.vars],
                )
            )

            broadcast_var_to_group_map[
                param_group.coalesce_var.name
            ] = param_group

            # TODO revise me to manage stream and comm
            comm_stream_idx = i % self.param_comm_stream_num
            comm_group = self.param_comm_group_stream_pairs[comm_stream_idx][
                'comm_group'
            ]
            comm_stream = self.param_comm_group_stream_pairs[comm_stream_idx][
                'comm_stream'
            ]
            new_op = main_block.append_op(
                type='c_broadcast',
                inputs={'X': param_group.coalesce_var},
                outputs={'Out': param_group.coalesce_var},
                attrs={
                    'ring_id': comm_group.id,
                    'root': param_group.rank,
                    'use_calc_stream': True,
                    OP_ROLE_KEY: OpRole.Optimize,
                },
            )
            self.op_to_stream_idx[new_op] = comm_stream_idx
            new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
            if self.enable_overlap:
                new_op.dist_attr.execution_stream = comm_stream
                new_op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

            # NOTE the current dist context lack the presentation for bucket tensor which
            # composes many tensor with different dims_mapping. we DO NOT assign dist attr
            # for it currently.

        # add dependencies:
        # 1. all broadcast depend on its pre collective
        # 2. coalesce broadcast add nop to resolute data flow dependencies
        dep_map = {}
        for i, op in enumerate(main_block.ops):
            if is_sharding_param_broadcast_op(op):
                broadcast_varname = op.output("Out")[0]
                broadcast_var = main_block.vars[broadcast_varname]
                param_group = broadcast_var_to_group_map[broadcast_varname]
                comm_stream = None
                if self.enable_overlap:
                    comm_stream = op.dist_attr.execution_stream

                # FIXME remove me when upgrade to multi-comm version
                if len(dep_map.keys()) < self.param_comm_stream_num:
                    # first broadcast per stream depends on the optimizer op
                    op = _get_broadcast_first_depend_op(main_block)
                    prior_var = main_block.vars[op.output("ParamOut")[0]]
                else:
                    # later broadcasts depend on the previous broadcast that
                    # was scheduled on the same stream
                    pre_op = main_block.ops[i - self.param_comm_stream_num]
                    assert is_sharding_param_broadcast_op(
                        pre_op
                    ), "Unexpected: sharding broadcast pre op should be broadcast."
                    prior_var = main_block.vars[pre_op.output("Out")[0]]
                # broadcast order dependencies
                dep_map[i] = [(i, [prior_var], [broadcast_var], comm_stream)]

                if len(param_group.vars) > 1:
                    # in shard coalesce depend to optimizer
                    if param_group.is_in_local_shard:
                        last_grad = param_group.vars[-1]
                        dep_map[i].append(
                            (i, [last_grad], [broadcast_var], comm_stream)
                        )
                    # coalesce resolution post deps
                    dep_map[i].append(
                        (i + 1, [broadcast_var], param_group.vars, comm_stream)
                    )

        # insert deps (reverse index order so earlier insertions do not shift
        # the positions of later ones)
        indice = sorted(list(dep_map.keys()), reverse=True)
        for i in indice:
            for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
                depend_op = insert_dependencies_for_vars(
                    main_block,
                    idx,
                    prior_vars,
                    post_vars,
                    self._dist_context,
                    OpRole.Optimize,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_stage2_broadcast_dep",
                )
                if self.enable_overlap:
                    depend_op.dist_attr.execution_stream = comm_stream
                    depend_op.dist_attr.scheduling_priority = (
                        self.comm_op_scheduling_priority
                    )

        main_block._sync_with_cpp()

    def _fuse_overlap_parameter_comm_stage_three(self, sharding_info):
        # Placeholder: parameter communication fusion/overlap for sharding
        # stage 3 is not implemented yet; deliberately a no-op.
        pass

    def _group_grads(
        self,
        block,
        sharding_info,
    ):
        """Group gradients into coalesced buckets for fused communication.

        conditions for gradients to be grouped:
            1. group size < grad_bucket_size_numel
            2. same dp group (TODO)
            3. same src rank
            4. same dtype
            5. dependency: grad would NOT be used by other ops within group segment

        main logic:
            1. record coalesce group
            2. record all dp allreduce/reduce op idx

            3. insert coalesce op
            4. insert coalesce dependency (avoid allocate memory too early)
            5. modify and remove allreduce/reduce op
            6. ensure sharding-dp hybrid parallel logic

        gradients inside same group would be fused into one coalesce tensor

        Returns:
            tuple ``(coalesce_to_group_map, grad_name_to_group_map)``, or
            ``None`` when the program has no backward ops (inference).
        """
        ops = block.ops
        if self.grad_bucket_size_numel < 1:
            # numel for transformer layer
            # h = 4096 + 1
            # ffn_numel = 2 * (4 * h) * h
            # mha_numel = 3 * h * h + h * h
            # max_fuse_numel = ffn_numel + mha_numel
            self.grad_bucket_size_numel = 1

        first_backward_op = None
        for op in ops:
            if is_loss_grad_op(op):
                first_backward_op = op
        # no backward op: sharding for inference, nothing to group
        if first_backward_op is None:
            return

        cur_group = VarGroup(self.grad_bucket_size_numel)
        grad_groups = []
        grouped_grad_names = set()

        def op_depend_on_group(op, group):
            # True when op reads or writes any gradient already in the group;
            # such an op forces the group to be sealed (condition 5 above).
            vars_ = set(op.input_arg_names + op.output_arg_names)
            var_names = {var.name for var in group.vars}
            return len(vars_.intersection(var_names)) > 0

        # analyze groups
        i = 0
        while i < len(ops):
            op = ops[i]
            if is_data_parallel_reduce_op(op):
                assert (
                    op.type == "c_reduce_sum"
                ), "Sharding should reduce grad first and than allreduce if Hybrid Sharding with Data-Parallel"

                grad_name = op.output_arg_names[0]
                param_name = _get_base_name_from_grad_name(grad_name)
                rank = sharding_info.get_var_rank(param_name)
                grad_var = block.var(grad_name)

                if cur_group.acceptable(grad_var, rank):
                    assert grad_name not in grouped_grad_names
                    cur_group.collect(grad_var, rank)
                else:
                    grad_groups.append(cur_group)
                    cur_group = VarGroup(self.grad_bucket_size_numel)
                    cur_group.collect(grad_var, rank)

                if len(cur_group.vars) == 1:
                    # first grad of a new group: the coalesce op goes right
                    # before this reduce op
                    cur_group.coalesce_op_idx = i - 1
                    # NOTE coalesce dependency: control when allocate memory for gradients
                    # too early would increase the peak memory requirement, too late would hurt the performance
                    j = 2
                    while is_dep_skip_op(ops[i - j]):
                        j += 1
                    dep_op = ops[i - j]
                    dep_varname = dep_op.output_arg_names[0]
                    cur_group.coalesce_dep_varname = dep_varname

                grouped_grad_names.add(grad_name)
                cur_group.reduce_op_indices.append(i)

                if self.sharding_hybrid_dp and sharding_info.is_in_local_shard(
                    param_name
                ):
                    cur_group.is_in_local_shard = True
                    assert (
                        ops[i + 1].type == "c_allreduce_sum"
                    ), "Sharding should reduce grad first and than allreduce if Hybrid Sharding with Data-Parallel"
                    assert (
                        ops[i + 1].output_arg_names[0] == grad_name
                    ), "Hybrid Sharding with Data-Parallel should sync same gradient var"
                    cur_group.allreduce_op_indices.append(i + 1)
                    # skip the paired allreduce op
                    i += 1
            elif op_depend_on_group(op, cur_group):
                grad_groups.append(cur_group)
                cur_group = VarGroup(self.grad_bucket_size_numel)

            i += 1
        # some grad not in this rank may not be used after dp reduced
        if len(cur_group.vars) >= 1:
            grad_groups.append(cur_group)

        _logger.info("Sharding Gradient Communication Optimization:")
        _logger.info(
            "Gradient Bucket size is [{}], [{}] Gradients are fused into [{}] Buckets.".format(
                self.grad_bucket_size_numel,
                len(grouped_grad_names),
                len(grad_groups),
            )
        )

        # create coalesce tensor and record op idx
        grad_name_to_group_map = {}
        coalesce_to_group_map = {}
        modify_reduce_op_map = {}
        coalesce_op_map = {}
        remove_reduce_op_indices = []

        for i, group in enumerate(grad_groups):
            if len(group.vars) > 1:
                group.coalesce_var = block.create_var(
                    name=unique_name.generate(
                        self.grad_coalesce_prefix + str(i)
                    ),
                    dtype=group.dtype,
                    persistable=False,
                    stop_gradient=True,
                )
                coalesce_op_map[group.coalesce_op_idx] = group
                # keep the LAST reduce (rewritten onto the coalesce var),
                # remove all earlier per-grad reduces
                last_reduce_op_idx = group.reduce_op_indices.pop()
                modify_reduce_op_map[last_reduce_op_idx] = group
                remove_reduce_op_indices.extend(group.reduce_op_indices)
                if group.is_in_local_shard:
                    last_allreduce_op_idx = group.allreduce_op_indices.pop()
                    modify_reduce_op_map[last_allreduce_op_idx] = group
                    remove_reduce_op_indices.extend(group.allreduce_op_indices)
            else:
                group.coalesce_var = group.vars[0]
            for grad in group.vars:
                grad_name_to_group_map[grad.name] = group
            coalesce_to_group_map[group.coalesce_var.name] = group

        # the three index sets must be pairwise disjoint: each op is either
        # the coalesce point, rewritten to the coalesce var, or removed
        coalesce_op_set = set(coalesce_op_map.keys())
        modify_op_set = set(modify_reduce_op_map.keys())
        remove_op_set = set(remove_reduce_op_indices)
        conflict = coalesce_op_set.intersection(modify_op_set)
        assert len(conflict) == 0
        conflict = coalesce_op_set.intersection(remove_op_set)
        assert len(conflict) == 0
        conflict = modify_op_set.intersection(remove_op_set)
        assert len(conflict) == 0

        # update block (reverse order so removals do not shift later indices)
        for idx, op in reversed(list(enumerate(block.ops))):
            if idx in modify_reduce_op_map:
                group = modify_reduce_op_map[idx]
                grad_name = op.output_arg_names[0]
                assert (
                    grad_name == group.vars[-1].name
                ), "Unexpected: it is supposed to sync [{}] but got [{}]".format(
                    group.vars[-1].name, grad_name
                )
                op._rename_input(grad_name, group.coalesce_var.name)
                op._rename_output(grad_name, group.coalesce_var.name)

            if idx in remove_reduce_op_indices:
                block._remove_op(idx, sync=False)

            if idx in coalesce_op_map:
                group = coalesce_op_map[idx]
                first_grad_name = group.vars[0].name
                assert (
                    first_grad_name in op.output_arg_names
                ), "Unexpected: op is supposed to generate grad [{}] but got [{}]".format(
                    first_grad_name, str(op)
                )
                grad_names = [grad.name for grad in group.vars]

                concated_shapes = []
                concated_ranks = []
                for grad_ in group.vars:
                    shape = grad_.shape
                    concated_shapes.extend(shape)
                    concated_ranks.append(len(shape))

                block._insert_op_without_sync(
                    idx,
                    type="coalesce_tensor",
                    inputs={"Input": grad_names},
                    outputs={
                        "Output": grad_names,
                        "FusedOutput": group.coalesce_var,
                    },
                    attrs={
                        "copy_data": False,
                        "use_align": True,
                        "dtype": group.dtype,
                        "concated_shapes": concated_shapes,
                        "concated_ranks": concated_ranks,
                        OP_ROLE_KEY: OpRole.Backward,
                    },
                )
                insert_dependencies_for_vars(
                    block,
                    idx,
                    block.var(group.coalesce_dep_varname),
                    group.coalesce_var,
                    self._dist_context,
                    OpRole.Backward,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_grad_coalesce_dep",
                )
        block._sync_with_cpp()

        return coalesce_to_group_map, grad_name_to_group_map

    def _overlap_grad_comm(
        self,
        block,
        sharding_info,
        coalesce_to_group_map,
        grad_name_to_group_map,
    ):
        """
        overlap gradient communication with backward & optimizer computation.

        1. assign gradient communications to grad comm stream
        2. for coalesce gradient communication:
            2.1 insert before communication dependencies
            2.2 insert after communication dependencies only when need
        3. there is no need to add explicit dependencies for non-coalesce gradient communication

        P.S. this overlap pass is ONLY adapted for standalone executor (graph based) and stream aware allocator.
        """
        if not _is_enable_standalone_executor() or (not self.enable_overlap):
            return

        self.grad_comm_group_stream_pairs = []
        ranks = sharding_info.group.ranks
        # NOTE since the gradient synchronization has calculation, there would be computation
        # competition between backward calculation. therefore should limit the number of stream used.
        for i in range(self.grad_comm_stream_num):
            if i == 0:
                group = sharding_info.group
            else:
                group = new_process_group(ranks, force_new_group=True)
            # NOTE here stream is just a presentation with different name,
            # it is up to executor to create the exact streams given the name.
            stream = "sharding_grad_comm_stream{}".format(i)
            self.grad_comm_group_stream_pairs.append(
                {
                    "comm_group": group,
                    "comm_stream": stream,
                }
            )

        ops = block.ops
        # analyze dependencies
        dep_map = {}
        reduce_op_count = 0
        grad_comm_op_to_stream_idx = {}
        for idx, op in enumerate(ops):
            if is_data_parallel_reduce_op(op):
                # allreduce ops are handled together with their paired
                # c_reduce_sum below
                if op.type == "c_allreduce_sum":
                    continue
                # round-robin the reduce ops over the comm streams
                stream_idx = reduce_op_count % self.grad_comm_stream_num
                grad_comm_op_to_stream_idx[op] = stream_idx
                comm_group = self.grad_comm_group_stream_pairs[stream_idx][
                    "comm_group"
                ]
                comm_stream = self.grad_comm_group_stream_pairs[stream_idx][
                    "comm_stream"
                ]

                reduce_varname = op.output("Out")[0]
                grad_group = coalesce_to_group_map[reduce_varname]
                assert grad_group.coalesce_var.name == reduce_varname

                # coalesce deps
                if len(grad_group.vars) > 1:
                    # NOTE should prior vars to be all grads ?
                    # when the grad_ops' order is random
                    # prior dep
                    dep_map[idx] = [
                        (
                            idx,
                            grad_group.vars[-1],
                            grad_group.coalesce_var,
                            comm_stream,
                        )
                    ]
                    # post dep
                    post_idx = idx + 1
                    if self.sharding_hybrid_dp and grad_group.is_in_local_shard:
                        post_idx += 1
                    dep_map[idx].append(
                        (
                            post_idx,
                            grad_group.coalesce_var,
                            grad_group.vars,
                            comm_stream,
                        )
                    )

                # assign stream
                op.dist_attr.execution_stream = comm_stream
                op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

                op._set_attr("ring_id", comm_group.id)
                if self.sharding_hybrid_dp and grad_group.is_in_local_shard:
                    next_op = ops[idx + 1]
                    assert next_op.type == "c_allreduce_sum"
                    assert next_op.output("Out")[0] == reduce_varname
                    # FIXME hybrid sharding-dp support multi comm & stream in feature
                    # next_op._set_attr("ring_id", comm_group.id)
                    next_op.dist_attr.execution_stream = comm_stream
                    next_op.dist_attr.scheduling_priority = (
                        self.comm_op_scheduling_priority
                    )
                    # NOTE the original code incremented `idx` here intending
                    # to skip the allreduce op, but enumerate() rebinds `idx`
                    # each iteration so the increment had no effect; the
                    # allreduce is skipped via the `continue` above instead.

                reduce_op_count += 1

        # insert deps (reverse index order so insertions do not shift later
        # positions)
        indice = sorted(list(dep_map.keys()), reverse=True)
        for i in indice:
            for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
                depend_op = insert_dependencies_for_vars(
                    block,
                    idx,
                    prior_vars,
                    post_vars,
                    self._dist_context,
                    OpRole.Backward,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_grad_comm_dep",
                )
                depend_op.dist_attr.execution_stream = comm_stream
                depend_op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

        # hierarchical grad comm
        if self.enable_hierarchical_comm:
            # NOTE so far we only support Isomorphic cluster with 8 ranks per node
            # TODO unify here create communicators
            # create communicators
            nranks_per_node = 8
            assert self.sharding_world_size % nranks_per_node == 0
            global_group = sharding_info.group
            global_ranks = global_group.ranks
            relative_idx_in_node = self.global_rank % nranks_per_node
            node_idx = self.global_rank // nranks_per_node
            inter_node_ranks = [
                rank
                for rank in global_ranks
                if rank % nranks_per_node == relative_idx_in_node
            ]
            _logger.info(
                "Sharding Gradient Hierarchical Communication Optimization."
            )
            _logger.info(
                "current global rank idx: {}.".format(self.global_rank)
            )
            _logger.info(
                "local inter node ranks idx: {}.".format(inter_node_ranks)
            )
            assert (
                len(inter_node_ranks)
                == self.sharding_world_size // nranks_per_node
            )
            intra_node_ranks = [
                rank
                for rank in global_ranks
                if rank // nranks_per_node == node_idx
            ]
            assert len(intra_node_ranks) == nranks_per_node
            _logger.info(
                "local intra node ranks idx: {}.".format(intra_node_ranks)
            )
            inter_node_groups = []
            intra_node_groups = []
            for _ in range(self.grad_comm_stream_num):
                # TODO re-use one origin communicator
                inter_node_groups.append(
                    new_process_group(inter_node_ranks, force_new_group=True)
                )
                intra_node_groups.append(
                    new_process_group(intra_node_ranks, force_new_group=True)
                )

            # update program: split each reduce into intra-node reduce
            # followed (on the peer rank) by an inter-node reduce
            for idx, op in reversed(list(enumerate(block.ops))):
                if is_data_parallel_reduce_op(op):
                    assert op.type == "c_reduce_sum"
                    grad_comm_stream_idx = grad_comm_op_to_stream_idx[op]
                    inter_node_group = inter_node_groups[grad_comm_stream_idx]
                    intra_node_group = intra_node_groups[grad_comm_stream_idx]

                    reduce_varname = op.output("Out")[0]
                    if self.enable_overlap:
                        comm_stream = op.dist_attr.execution_stream
                    dst_rank = int(op.attr("root_id"))

                    in_peer = False
                    if dst_rank % nranks_per_node == relative_idx_in_node:
                        in_peer = True
                    intra_node_dst = dst_rank % nranks_per_node

                    op._set_attr('ring_id', intra_node_group.id)
                    op._set_attr('root_id', intra_node_dst)

                    if in_peer:
                        inter_node_dst = dst_rank // nranks_per_node
                        new_op = block._insert_op_without_sync(
                            idx + 1,
                            type='c_reduce_sum',
                            inputs={"X": reduce_varname},
                            outputs={
                                "Out": reduce_varname,
                            },
                            attrs={
                                'ring_id': inter_node_group.id,
                                'root_id': inter_node_dst,
                                'use_calc_stream': True,
                                OP_ROLE_KEY: OpRole.Backward,
                            },
                        )
                        new_op._set_attr(
                            'op_namescope', '/' + ParallelMode.DataParallel
                        )

                        if self.enable_overlap:
                            new_op.dist_attr.execution_stream = comm_stream
                            new_op.dist_attr.scheduling_priority = (
                                self.comm_op_scheduling_priority
                            )

        block._sync_with_cpp()


def _get_broadcast_first_depend_op(block):
    """Return the first op in `block` whose type is a supported optimizer.

    Raises:
        Exception: if no supported optimizer op exists in the block.
    """
    for candidate in block.ops:
        if candidate.type in _supported_optimizer_type:
            return candidate
    raise Exception("Could not find optimizer op.")

def _insert_init_and_broadcast_op(
    block,
    insert_idx,
    varname,
    local_rank,
    root_rank,
    ring_id,
    op_role,
    dist_context,
):
    """Insert a c_broadcast of `varname` at `insert_idx`.

    On non-root ranks an `empty` op is additionally inserted at the same
    index (hence it ends up right before the broadcast) so the destination
    tensor exists before the broadcast writes into it.
    """
    var = block.var(varname)
    var_dist_attr = dist_context.get_tensor_dist_attr_for_program(var)

    broadcast_op = block._insert_op_without_sync(
        insert_idx,
        type='c_broadcast',
        inputs={'X': varname},
        outputs={'Out': varname},
        attrs={
            'ring_id': ring_id,
            'root': root_rank,
            'use_calc_stream': True,
            OP_ROLE_KEY: op_role,
        },
    )
    broadcast_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
        broadcast_op,
        var_dist_attr.process_mesh,
        var_dist_attr.dims_mapping,
        dist_context,
    )

    if local_rank != root_rank:
        # Inserting at the same index places this op in front of the
        # broadcast inserted above.
        init_op = block._insert_op_without_sync(
            insert_idx,
            type="empty",
            outputs={"Out": var.name},
            attrs={
                "shape": var.shape,
                "dtype": var.dtype,
                OP_ROLE_KEY: op_role,
            },
        )
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            init_op,
            var_dist_attr.process_mesh,
            var_dist_attr.dims_mapping,
            dist_context,
        )


def _insert_reduce_op(
    block,
    insert_idx,
    reduce_var,
    ring_id,
    root_id,
    dist_context,
    op_role=OpRole.Backward,
    use_calc_stream=True,
):
    """Insert a c_reduce_sum op on `reduce_var` at `insert_idx`.

    The op reduces `reduce_var` in-place onto rank `root_id` of the ring
    `ring_id`, copies the tensor's dist attr onto the new op, and tags it
    with the data-parallel name scope.

    Returns:
        The newly inserted operator.
    """
    # FIX: the check accepts 0, so the message must say "non-negative",
    # not "positive".
    assert (
        root_id >= 0
    ), "root id should be a non-negative int, but now root id is {}".format(
        root_id
    )
    new_op = block._insert_op_without_sync(
        insert_idx,
        type='c_reduce_sum',
        inputs={'X': [reduce_var]},
        outputs={'Out': [reduce_var]},
        attrs={
            'ring_id': ring_id,
            'root_id': root_id,
            'use_calc_stream': use_calc_stream,
            OP_ROLE_KEY: op_role,
        },
    )

    dist_attr = dist_context.get_tensor_dist_attr_for_program(
        block.var(reduce_var)
    )
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
        new_op, dist_attr.process_mesh, dist_attr.dims_mapping, dist_context
    )
    new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
    return new_op


def _get_dp_and_sharding_groups(origin_group, sharding_group_size, rank):
    """Split `origin_group` into an outer data-parallel axis (axis 0) and an
    inner sharding axis (axis 1), returning (dp_group, sharding_group) for
    `rank`."""
    topology = [len(origin_group) // sharding_group_size, sharding_group_size]
    dp_group = _get_comm_group(origin_group, topology, 0, rank)
    sharding_group = _get_comm_group(origin_group, topology, 1, rank)
    return dp_group, sharding_group


def _is_gradient_clip_op(op):
    """Whether `op` was created inside the "/gradient_clip" name scope."""
    if not op.desc.has_attr("op_namescope"):
        return False
    return op.desc.attr("op_namescope").startswith("/gradient_clip")


def _is_weight_decay_op(op):
    """Whether `op` was created inside the "/regularization" name scope."""
    if not op.desc.has_attr("op_namescope"):
        return False
    return op.desc.attr("op_namescope").startswith("/regularization")


def _is_param_grad_fp32_cast_op(block, op):
    """Whether `op` is a backward fp16->fp32 cast whose output is the
    gradient of a parameter in `block`."""
    if not is_backward_op(op):
        return False
    if not _is_desired_cast_op(
        block, op, core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32
    ):
        return False
    out_name = op.output_arg_names[0]
    # strip the grad suffix (everything from the first '@') to recover the
    # parameter name
    base_name = out_name[: out_name.find("@")]
    return block.has_var(base_name) and block.var(base_name).is_parameter


def _is_param_fp16_cast_op(block, op, params):
    """Whether `op` is the fp32->fp16 cast of a parameter listed in
    `params` (AMP O1 pattern), excluding optimizer ops."""
    if is_optimize_op(op):
        return False
    if not _is_desired_cast_op(block, op):
        return False
    return op.input_arg_names[0] in params


def _is_desired_cast_op(
    block,
    op,
    src_var_type=core.VarDesc.VarType.FP32,
    dst_var_type=core.VarDesc.VarType.FP16,
):
    """Whether `op` is a `cast` whose single input has dtype `src_var_type`
    and single output has dtype `dst_var_type`."""
    if op.type != "cast":
        return False
    assert len(op.input_arg_names) == 1
    assert len(op.output_arg_names) == 1
    in_var = block.var(op.input_arg_names[0])
    out_var = block.var(op.output_arg_names[0])
    return in_var.dtype == src_var_type and out_var.dtype == dst_var_type


def _get_base_name_from_grad_name(grad_name):
    base_name = None
    if ".cast_fp16@GRAD" in grad_name:
1560
        base_name = grad_name[: grad_name.find(".cast_fp16@GRAD")]
J
JZ-LIANG 已提交
1561
    elif "@GRAD" in grad_name:
1562
        base_name = grad_name[: grad_name.find("@GRAD")]
J
JZ-LIANG 已提交
1563 1564 1565
    return base_name


def _is_param_grad_allreduce_op(op, block):
    """Whether `op` is a data-parallel reduce whose output is the gradient
    of a parameter in `block`."""
    if not is_data_parallel_reduce_op(op):
        return False
    base_name = _get_base_name_from_grad_name(op.output_arg_names[0])
    if not block.has_var(base_name):
        return False
    return block.var(base_name).is_parameter


def _is_param_grad_sum_op(op, block):
    """Whether `op` is a backward `sum` whose output is the gradient of a
    parameter in `block`."""
    if not is_backward_op(op) or op.type != "sum":
        return False
    base_name = _get_base_name_from_grad_name(op.output_arg_names[0])
    if not block.has_var(base_name):
        return False
    return block.var(base_name).is_parameter


def _is_forward_op(op):
    """Whether `op` carries the Forward op_role (encoded as 0)."""
    role = op.attr("op_role")
    return role == 0


def is_sharding_param_broadcast_op(op):
    """Whether `op` is a c_broadcast tagged with the data-parallel name
    scope (i.e. a sharding parameter broadcast)."""
    if op.type != "c_broadcast":
        return False
    if not op.desc.has_attr("op_namescope"):
        return False
    return ParallelMode.DataParallel in op.desc.attr("op_namescope")


def _inference_data_parallel_group_for_operator(rank_id, op, dist_context):
    """Infer the data-parallel process group of `op` for `rank_id`.

    Scans the op's non-parameter inputs; for the first one whose batch
    dimension is sharded over a mesh axis of size > 1, builds and returns
    the communication group along that axis. Returns None if no such
    input exists.
    """
    dp_group = None
    for input_name in op.input_arg_names:
        # TODO(zhaoyingli): maintain a dict in dist_context to record all
        # renamed variables, to solve the param@RESHARD cannot be identified.
        if is_parameter_related(input_name, op.block, dist_context):
            continue
        dist_attr = dist_context.get_op_dist_attr_for_program(op)
        process_mesh = dist_attr.process_mesh
        input_dim_mapping = dist_attr.get_input_dims_mapping(input_name)
        mesh_shape = process_mesh.shape
        # TODO(JZ-LIANG) replace with specific batch size dimension
        batch_size_axis = input_dim_mapping[0]
        if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
            group_ranks = _get_comm_group(
                process_mesh.process_ids,
                process_mesh.shape,
                batch_size_axis,
                rank_id,
            )
            dp_group = new_process_group(group_ranks)
            break

    return dp_group


def partition_by_use_order(params, group_size):
    """
    Shard contiguous params onto the same rank and divide the forward &
    backward computation into segments, which favors the later fuse pass.

    We assume that `params` is already sorted by utilization order.
    """
    # first pass: total memory so each rank gets ~1/group_size of it
    total_param_mem = 0.0
    param2mem = []
    for param in params:
        mem = get_var_size(param)
        total_param_mem += mem
        param2mem.append((param, mem))
    mapping = {rank: [] for rank in range(group_size)}
    cur_rank = 0
    mem_accu = 0.0
    for param, mem in param2mem:
        # advance to the next rank once the current rank's quota is exceeded
        if mem_accu > total_param_mem * 1.0 * (cur_rank + 1) / group_size:
            cur_rank += 1
        mapping[cur_rank].append(param)
        mem_accu += mem

    return mapping


def partition_by_greedy_even(params, group_size):
    """
    Greedy partition: each param goes to the rank currently holding the
    fewest elements, keeping the shards as even as possible.
    """
    mapping = {rank: [] for rank in range(group_size)}
    rank_numels = [0] * group_size
    for param in params:
        lightest = rank_numels.index(min(rank_numels))
        mapping[lightest].append(param)
        numel = reduce(lambda a, b: a * b, param.shape)
        assert (
            numel > 0
        ), "param [{}] should larger than 0, but it is [{}]".format(
            param.name, numel
        )
        rank_numels[lightest] += numel

    return mapping


def partition_parameters(params, group_size, algor="greedy_even"):
    """Partition `params` across `group_size` ranks using the chosen
    algorithm ("greedy_even" or use-order), logging the result."""
    if algor == "greedy_even":
        rank_to_params = partition_by_greedy_even(params, group_size)
    else:
        rank_to_params = partition_by_use_order(params, group_size)

    _logger.info("Sharding Parameter Partition:")
    for rank, rank_params in rank_to_params.items():
        size_mb = sum(get_var_size(var) for var in rank_params)
        _logger.info("Rank:{}, Parameter Size:{} MB.".format(rank, size_mb))
        _logger.info(
            "Params in this rank: {}.".format(
                [var.name for var in rank_params]
            )
        )

    return rank_to_params


def re_order_program(block, param_grads, dist_context):
    """Re-order the trailing optimizer ops of `block` to match the order in
    which parameters are first used, and return the (param, grad) pairs in
    that usage order."""
    # map param name -> (param, grad) pair
    pname_to_pg_pairs = {p.name: (p, g) for p, g in param_grads}

    # record the order in which params first appear as op inputs
    use_order = []
    for op in block.ops:
        for input_name in op.input_arg_names:
            if (input_name in pname_to_pg_pairs) and (
                input_name not in use_order
            ):
                use_order.append(input_name)
        if len(use_order) == len(pname_to_pg_pairs):
            break

    # reorder optimizer
    last_op = block.ops[-1]
    pname_to_op = {}
    num_ops = len(block.ops)
    remove_op_indices = []
    # TODO support case when optimizer is not the last op
    if is_optimize_op(last_op) and last_op.type in _supported_optimizer_type:
        # record the trailing run of optimizer ops (scanning backwards)
        for idx, op in reversed(list(enumerate(block.ops))):
            if op.type not in _supported_optimizer_type:
                break
            assert len(op.input("Param")) == 1
            pname_to_op[op.input("Param")[0]] = op
            remove_op_indices.append(idx)
        assert len(use_order) == len(pname_to_op)

        # append fresh copies of the optimizer ops in usage order
        for pname in use_order:
            new_op = block.append_op(type='nop')
            new_op.desc.copy_from(pname_to_op[pname].desc)
            dist_context.set_op_dist_attr_for_program(
                new_op,
                dist_context.get_op_dist_attr_for_program(pname_to_op[pname]),
            )

        # remove the old optimizer ops; indices are descending so earlier
        # removals do not shift later ones
        for idx in remove_op_indices:
            block._remove_op(idx, sync=False)

        block._sync_with_cpp()
        assert len(block.ops) == num_ops

    # TODO reorder gradient clip order
    _logger.info(
        "Sharding the Order of param being used: {}.".format(use_order)
    )
    return [pname_to_pg_pairs[p] for p in use_order]


def group_param(sharding_info, fuse_size):
    """
    Group parameters into fuse buckets. Params are grouped by:
    - owning rank id
    - fuse_size (max bucket size)
    - dtype

    Returns (group_to_param_map, param_to_group_map).
    """
    # FIX: removed an unused local `bucket = []` that was never read.
    group_to_param_map = {}
    param_to_group_map = {}
    cur_group = VarGroup(fuse_size)
    for param in sharding_info.params:
        rank = sharding_info.get_var_rank(param.name)

        if cur_group.acceptable(param, rank):
            cur_group.collect(param, rank)
        else:
            cur_group = VarGroup(fuse_size)
            cur_group.collect(param, rank)

        # all params in a group share the same rank, so this flag is
        # consistent for every member of the group
        cur_group.is_in_local_shard = sharding_info.is_in_local_shard(
            param.name
        )

        group_to_param_map.setdefault(cur_group, []).append(param.name)
        param_to_group_map[param.name] = cur_group

    return group_to_param_map, param_to_group_map


class ShardingInfo:
    """Bookkeeping for one sharding group: the parameter partition across
    the group's ranks and helpers to query parameter ownership."""

    def __init__(self, group, rank, params_grads, partition_algor):
        self.group = group
        self.params_grads = {p.name: (p, g) for p, g in params_grads}
        # FIX: compare against the raw input list; the old check compared
        # the dict with its own key set (always equal), so duplicates were
        # silently dropped instead of being reported.
        assert len(params_grads) == len(
            self.params_grads
        ), "found duplicated param in params_grads"

        self.params = [p for p, _ in params_grads]
        self.param_names = [p.name for p in self.params]
        self.group_size = group.nranks
        self.global_rank = rank
        self.local_rank = group.ranks.index(self.global_rank)
        self.partition_algor = partition_algor
        # rank in below mapping are local rank in this sharding group
        self.rank_to_params = partition_parameters(
            self.params, self.group_size, self.partition_algor
        )
        # include fp32 and fp16 param
        self.param_to_rank = {}
        self._map_param_to_rank()

    def _map_param_to_rank(self):
        """
        mapping parameters to the rank which holds it.
        """
        for rank, params in self.rank_to_params.items():
            for param in params:
                self.param_to_rank[param.name] = rank

    def get_var_rank(self, varname):
        """Return the local rank that owns `varname`, or -1 if unknown."""
        if varname in self.param_to_rank:
            return self.param_to_rank[varname]
        return -1

    # determine fp32 and fp16 (cast) param
    def is_in_local_shard(self, param_name):
        """Whether `param_name` is owned by this process's local rank."""
        return self.get_var_rank(param_name) == self.local_rank

    # NOTE the following logic is designed for supporting AMP O1 when
    # the param would be cast to fp16 before used for calculation.
    # and sharding should only broadcast the casted fp16 param
    # instead of the origin fp32 version param.
    def get_broadcast_vars_and_param_usage(self, block):
        """Return (broadcast_vars, param_usage): the set of var names that
        need broadcasting (fp16 casts plus fp32 params still used directly)
        and the per-param usage count outside optimizer/cast ops."""
        broadcast_vars = set()
        fp16_params = set()
        fp16_to_fp32 = {}

        param_usage = {x: 0 for x in self.param_names}
        for op in block.ops:
            if is_optimize_op(op):
                continue
            for input_name in op.input_arg_names:
                if input_name in self.param_names:
                    param_usage[input_name] += 1

        for op in block.ops:
            if not _is_param_fp16_cast_op(block, op, self.param_names):
                continue
            input_name = op.input_arg_names[0]
            output_name = op.output_arg_names[0]
            broadcast_vars.add(output_name)
            fp16_params.add(output_name)
            fp16_to_fp32[output_name] = input_name
            # the cast itself does not count as a direct use of the fp32 param
            param_usage[input_name] -= 1
            # the fp16 copy lives on the same rank as its fp32 original
            self.param_to_rank[output_name] = self.param_to_rank[input_name]

        for param, usage in param_usage.items():
            if usage > 0:
                broadcast_vars.add(param)
        return broadcast_vars, param_usage

    def get_param_grad(self, param_name):
        """Return the (param, grad) pair for a locally-owned parameter.

        Raises:
            ValueError: if the param is not in the local shard or unknown.
        """
        if not self.is_in_local_shard(param_name):
            raise ValueError(
                "param[{}] not in current rank.".format(param_name)
            )
        if param_name not in self.params_grads:
            raise ValueError('param[{}] not in params_grads'.format(param_name))
        # membership was just checked, so index directly
        return self.params_grads[param_name]


class VarGroup:
    """A fuse bucket of variables sharing the same dtype and owning rank,
    bounded by a maximum total element count."""

    def __init__(self, max_size):
        # FIX: the attribute was misspelled `max_siez`; the correct name is
        # used internally and the old name is kept as an alias in case any
        # external code still reads it.
        self.max_size = max_size
        self.max_siez = max_size
        self.dtype = None
        self.rank = -1
        self.numel = 0
        self.vars = []
        self.coalesce_var = None
        self.coalesce_dep_varname = None
        self.coalesce_op_idx = None
        self.reduce_op_indices = []
        self.allreduce_op_indices = []
        self.is_in_local_shard = False

    def acceptable(self, param, rank):
        """Whether `param` owned by `rank` may join this group: an empty
        group accepts anything; otherwise dtype, rank and size must fit."""
        if self.numel == 0:
            return True
        if param.dtype != self.dtype:
            return False
        if rank != self.rank:
            return False
        if self.numel + get_var_numel(param) > self.max_size:
            return False
        return True

    def collect(self, param, rank):
        """Add `param` to the group and adopt its dtype/rank."""
        self.dtype = param.dtype
        self.rank = rank
        self.numel += get_var_numel(param)
        self.vars.append(param)

    def __len__(self):
        return len(self.vars)