auto_parallel_sharding.py 70.6 KB
Newer Older
J
JZ-LIANG 已提交
1
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
#
J
JZ-LIANG 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
6
#
J
JZ-LIANG 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
8
#
J
JZ-LIANG 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import logging
16
from functools import reduce
17 18

import paddle
19
from paddle.distributed.auto_parallel.operators.common import (
20
    ParallelMode,
21
    is_data_parallel_reduce_op,
22
    is_parameter_related,
23
)
24
from paddle.distributed.auto_parallel.process_group import new_process_group
25 26
from paddle.distributed.auto_parallel.utils import (
    _get_comm_group,
27 28
    get_logger,
    get_var_numel,
29 30 31 32 33
    insert_dependencies_for_vars,
    is_backward_op,
    is_dep_skip_op,
    is_loss_grad_op,
    is_optimize_op,
34 35
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
    set_var_dist_attr,
36 37 38
)
from paddle.distributed.fleet.meta_optimizers.sharding.utils import get_var_size
from paddle.framework import core
39 40
from paddle.static import default_main_program, default_startup_program
from paddle.utils import unique_name
41 42

from .pass_base import PassBase, register_pass
J
JZ-LIANG 已提交
43 44 45

OpRole = core.op_proto_and_checker_maker.OpRole
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
# Op types that are ignored when inferring data-parallel groups from the
# forward program (readers, tensor reshapes, and send ops carry no DP group).
_skip_ops = [
    'create_py_reader',
    'create_double_buffer_reader',
    'read',
    'slice',
    'split',
    'assign',
    "send_v2",
]
J
JZ-LIANG 已提交
55 56
# update here to support new optimizers
# Optimizer op types whose ops/states this pass knows how to shard.
_supported_optimizer_type = [
    "adam",
    "adamax",
    "adamw",
    "decayed_adagrad",
    "momentum",
    "dgc_momentum",
    "lars_momentum",
    "merged_momentum",
    "lamb",
    "sgd",
]

_logger = get_logger(logging.INFO)

J
JZ-LIANG 已提交
71

72
def _is_reshard_op(op):
73 74 75
    return op.desc.has_attr(
        "op_namescope"
    ) and "/auto_parallel/reshard" in op.desc.attr('op_namescope')
76 77


J
JZ-LIANG 已提交
78 79 80
# NOTE we add the "auto_parallel" prefix to the pass in order to
# indicate that this pass should obey some constrains by auto_parallel
# for example all ops and vars should has dist attr before and after pass
81
# should use dist op instead of custom comm op
J
JZ-LIANG 已提交
82 83 84
@register_pass("auto_parallel_sharding")
class ShardingPass(PassBase):
    """Shard optimizer states (stage 1+), gradients (stage 2+) and
    parameters (stage 3) across the data-parallel group of an
    auto-parallel static program."""

    def __init__(self):
        super().__init__()
        # Pass configuration; every attr is validated in _check_self().
        self.set_attr("dist_context", None)
        self.set_attr("stage", None)
        self.set_attr("sharding_degree", None)  # for parallelizer
        self.set_attr("degree", None)  # for parallelizer_v2
        self.set_attr("enable_overlap", None)
        self.set_attr("param_comm_stream_num", None)
        self.set_attr("grad_comm_stream_num", None)
        self.set_attr("param_bucket_size_numel", None)
        self.set_attr("grad_bucket_size_numel", None)
        self.set_attr("partition_algor", None)
        self.set_attr("enable_hierarchical_comm", None)
        self.set_attr("params_grads", [])
        self.set_attr("global_rank", -1)
        # Working state filled in while the pass runs.
        self.dp_groups = set()  # data-parallel groups found in the program
        self.sharding_infos = []  # one ShardingInfo per sharding group
        self.varname_to_sharding_info = {}  # param name -> ShardingInfo
        self.sharding_hybrid_dp = False  # dp world larger than sharding world
        self.outer_dp_group = None  # outer DP group under hybrid sharding+DP
        self.shared_params_grads = []  # (param, grad) pairs owned by this rank
J
JZ-LIANG 已提交
105 106 107 108 109 110 111

    def _check_self(self):
        """Validate that every attribute this pass needs has been supplied."""
        get = self.get_attr

        if get("dist_context") is None:
            return False
        if get("stage") not in [1, 2, 3]:
            return False

        # Exactly one of "sharding_degree" (parallelizer) or "degree"
        # (parallelizer_v2) must be an int greater than 1; "sharding_degree"
        # takes precedence when both are given.
        degree = get("sharding_degree")
        if degree is None:
            degree = get("degree")
        if not isinstance(degree, int) or degree <= 1:
            return False

        if len(get("params_grads")) <= 0:
            return False

        rank = get("global_rank")
        if not isinstance(rank, int) or rank < 0:
            return False

        # The remaining knobs simply must be present.
        for required in (
            "enable_overlap",
            "param_comm_stream_num",
            "grad_comm_stream_num",
            "param_bucket_size_numel",
            "grad_bucket_size_numel",
            "partition_algor",
            "enable_hierarchical_comm",
        ):
            if get(required) is None:
                return False

        return True

    def _check_conflict(self, other_pass):
        # Sharding declares no conflicts with any other pass.
        return True

    def _apply_single_impl(self, main_program, startup_program, context):
        """Entry point of the pass: read configuration attrs, build the
        sharding group(s), then shard optimizer states, gradient
        synchronization and parameters block by block."""
        self._dist_context = self.get_attr("dist_context")
        # "sharding_degree" (parallelizer) takes precedence over "degree"
        # (parallelizer_v2); _check_self guarantees one of them is set.
        self.sharding_world_size = int(
            self.get_attr("sharding_degree") or self.get_attr("degree")
        )
        self.stage = int(self.get_attr("stage"))
        self.global_rank = int(self.get_attr("global_rank"))
        self.enable_overlap = self.get_attr("enable_overlap")
        self.param_comm_stream_num = int(self.get_attr("param_comm_stream_num"))
        self.grad_comm_stream_num = int(self.get_attr("grad_comm_stream_num"))
        self.enable_hierarchical_comm = self.get_attr(
            "enable_hierarchical_comm"
        )
        if self.param_comm_stream_num > 1 or self.grad_comm_stream_num > 1:
            assert (
                self.enable_overlap
            ), "multiple comm stream need enable_overlap to be True"
        self.param_bucket_size_numel = int(
            self.get_attr("param_bucket_size_numel")
        )
        self.grad_bucket_size_numel = int(
            self.get_attr("grad_bucket_size_numel")
        )
        self.partition_algor = self.get_attr("partition_algor")

        params_grads = self.get_attr("params_grads")
        main_block, startup_block = (
            main_program.global_block(),
            startup_program.global_block(),
        )

        # NOTE Multi / Sub-Block Support
        # We assume that only parameters are present and partitioned in
        # main_block, there is NO new param in a sub_block, and all params in
        # sub_blocks follow the same partition as main_block. This constraint
        # fulfills the 3 most common sub_block use-cases in Paddle:
        # 1. sub-block for lr scheduler
        # 2. sub-block uses the same or partial network of main-block, e.g. GPT3 generation model
        # 3. sub-block used for double backward

        self._build_sharding_groups(main_block, params_grads)
        # Sharding is applied to every block of the program, but groups are
        # derived from main_block only (see NOTE above).
        for block in main_program.blocks:
            self._shard_optimizer(block, startup_block, params_grads, context)
            self._shard_gradient_synchronization(block)
            self._shard_parameter(block, startup_block)

        # Expose the locally-owned (param, grad) pairs to downstream passes.
        context.set_attr("params_grads", self.shared_params_grads)
        self._optimization_pass(main_program, startup_program)
196

J
JZ-LIANG 已提交
197 198
    def _build_sharding_groups(self, main_block, params_grads):
        """Discover the data-parallel group(s) and derive sharding infos."""
        self._collective_data_parallel_groups(main_block)
        self._build_sharding_infos(main_block, params_grads)
J
JZ-LIANG 已提交
200 201 202

    def _collective_data_parallel_groups(self, main_block):
        """Collect the data-parallel process group(s) used by forward ops.

        Raises NotImplementedError unless exactly one DP group is found.
        """
        for op in main_block.ops:
            if not _is_forward_op(op) or op.type in _skip_ops:
                continue
            # NOTE: ops inserted by reshard carry no dist_attr and must be
            # skipped when inferring DP groups for sharding.
            if _is_reshard_op(op):
                continue
            group = _inference_data_parallel_group_for_operator(
                self.global_rank, op, self._dist_context
            )
            if group is not None:
                self.dp_groups.add(group)

        # TODO(JZ-LIANG) allow more than one dp group in the network to
        # support more general distributions generated by auto search.
        if len(self.dp_groups) != 1:
            raise NotImplementedError(
                "So far Only and Exactly one data parallel group in network are supported, but got [{}] different data parallel groups".format(
                    len(self.dp_groups)
                )
            )
J
JZ-LIANG 已提交
223

224 225 226 227 228 229
    def _build_sharding_infos(self, main_block, params_grads):
        """Build one ShardingInfo per DP group and index params by name.

        When the DP world is strictly larger than the sharding world the
        DP group is split into an inner sharding group plus an outer pure-DP
        group (hybrid sharding + data parallelism).
        """
        # order params deterministically before partitioning
        params_grads = re_order_program(
            main_block, params_grads, self._dist_context
        )

        # partition
        for dp_group in self.dp_groups:

            assert (
                dp_group.nranks >= self.sharding_world_size
            ), "sharding world size [{}] should not larger than dp world size [{}]".format(
                self.sharding_world_size, dp_group.nranks
            )
            assert (
                dp_group.nranks % self.sharding_world_size == 0
            ), "sharding world size [{}] should be divisible by dp world size [{}]".format(
                self.sharding_world_size, dp_group.nranks
            )
            assert (
                self.global_rank in dp_group.ranks
            ), "current ranks [{}] does NOT belong to the data parallel group [{}]".format(
                self.global_rank, dp_group.ranks
            )
            assert (
                len(params_grads) >= self.sharding_world_size
            ), "number of parameters [{}] is not enough to be shard among [{}] ranks".format(
                len(params_grads), self.sharding_world_size
            )

            # sharding hybrid data parallel: partial sharding param within
            if dp_group.nranks > self.sharding_world_size:
                self.sharding_hybrid_dp = True
                # multi-stream comm is not supported together with hybrid DP
                assert self.param_comm_stream_num < 2
                assert self.grad_comm_stream_num < 2
                assert (
                    len(self.dp_groups) == 1
                ), "hybrid sharding and data parallelism are supported only when there is excatly one data parallel group in the network"
                outer_dp_group, sharding_group = _get_dp_and_sharding_groups(
                    dp_group.ranks, self.sharding_world_size, self.global_rank
                )
                sharding_group = new_process_group(sharding_group)
                self.outer_dp_group = new_process_group(outer_dp_group)
            else:
                sharding_group = dp_group

            self._dist_context._sharding_group = sharding_group
            # TODO(JZ-LIANG) when support multiple dp groups in future, should group param and bind them to corresponding dp group
            sharding_info = ShardingInfo(
                sharding_group,
                self.global_rank,
                params_grads,
                self.partition_algor,
            )
            self.sharding_infos.append(sharding_info)
            for param in sharding_info.params:
                self.varname_to_sharding_info[param.name] = sharding_info

283 284 285
    def _shard_optimizer(
        self, main_block, startup_block, params_grads, pass_context
    ):
        """
        sharding all optimizer related ops and vars, include:
        gradient clip ops & vars
        weight decay ops & vars
        optimizer ops and states
        """
        self._shard_amp_related_op_and_vars(main_block, pass_context)
        self._shard_weight_decay(main_block)
        # gradient clip sharding is currently disabled
        # self._shard_gradient_clip(main_block)
        self._shard_optimizer_ops_and_states(main_block, startup_block)
        self._insert_optimizer_broadcasts(main_block, startup_block)

    def _shard_amp_related_op_and_vars(self, main_block, pass_context):
        """Prune AMP bookkeeping ops/vars that belong to params outside the
        local shard (stage >= 2 only)."""
        if self.stage < 2:
            return

        for idx, op in reversed(list(enumerate(main_block.ops))):
            # shard amp related param_grad cast
            if _is_param_grad_fp32_cast_op(main_block, op):
                output_name = op.output_arg_names[0]
                # strip the "@..." suffix to recover the parameter name
                param_name = output_name[: output_name.find("@")]
                if not self._is_parameter_in_local_shard(param_name):
                    main_block._remove_op(idx, sync=False)
                    main_block._remove_var(output_name, sync=False)

            # shard check nan inf
            elif op.type in ["check_finite_and_unscale", "update_loss_scaling"]:
                # keep only the grads whose params live in the local shard
                reversed_x = []
                for input_name in op.desc.input('X'):
                    param_name = input_name[: input_name.find("@")]

                    if self._is_parameter_in_local_shard(param_name):
                        reversed_x.append(input_name)

                # NOTE: When `reversed_x` is [], check_finite_and_unscale is
                # replaced by a `fill_constant` op so its output (found-inf
                # flag) is set to False.
                if reversed_x:
                    op.desc.set_input('X', reversed_x)
                    op.desc.set_output('Out', reversed_x)
                else:
                    if op.type == "check_finite_and_unscale":
                        op_role = op.attr('op_role')
                        out_name = op.output_arg_names[0]
                        out_var = main_block.vars[out_name]
                        main_block._remove_op(idx, sync=False)
                        main_block._insert_op_without_sync(
                            idx,
                            type="fill_constant",
                            outputs={"Out": out_var},
                            attrs={
                                "shape": out_var.shape,
                                "dtype": out_var.dtype,
                                "value": 0,
                                OP_ROLE_KEY: op_role,
                            },
                        )
                    else:
                        main_block._remove_op(idx, sync=False)

        main_block._sync_with_cpp()

    def _shard_gradient_clip(self, main_block):
        """Shard global-norm gradient clipping: drop per-grad norm ops for
        params outside the local shard and allreduce the partial global norm
        across the sharding group (stage >= 2 only)."""
        if self.stage < 2:
            return

        # TODO (JZ-LIANG) support calculate global norm with tensor parallelism
        removed_op_type = ['elementwise_mul', 'squared_l2_norm', 'clip_by_norm']
        removed_op_idx = set()
        removed_tmp_var = set()

        # First scan: mark clip ops (and their temp outputs) that act on
        # gradients not owned by this rank.
        for idx, op in list(enumerate(main_block.ops)):
            if not _is_gradient_clip_op(op):
                continue

            if op.type in removed_op_type:
                input_name = op.input("X")[0]
                param_name = input_name[: input_name.find("@GRAD")]
                if not self._is_parameter_in_local_shard(param_name):
                    removed_op_idx.add(idx)
                    if op.type in ['squared_l2_norm', 'clip_by_norm']:
                        for output_name in op.output_arg_names:
                            removed_tmp_var.add(output_name)

        # Remove marked ops back-to-front so indices stay valid.
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if not _is_gradient_clip_op(op):
                continue
            if idx in removed_op_idx:
                main_block._remove_op(idx, sync=False)

        for varname in removed_tmp_var:
            main_block._remove_var(varname, sync=False)

        # Rewire the `sum` op that accumulates the global norm: keep only the
        # surviving inputs, then allreduce its output over each sharding group.
        for idx, op in list(enumerate(main_block.ops)):
            if not _is_gradient_clip_op(op):
                continue
            if op.type == 'sum':
                reserved_vars = []
                for input_name in op.input_arg_names:
                    if input_name not in removed_tmp_var:
                        reserved_vars.append(input_name)
                op.desc.set_input("X", reserved_vars)

                sum_op_output = op.output_arg_names[0]
                for i, sharding_info in enumerate(self.sharding_infos):
                    new_op = main_block._insert_op(
                        idx + i + 1,
                        type='c_allreduce_sum',
                        inputs={'X': [sum_op_output]},
                        outputs={'Out': [sum_op_output]},
                        attrs={
                            'ring_id': sharding_info.group.id,
                            'op_namescope': "/gradient_clip_model_parallelism",
                            'use_calc_stream': True,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )
                    dist_attr = (
                        self._dist_context.get_tensor_dist_attr_for_program(
                            main_block.var(sum_op_output)
                        )
                    )
                    # assert dist_attr is not None
                    # naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                    #     new_op, dist_attr.process_mesh, dist_attr.dims_mapping,
                    #     self._dist_context)
                break

        main_block._sync_with_cpp()

    def _shard_weight_decay(self, main_block):
        """Shard weight-decay ops (stage >= 2).

        Sharded weight decay is not implemented yet, so any weight-decay op
        found in the block raises NotImplementedError.
        """
        if self.stage < 2:
            return

        for idx, op in reversed(list(enumerate(main_block.ops))):
            if _is_weight_decay_op(op):
                raise NotImplementedError(
                    "weight decay is NOT supported by now"
                )
        main_block._sync_with_cpp()

    def _shard_optimizer_ops_and_states(self, main_block, startup_block):
        """Remove optimizer ops (and their state vars plus init ops) for
        parameters that do not belong to the local shard; record the
        (param, grad) pairs this rank keeps."""
        should_removed_optimizer_states = []
        # Optimizer ops sit at the tail of main_block: scan backwards and
        # stop at the first non-optimize op.
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if not is_optimize_op(op):
                break

            if op.type in _supported_optimizer_type:
                assert "Param" in op.input_names
                assert len(op.input("Param")) == 1
                param_name = op.input("Param")[0]
                if not self._is_parameter_in_local_shard(param_name):
                    # Every output except the parameter itself is an
                    # optimizer state (moments, lr pow accumulators, ...).
                    should_removed_optimizer_states.extend(
                        [
                            varname
                            for varname in op.output_arg_names
                            if varname != param_name
                        ]
                    )
                    main_block._remove_op(idx, sync=False)
                else:
                    self.shared_params_grads.append(
                        self._get_param_grad(param_name)
                    )

        # Drop the startup ops that initialize the removed states.
        for idx, op in reversed(list(enumerate(startup_block.ops))):
            if (
                len(op.output_arg_names) == 1
                and op.output_arg_names[0] in should_removed_optimizer_states
            ):
                startup_block._remove_op(idx, sync=False)

        for varname in should_removed_optimizer_states:
            if main_block.has_var(varname):
                main_block._remove_var(varname, sync=False)
            if startup_block.has_var(varname):
                startup_block._remove_var(varname, sync=False)

        main_block._sync_with_cpp()
        startup_block._sync_with_cpp()

    def _insert_optimizer_broadcasts(self, main_block, startup_block):
        """Append a c_broadcast per parameter so every rank receives the
        updated value from the shard owner after the optimizer step.

        Skipped for stage 3 (params are sharded, not replicated) and for the
        fused path (param_bucket_size_numel > 1), which broadcasts coalesced
        buffers elsewhere.
        """
        if self.stage > 2 or self.param_bucket_size_numel > 1:
            return

        for sharding_info in self.sharding_infos:
            for param in sharding_info.params:
                assert main_block.has_var(param.name)
                assert startup_block.has_var(param.name)

                new_op = main_block.append_op(
                    type='c_broadcast',
                    inputs={'X': param},
                    outputs={'Out': param},
                    attrs={
                        'ring_id': sharding_info.group.id,
                        'root': sharding_info.get_var_rank(param.name),
                        'use_calc_stream': True,
                        OP_ROLE_KEY: OpRole.Optimize,
                    },
                )
                new_op._set_attr(
                    'op_namescope', '/' + ParallelMode.DataParallel
                )
                # Propagate the parameter's dist attr onto the new op.
                param_dist_attr = (
                    self._dist_context.get_tensor_dist_attr_for_program(param)
                )
                assert param_dist_attr is not None
                naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                    new_op,
                    param_dist_attr.process_mesh,
                    param_dist_attr.dims_mapping,
                    self._dist_context,
                )
        main_block._sync_with_cpp()

    def _is_parameter_in_local_shard(self, param_name):
        """Whether *param_name* is assigned to this rank's shard."""
        info = self.varname_to_sharding_info.get(param_name)
        assert info is not None
        return info.is_in_local_shard(param_name)

513 514 515 516 517 518 519
    def _get_param_grad(self, param_name):
        """Return the (param, grad) pair registered for *param_name*."""
        assert param_name in self.varname_to_sharding_info
        pair = self.varname_to_sharding_info[param_name].get_param_grad(
            param_name
        )
        assert pair is not None
        return pair

J
JZ-LIANG 已提交
520 521 522 523 524 525 526
    def _shard_gradient_synchronization(self, main_block):
        """Rewrite gradient synchronization for sharding (stage >= 2).

        Each param-grad allreduce is replaced by a reduce to the rank that
        owns the parameter's shard. Under hybrid sharding + DP, grads that
        stay in the local shard additionally keep an allreduce on the outer
        DP group. Grad-rename `sum` ops whose output is not locally owned
        are removed.

        Fix: dropped the unused locals `dp_ring_ids` and `reduce_op` from
        the original implementation.
        """
        if self.stage < 2:
            return

        for idx, op in reversed(list(enumerate(main_block.ops))):
            if _is_param_grad_allreduce_op(op, main_block):
                input_name = op.input_arg_names[0]
                base_name = _get_base_name_from_grad_name(input_name)
                sharding_info = self.varname_to_sharding_info[base_name]
                # Insert a reduce targeting the shard owner; the original
                # allreduce op is shifted to idx + 1 by this insertion.
                _insert_reduce_op(
                    main_block,
                    idx,
                    input_name,
                    sharding_info.group.id,
                    sharding_info.get_var_rank(base_name),
                    self._dist_context,
                )
                if (
                    not self.sharding_hybrid_dp
                    or not sharding_info.is_in_local_shard(base_name)
                ):
                    # The reduce fully replaces the allreduce.
                    main_block._remove_op(idx + 1, sync=False)
                else:
                    # Keep the allreduce but move it onto the outer DP group.
                    op._set_attr("ring_id", self.outer_dp_group.id)
                    op._set_attr(
                        'op_namescope', '/' + ParallelMode.DataParallel
                    )

            # NOTE:
            # var@GRAD = sum(var@GRAD@RENAME@0, var@GRAD@RENAME@1)
            # If the var is not in the local rank and is the output of many
            # ops (i.e. it was renamed), the sum op should be removed.
            if _is_param_grad_sum_op(op, main_block):
                out_name = op.output_arg_names[0]
                base_name = _get_base_name_from_grad_name(out_name)
                sharding_info = self.varname_to_sharding_info[base_name]
                if not sharding_info.is_in_local_shard(base_name):
                    main_block._remove_op(idx, sync=False)

        main_block._sync_with_cpp()

    def _shard_parameter(self, main_block, startup_block):
        """Shard parameters themselves (stage 3 only).

        Parameters not owned by the local rank are removed; every use of a
        non-owned parameter is rewired to a freshly created temporary that is
        filled by a broadcast from the owner rank just before use.
        """
        if self.stage < 3:
            return

        dp_ring_ids = [group.id for group in self.dp_groups]
        for sharding_info in self.sharding_infos:
            (
                need_broadcast_vars,
                param_usage,
            ) = sharding_info.get_broadcast_vars_and_param_usage(main_block)
            # Params never used in main_block and not owned locally: their
            # cast ops/vars can be dropped outright (see cast loop below).
            not_used_param_nane = []
            for param_name in param_usage:
                if (
                    param_usage[param_name] == 0
                    and sharding_info.get_var_rank(param_name)
                    != sharding_info.local_rank
                ):
                    not_used_param_nane.append(param_name)

            # Rewire each consuming op to a broadcast temporary.
            for idx, op in reversed(list(enumerate(main_block.ops))):
                if is_optimize_op(op):
                    continue

                for input_name in op.input_arg_names:
                    # NOTE hack for embedding op when AMP 02-3
                    # paddle amp forces embedding (lookup table) to run on fp32
                    if _is_param_fp16_cast_op(
                        main_block, op, sharding_info.param_names
                    ):
                        continue
                    if input_name not in need_broadcast_vars:
                        continue
                    root_rank = sharding_info.get_var_rank(input_name)
                    if root_rank == sharding_info.local_rank:
                        # Owner rank broadcasts the parameter in place.
                        broadcast_varname = input_name
                    else:
                        # Non-owners receive into a fresh temporary and the
                        # op input is renamed to it.
                        broadcast_varname = unique_name.generate(
                            input_name + "@BroadCast"
                        )
                        input_var = main_block.var(input_name)
                        new_var = main_block.create_var(
                            name=broadcast_varname,
                            shape=input_var.shape,
                            dtype=input_var.dtype,
                            persistable=False,
                        )
                        ref_dist_attr = (
                            self._dist_context.get_tensor_dist_attr_for_program(
                                input_var
                            )
                        )
                        out_var_dist_attr = set_var_dist_attr(
                            self._dist_context,
                            new_var,
                            ref_dist_attr.dims_mapping,
                            ref_dist_attr.process_mesh,
                        )
                        op._rename_input(input_name, broadcast_varname)

                    _insert_init_and_broadcast_op(
                        main_block,
                        idx,
                        broadcast_varname,
                        sharding_info.local_rank,
                        root_rank,
                        sharding_info.group.id,
                        op.attr('op_role'),
                        self._dist_context,
                    )

            # Drop casts of params that are neither used nor owned locally.
            for idx, op in reversed(list(enumerate(main_block.ops))):
                if op.type != "cast":
                    continue
                input_name = op.input_arg_names[0]
                output_name = op.output_arg_names[0]
                if input_name in not_used_param_nane:
                    main_block._remove_op(idx, sync=False)
                    main_block._remove_var(output_name, sync=False)

            # Prune startup block: keep init/broadcast only for locally
            # owned params.
            for idx, op in reversed(list(enumerate(startup_block.ops))):
                assert len(op.output_arg_names) == 1
                output_name = op.output_arg_names[0]

                if (
                    op.type == "c_broadcast"
                    and op.attr("ring_id") in dp_ring_ids
                ):
                    if (
                        self.outer_dp_group
                        and sharding_info.get_var_rank(output_name)
                        == sharding_info.local_rank
                    ):
                        op._set_attr("ring_id", self.outer_dp_group.id)
                    else:
                        startup_block._remove_op(idx, sync=False)
                    continue

                if (
                    op.type != "c_broadcast"
                    and output_name in param_usage
                    and sharding_info.get_var_rank(output_name)
                    != sharding_info.local_rank
                ):
                    startup_block._remove_op(idx, sync=False)

            # Finally remove the non-owned parameter variables themselves.
            for param_name in param_usage:
                if (
                    sharding_info.get_var_rank(param_name)
                    != sharding_info.local_rank
                ):
                    main_block._remove_var(param_name, sync=False)
                    startup_block._remove_var(param_name, sync=False)

        main_block._sync_with_cpp()
        startup_block._sync_with_cpp()

680 681
    def _optimization_pass(self, main_program, startup_program):
        """Post-sharding optimizations: gradient comm fusion/overlap, and
        fused parameter communication for stages 2 and 3."""
        if self.stage <= 1:
            return

        self.grad_coalesce_prefix = 'sharding_coalesce_grad_'
        self.param_coalesce_prefix = 'sharding_coalesce_param_'
        # NOTE PR#49275 for detail
        self.comm_op_scheduling_priority = -1

        # TODO support multiple sub_blocks
        assert (
            len(self.sharding_infos) == 1
        ), "gradient synchronization optimization only support one sharding group right now, but got [{}].".format(
            len(self.sharding_infos)
        )
        sharding_info = self.sharding_infos[0]

        with paddle.static.program_guard(main_program, startup_program):
            self._gradient_sync_optimization(sharding_info)
            # TODO independent the logic of fuse and overlap
            # support overlap when no fuse
            if self.param_bucket_size_numel > 1:
                if self.stage == 2:
                    self._fuse_overlap_parameter_comm_stage_two(sharding_info)
                elif self.stage == 3:
                    self._fuse_overlap_parameter_comm_stage_three(sharding_info)

    def _gradient_sync_optimization(self, sharding_info):

        if self.grad_bucket_size_numel <= 1 and (not self.enable_overlap):
            return

        main_block = default_main_program().global_block()
        startup_block = default_startup_program().global_block()
        coalesce_to_group_map, grad_name_to_group_map = self._group_grads(
            main_block,
            sharding_info,
        )
        self._overlap_grad_comm(
            main_block,
            sharding_info,
            coalesce_to_group_map,
            grad_name_to_group_map,
        )

    def _fuse_overlap_parameter_comm_stage_two(self, sharding_info):
        """Fuse sharded-parameter broadcasts into buckets (sharding stage 2).

        Parameters are grouped into buckets of at most
        ``param_bucket_size_numel`` elements; each multi-parameter bucket is
        coalesced into one fused tensor and broadcast from its owning rank
        with a single ``c_broadcast``. When ``enable_overlap`` is on, the
        broadcasts are spread round-robin over ``param_comm_stream_num``
        communication streams so they can overlap with computation, and
        explicit dependency ops are inserted to keep the broadcasts ordered
        per stream.
        """
        main_block = default_main_program().global_block()
        startup_block = default_startup_program().global_block()

        group_to_param_map, param_to_group_map = group_param(
            sharding_info, self.param_bucket_size_numel
        )
        _logger.info("Sharding Stage2 Optimization:")
        _logger.info(
            "Param Bucket size is [{}], [{}] Parameters are fused into [{}] Buckets".format(
                self.param_bucket_size_numel,
                len(param_to_group_map.keys()),
                len(group_to_param_map.keys()),
            )
        )
        # fused (coalesce) var name -> its parameter group; consulted below
        # when wiring dependencies for the inserted broadcast ops
        broadcast_var_to_group_map = {}

        if self.enable_overlap:
            # if the communication is cross node, comm will be slow and calc will therefore
            # wait for comm. enable multi-comm-stream
            # TODO revise me in future
            # 1. manager the comm and corresponding stream
            # 2. allow more than two streams and open to be config
            self.param_comm_group_stream_pairs = []
            ranks = sharding_info.group.ranks
            for i in range(self.param_comm_stream_num):
                # stream 0 reuses the existing sharding group; every extra
                # stream gets its own process group over the same ranks
                if i == 0:
                    group = sharding_info.group
                else:
                    group = new_process_group(ranks, force_new_group=True)
                # NOTE here stream is just a presentation with different name,
                # it is up to executor to create the exact streams given the name.
                stream = f"sharding_param_comm_stream{i}"
                self.param_comm_group_stream_pairs.append(
                    {
                        "comm_group": group,
                        "comm_stream": stream,
                    }
                )
            _logger.info(
                "Parameter Communication would use [{}] streams.".format(
                    self.param_comm_stream_num
                )
            )
            self.op_to_stream_idx = {}

        # NOTE(review): the loop below reads param_comm_group_stream_pairs and
        # op_to_stream_idx even when enable_overlap is False — presumably they
        # are initialized elsewhere in that configuration; confirm.
        for i, param_group in enumerate(group_to_param_map.keys()):

            assert len(param_group) >= 1
            if len(param_group) > 1:
                # multi-parameter bucket: create the fused var in both blocks
                # and coalesce the members into it during startup
                coalesce_var_name = unique_name.generate(
                    self.param_coalesce_prefix + str(i)
                )
                startup_block.create_var(
                    name=coalesce_var_name,
                    dtype=param_group.dtype,
                    persistable=True,
                    stop_gradient=True,
                )
                param_group.coalesce_var = main_block.create_var(
                    name=coalesce_var_name,
                    dtype=param_group.dtype,
                    persistable=True,
                    stop_gradient=True,
                )
                startup_block.append_op(
                    type="coalesce_tensor",
                    inputs={"Input": param_group.vars},
                    outputs={
                        "Output": param_group.vars,
                        "FusedOutput": param_group.coalesce_var,
                    },
                    attrs={
                        "copy_data": True,
                        "use_align": True,
                        "dtype": param_group.dtype,
                        OP_ROLE_KEY: OpRole.Forward,
                    },
                )
            else:
                # single-parameter bucket: broadcast the parameter directly
                param_group.coalesce_var = param_group.vars[0]
            _logger.info(
                "Bucket[{}] size [{}]MB.".format(
                    i,
                    sum([get_var_size(p) for p in param_group.vars]),
                )
            )
            _logger.debug(
                "Bucket[{}] parameters: {}.".format(
                    i,
                    [p.name for p in param_group.vars],
                )
            )

            broadcast_var_to_group_map[
                param_group.coalesce_var.name
            ] = param_group

            # TODO revise me to manager stream and comm
            # buckets are assigned round-robin to the configured comm streams
            comm_stream_idx = i % self.param_comm_stream_num
            comm_group = self.param_comm_group_stream_pairs[comm_stream_idx][
                'comm_group'
            ]
            comm_stream = self.param_comm_group_stream_pairs[comm_stream_idx][
                'comm_stream'
            ]
            new_op = main_block.append_op(
                type='c_broadcast',
                inputs={'X': param_group.coalesce_var},
                outputs={'Out': param_group.coalesce_var},
                attrs={
                    'ring_id': comm_group.id,
                    'root': param_group.rank,
                    'use_calc_stream': True,
                    OP_ROLE_KEY: OpRole.Optimize,
                },
            )
            self.op_to_stream_idx[new_op] = comm_stream_idx
            new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
            if self.enable_overlap:
                new_op.dist_attr.execution_stream = comm_stream
                new_op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

            # NOTE the current dist context lack the presentation for bucket tensor which
            # composes many tensor with different dims_mapping. we DO NOT assign dist attr
            # for it currently.

        # add dependencies:
        # 1. all broadcast depend on its pre collective
        # 2. coalesce broadcast add nop to resolute data flow dependencies
        dep_map = {}
        for i, op in enumerate(main_block.ops):
            if is_sharding_param_broadcast_op(op):
                broadcast_varname = op.output("Out")[0]
                broadcast_var = main_block.vars[broadcast_varname]
                param_group = broadcast_var_to_group_map[broadcast_varname]
                comm_stream = None
                if self.enable_overlap:
                    comm_stream = op.dist_attr.execution_stream

                # FIXME remove me when upgrade to multi-comm version
                if len(dep_map.keys()) < self.param_comm_stream_num:
                    # the first broadcast on each stream waits for the
                    # optimizer op's ParamOut
                    op = _get_broadcast_first_depend_op(main_block)
                    prior_var = main_block.vars[op.output("ParamOut")[0]]
                else:
                    # later broadcasts wait on the previous broadcast of the
                    # same stream (param_comm_stream_num ops back)
                    pre_op = main_block.ops[i - self.param_comm_stream_num]
                    assert is_sharding_param_broadcast_op(
                        pre_op
                    ), "Unexpected: sharding broadcast pre op should be broadcast."
                    prior_var = main_block.vars[pre_op.output("Out")[0]]
                # broadcast order dependencies
                dep_map[i] = [(i, [prior_var], [broadcast_var], comm_stream)]

                if len(param_group.vars) > 1:
                    # in shard coalesce depend to optimizer
                    if param_group.is_in_local_shard:
                        last_grad = param_group.vars[-1]
                        dep_map[i].append(
                            (i, [last_grad], [broadcast_var], comm_stream)
                        )
                    # coalesce resolution post deps
                    dep_map[i].append(
                        (i + 1, [broadcast_var], param_group.vars, comm_stream)
                    )

        # insert deps
        indice = sorted(dep_map.keys(), reverse=True)
        for i in indice:
            # insert back-to-front so earlier insertion indices stay valid
            for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
                depend_op = insert_dependencies_for_vars(
                    main_block,
                    idx,
                    prior_vars,
                    post_vars,
                    self._dist_context,
                    OpRole.Optimize,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_stage2_broadcast_dep",
                )
                if self.enable_overlap:
                    depend_op.dist_attr.execution_stream = comm_stream
                    depend_op.dist_attr.scheduling_priority = (
                        self.comm_op_scheduling_priority
                    )

        main_block._sync_with_cpp()

    def _fuse_overlap_parameter_comm_stage_three(self, sharding_info):
        pass

    def _group_grads(
        self,
        block,
        sharding_info,
    ):
        """
        conditions for gradients to be grouped:
            1. group size < grad_bucket_size_numel
            2. same dp group (TODO)
            3. same src rank
            4. same dtype
            5. dependency: grad would NOT be used by other ops within group segment

        main logic:
            1. record coalesce group
            2. record all dp allreduce/reduce op idx

            3. insert coalesce op
            4. insert coalesce dependency (avoid allocate memory too early)
            5. modify and remove allreduce/reduce op
            6. ensure sharding-dp hybrid parallel logic

        gradients inside same group would be fuse into one coalesce tensor

        Returns:
            (coalesce_to_group_map, grad_name_to_group_map), or None when the
            program contains no backward ops (inference).
        """
        ops = block.ops
        if self.grad_bucket_size_numel < 1:
            # numel for transformer layer
            # h = 4096 + 1
            # ffn_numel = 2 * (4 * h) * h
            # mha_numel = 3 * h * h + h * h
            # max_fuse_numel = ffn_numel + mha_numel
            self.grad_bucket_size_numel = 1

        first_backward_op = None
        for op in ops:
            if is_loss_grad_op(op):
                first_backward_op = op
        # not backward op, sharding for inference
        if first_backward_op is None:
            return
        # NOTE(review): first_backward_varname looks unused below — confirm
        # before removing.
        first_backward_varname = first_backward_op.output_arg_names[0]

        cur_group = VarGroup(self.grad_bucket_size_numel)
        grad_groups = []
        grouped_grad_names = set()

        def op_depend_on_group(op, group):
            # True if op reads or writes any grad already collected in group
            vars_ = set(op.input_arg_names + op.output_arg_names)
            var_names = {var.name for var in group.vars}
            return len(vars_.intersection(var_names)) > 0

        # analyze groups
        i = 0
        while i < len(ops):
            op = ops[i]
            if is_data_parallel_reduce_op(op):
                assert (
                    op.type == "c_reduce_sum"
                ), "Sharding should reduce grad first and than allreduce if Hybrid Sharding with Data-Parallel"

                grad_name = op.output_arg_names[0]
                param_name = _get_base_name_from_grad_name(grad_name)
                rank = sharding_info.get_var_rank(param_name)
                grad_var = block.var(grad_name)

                if cur_group.acceptable(grad_var, rank):
                    assert grad_name not in grouped_grad_names
                    cur_group.collect(grad_var, rank)
                else:
                    # current bucket is full / incompatible: seal it and start
                    # a fresh one with this grad
                    grad_groups.append(cur_group)
                    cur_group = VarGroup(self.grad_bucket_size_numel)
                    cur_group.collect(grad_var, rank)

                if len(cur_group.vars) == 1:
                    # first grad of a fresh group: the coalesce op will later
                    # be inserted just before this reduce op
                    cur_group.coalesce_op_idx = i - 1
                    # NOTE coalesce dependency: control when allocate memory for gradients
                    # too early would increase the peak memory requirement, too later would hurt the performance
                    j = 2
                    while is_dep_skip_op(ops[i - j]):
                        j += 1
                    dep_op = ops[i - j]
                    dep_varname = dep_op.output_arg_names[0]
                    cur_group.coalesce_dep_varname = dep_varname

                grouped_grad_names.add(grad_name)
                cur_group.reduce_op_indices.append(i)

                if self.sharding_hybrid_dp and sharding_info.is_in_local_shard(
                    param_name
                ):
                    cur_group.is_in_local_shard = True
                    assert (
                        ops[i + 1].type == "c_allreduce_sum"
                    ), "Sharding should reduce grad first and than allreduce if Hybrid Sharding with Data-Parallel"
                    assert (
                        ops[i + 1].output_arg_names[0] == grad_name
                    ), "Hybrid Sharding with Data-Parallel should sync same gradient var"
                    cur_group.allreduce_op_indices.append(i + 1)
                    # skip the paired allreduce op
                    i += 1
            elif op_depend_on_group(op, cur_group):
                # a consumer of a grouped grad closes the current group
                grad_groups.append(cur_group)
                cur_group = VarGroup(self.grad_bucket_size_numel)

            i += 1
        # some grad not in this rank may not be used after dp reduced
        if len(cur_group.vars) >= 1:
            grad_groups.append(cur_group)

        _logger.info("Sharding Gradient Communication Optimization:")
        _logger.info(
            "Gradient Bucket size is [{}], [{}] Gradients are fused into [{}] Buckets.".format(
                self.grad_bucket_size_numel,
                len(grouped_grad_names),
                len(grad_groups),
            )
        )

        # create coalesce tensor and record op idx
        grad_name_to_group_map = {}
        coalesce_to_group_map = {}
        modify_reduce_op_map = {}
        coalesce_op_map = {}
        remove_reduce_op_indices = []

        for i, group in enumerate(grad_groups):
            if len(group.vars) > 1:
                group.coalesce_var = block.create_var(
                    name=unique_name.generate(
                        self.grad_coalesce_prefix + str(i)
                    ),
                    dtype=group.dtype,
                    persistable=False,
                    stop_gradient=True,
                )
                coalesce_op_map[group.coalesce_op_idx] = group
                # only the last reduce of each group survives (retargeted to
                # the fused var); the earlier ones are removed
                last_reduce_op_idx = group.reduce_op_indices.pop()
                modify_reduce_op_map[last_reduce_op_idx] = group
                remove_reduce_op_indices.extend(group.reduce_op_indices)
                if group.is_in_local_shard:
                    last_allreduce_op_idx = group.allreduce_op_indices.pop()
                    modify_reduce_op_map[last_allreduce_op_idx] = group
                    remove_reduce_op_indices.extend(group.allreduce_op_indices)
            else:
                group.coalesce_var = group.vars[0]
            for grad in group.vars:
                grad_name_to_group_map[grad.name] = group
            coalesce_to_group_map[group.coalesce_var.name] = group

        # sanity check: an op index must play exactly one role
        coalesce_op_set = set(coalesce_op_map.keys())
        modify_op_set = set(modify_reduce_op_map.keys())
        remove_op_set = set(remove_reduce_op_indices)
        confilct = coalesce_op_set.intersection(modify_op_set)

        assert len(confilct) == 0
        confilct = coalesce_op_set.intersection(remove_op_set)
        assert len(confilct) == 0
        confilct = modify_op_set.intersection(remove_op_set)
        assert len(confilct) == 0

        # update block (reverse order keeps recorded indices valid)
        for idx, op in reversed(list(enumerate(block.ops))):

            if idx in modify_reduce_op_map:
                group = modify_reduce_op_map[idx]
                grad_name = op.output_arg_names[0]
                assert (
                    grad_name == group.vars[-1].name
                ), "Unexpected: it is supposed to sync [{}] but got [{}]".format(
                    group.vars[-1].name, grad_name
                )
                op._rename_input(grad_name, group.coalesce_var.name)
                op._rename_output(grad_name, group.coalesce_var.name)

            if idx in remove_reduce_op_indices:
                block._remove_op(idx, sync=False)

            if idx in coalesce_op_map:
                group = coalesce_op_map[idx]
                first_grad_name = group.vars[0].name
                assert (
                    first_grad_name in op.output_arg_names
                ), "Unexpected: op is supposed to generate grad [{}] but got [{}]".format(
                    first_grad_name, str(op)
                )
                grad_names = [grad.name for grad in group.vars]

                # record member shapes/ranks so the fused buffer can be
                # split back after communication
                concated_shapes = []
                concated_ranks = []
                for grad_ in group.vars:
                    shape = grad_.shape
                    concated_shapes.extend(shape)
                    concated_ranks.append(len(shape))

                coalesce_op = block._insert_op_without_sync(
                    idx,
                    type="coalesce_tensor",
                    inputs={"Input": grad_names},
                    outputs={
                        "Output": grad_names,
                        "FusedOutput": group.coalesce_var,
                    },
                    attrs={
                        "copy_data": False,
                        "use_align": True,
                        "dtype": group.dtype,
                        "concated_shapes": concated_shapes,
                        "concated_ranks": concated_ranks,
                        OP_ROLE_KEY: OpRole.Backward,
                    },
                )
                depend_op = insert_dependencies_for_vars(
                    block,
                    idx,
                    block.var(group.coalesce_dep_varname),
                    group.coalesce_var,
                    self._dist_context,
                    OpRole.Backward,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_grad_coalesce_dep",
                )
        block._sync_with_cpp()

        return coalesce_to_group_map, grad_name_to_group_map

    def _overlap_grad_comm(
        self,
        block,
        sharding_info,
        coalesce_to_group_map,
        grad_name_to_group_map,
    ):
        """
        overlap gradient communication with backward & optimizer computation.

        1. assign gradient communications to grad comm stream
        2. for coalesce gradient communication:
            2.1 insert before communication dependencies
            2.2 insert after communication dependencies only when need
        3. there is not need to add explicit dependencies for non-coalesce gradient communication

        P.S. this overlap pass is ONLY adapted for standalone executor (graph based) and stream awared allocator.
        """
        if not self.enable_overlap:
            return

        self.grad_comm_group_stream_pairs = []
        ranks = sharding_info.group.ranks
        # NOTE since the gradient synchronization has calculation, there would be computation
        # competition between backward calculation. therefore should limit the number of stream used.
        for i in range(self.grad_comm_stream_num):
            # stream 0 reuses the existing sharding group; every extra
            # stream gets its own process group over the same ranks
            if i == 0:
                group = sharding_info.group
            else:
                group = new_process_group(ranks, force_new_group=True)
            # NOTE here stream is just a presentation with different name,
            # it is up to executor to create the exact streams given the name.
            stream = f"sharding_grad_comm_stream{i}"
            self.grad_comm_group_stream_pairs.append(
                {
                    "comm_group": group,
                    "comm_stream": stream,
                }
            )

        ops = block.ops
        # analyze dependencies
        dep_map = {}
        reduce_op_count = 0
        grad_comm_op_to_stream_idx = {}
        for idx, op in enumerate(ops):
            if is_data_parallel_reduce_op(op):

                # allreduce ops are handled together with their paired
                # c_reduce_sum below
                if op.type == "c_allreduce_sum":
                    continue
                # reduce ops are assigned round-robin to the comm streams
                stream_idx = reduce_op_count % self.grad_comm_stream_num
                grad_comm_op_to_stream_idx[op] = stream_idx
                comm_group = self.grad_comm_group_stream_pairs[stream_idx][
                    "comm_group"
                ]
                comm_stream = self.grad_comm_group_stream_pairs[stream_idx][
                    "comm_stream"
                ]

                reduce_varname = op.output("Out")[0]
                grad_group = coalesce_to_group_map[reduce_varname]
                assert grad_group.coalesce_var.name == reduce_varname

                # coalesce deps
                if len(grad_group.vars) > 1:
                    # NOTE should prior vars to be all grads ?
                    # when the grad_ops' order is random
                    # prior dep
                    dep_map[idx] = [
                        (
                            idx,
                            grad_group.vars[-1],
                            grad_group.coalesce_var,
                            comm_stream,
                        )
                    ]
                    # post dep
                    post_idx = idx + 1
                    if self.sharding_hybrid_dp and grad_group.is_in_local_shard:
                        post_idx += 1
                    dep_map[idx].append(
                        (
                            post_idx,
                            grad_group.coalesce_var,
                            grad_group.vars,
                            comm_stream,
                        )
                    )

                # assign stream
                op.dist_attr.execution_stream = comm_stream
                op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

                op._set_attr("ring_id", comm_group.id)
                if self.sharding_hybrid_dp and grad_group.is_in_local_shard:
                    next_op = ops[idx + 1]
                    assert next_op.type == "c_allreduce_sum"
                    assert next_op.output("Out")[0] == reduce_varname
                    # FIXME hybrid sharding-dp support multi comm & stream in feature
                    # next_op._set_attr("ring_id", comm_group.id)
                    next_op.dist_attr.execution_stream = comm_stream
                    next_op.dist_attr.scheduling_priority = (
                        self.comm_op_scheduling_priority
                    )
                    idx += 1

                reduce_op_count += 1

            # NOTE(review): the idx += 1 statements in this loop do not affect
            # the enumerate() iteration; they look like leftovers from a
            # while-loop version — confirm before relying on idx afterwards.
            idx += 1

        # insert deps
        indice = sorted(dep_map.keys(), reverse=True)
        for i in indice:
            # insert back-to-front so earlier insertion indices stay valid
            for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
                depend_op = insert_dependencies_for_vars(
                    block,
                    idx,
                    prior_vars,
                    post_vars,
                    self._dist_context,
                    OpRole.Backward,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_grad_comm_dep",
                )
                depend_op.dist_attr.execution_stream = comm_stream
                depend_op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

        # hierarchical grad comm
        if self.enable_hierarchical_comm:
            # NOTE so far we only support Isomorphic cluster with 8 ranks per node
            # TODO unifiy here create communicators
            # create communicators
            nranks_per_node = 8
            assert self.sharding_world_size % nranks_per_node == 0
            global_group = sharding_info.group
            global_ranks = global_group.ranks
            relative_idx_in_node = self.global_rank % nranks_per_node
            node_idx = self.global_rank // nranks_per_node
            # ranks occupying the same slot on every node as this rank
            inter_node_ranks = [
                rank
                for rank in global_ranks
                if rank % nranks_per_node == relative_idx_in_node
            ]
            _logger.info(
                "Sharding Gradient Hierarchical Communication Optimization."
            )
            _logger.info(f"current global rank idx: {self.global_rank}.")
            _logger.info(f"local inter node ranks idx: {inter_node_ranks}.")
            assert (
                len(inter_node_ranks)
                == self.sharding_world_size // nranks_per_node
            )
            # all ranks on this rank's node
            intra_node_ranks = [
                rank
                for rank in global_ranks
                if rank // nranks_per_node == node_idx
            ]
            assert len(intra_node_ranks) == nranks_per_node
            _logger.info(f"local intra node ranks idx: {intra_node_ranks}.")
            inter_node_groups = []
            intra_node_groups = []
            for _ in range(self.grad_comm_stream_num):
                # TODO re-use one origin communicator
                inter_node_groups.append(
                    new_process_group(inter_node_ranks, force_new_group=True)
                )
                intra_node_groups.append(
                    new_process_group(intra_node_ranks, force_new_group=True)
                )

            # update program (reverse order keeps indices valid while inserting)
            for idx, op in reversed(list(enumerate(block.ops))):
                if is_data_parallel_reduce_op(op):
                    assert op.type == "c_reduce_sum"
                    grad_comm_stream_idx = grad_comm_op_to_stream_idx[op]
                    inter_node_group = inter_node_groups[grad_comm_stream_idx]
                    intra_node_group = intra_node_groups[grad_comm_stream_idx]

                    reduce_varname = op.output("Out")[0]
                    if self.enable_overlap:
                        comm_stream = op.dist_attr.execution_stream
                    dst_rank = int(op.attr("root_id"))

                    in_peer = False
                    if dst_rank % nranks_per_node == relative_idx_in_node:
                        in_peer = True
                    intra_node_dst = dst_rank % nranks_per_node

                    # first hop: reduce within the node to the local peer slot
                    op._set_attr('ring_id', intra_node_group.id)
                    op._set_attr('root_id', intra_node_dst)

                    if in_peer:
                        # second hop: this rank forwards the node-local result
                        # across nodes to the final destination rank
                        inter_node_dst = dst_rank // nranks_per_node
                        new_op = block._insert_op_without_sync(
                            idx + 1,
                            type='c_reduce_sum',
                            inputs={"X": reduce_varname},
                            outputs={
                                "Out": reduce_varname,
                            },
                            attrs={
                                'ring_id': inter_node_group.id,
                                'root_id': inter_node_dst,
                                'use_calc_stream': True,
                                OP_ROLE_KEY: OpRole.Backward,
                            },
                        )
                        new_op._set_attr(
                            'op_namescope', '/' + ParallelMode.DataParallel
                        )

                        if self.enable_overlap:
                            new_op.dist_attr.execution_stream = comm_stream
                            new_op.dist_attr.scheduling_priority = (
                                self.comm_op_scheduling_priority
                            )

        block._sync_with_cpp()


def _get_broadcast_first_depend_op(block):
    """Return the first optimizer op found in ``block``.

    Raises an Exception when the block contains no supported optimizer op.
    """
    for candidate in block.ops:
        if candidate.type in _supported_optimizer_type:
            return candidate
    raise Exception("Could not find optimizer op.")


def _insert_init_and_broadcast_op(
    block,
    insert_idx,
    varname,
    local_rank,
    root_rank,
    ring_id,
    op_role,
    dist_context,
):
    """Insert a ``c_broadcast`` of ``varname`` at ``insert_idx``, preceded by
    an ``empty`` op that materializes the variable on non-root ranks.

    Both inserted ops inherit the process mesh and dims mapping of the
    broadcast variable's tensor dist attr.
    """
    target_var = block.var(varname)
    target_dist_attr = dist_context.get_tensor_dist_attr_for_program(
        target_var
    )

    broadcast_op = block._insert_op_without_sync(
        insert_idx,
        type='c_broadcast',
        inputs={'X': varname},
        outputs={'Out': varname},
        attrs={
            'ring_id': ring_id,
            'root': root_rank,
            'use_calc_stream': True,
            OP_ROLE_KEY: op_role,
        },
    )
    broadcast_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
        broadcast_op,
        target_dist_attr.process_mesh,
        target_dist_attr.dims_mapping,
        dist_context,
    )

    if local_rank != root_rank:
        # non-root ranks need the variable allocated before receiving into it;
        # inserting at the same index places the empty op BEFORE the broadcast
        init_op = block._insert_op_without_sync(
            insert_idx,
            type="empty",
            outputs={"Out": target_var.name},
            attrs={
                "shape": target_var.shape,
                "dtype": target_var.dtype,
                OP_ROLE_KEY: op_role,
            },
        )
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            init_op,
            target_dist_attr.process_mesh,
            target_dist_attr.dims_mapping,
            dist_context,
        )
    return


1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457
def _insert_reduce_op(
    block,
    insert_idx,
    reduce_var,
    ring_id,
    root_id,
    dist_context,
    op_role=OpRole.Backward,
    use_calc_stream=True,
):
    """Insert a ``c_reduce_sum`` op on ``reduce_var`` at ``insert_idx``.

    The op reduces ``reduce_var`` in place onto rank ``root_id`` of ring
    ``ring_id``; its dist attr is copied from the reduced tensor and it is
    tagged with the DataParallel namescope. Returns the inserted op.
    """
    # root_id == 0 is valid, so the check is for non-negativity
    # (the original message wrongly said "positive")
    assert (
        root_id >= 0
    ), f"root id should be a non-negative int, but now root id is {root_id}"
    new_op = block._insert_op_without_sync(
        insert_idx,
        type='c_reduce_sum',
        inputs={'X': [reduce_var]},
        outputs={'Out': [reduce_var]},
        attrs={
            'ring_id': ring_id,
            'root_id': root_id,
            'use_calc_stream': use_calc_stream,
            OP_ROLE_KEY: op_role,
        },
    )

    dist_attr = dist_context.get_tensor_dist_attr_for_program(
        block.var(reduce_var)
    )
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
        new_op, dist_attr.process_mesh, dist_attr.dims_mapping, dist_context
    )
    new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
    return new_op
J
JZ-LIANG 已提交
1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493


def _get_dp_and_sharding_groups(origin_group, sharding_group_size, rank):
    """Split ``origin_group`` into a (dp, sharding) 2-D mesh and return the
    data-parallel and sharding comm groups that ``rank`` belongs to."""
    dp_degree = len(origin_group) // sharding_group_size
    mesh_shape = [dp_degree, sharding_group_size]

    # axis 0 of the mesh is data-parallel, axis 1 is sharding
    dp_group = _get_comm_group(origin_group, mesh_shape, 0, rank)
    sharding_group = _get_comm_group(origin_group, mesh_shape, 1, rank)

    return dp_group, sharding_group


def _is_gradient_clip_op(op):
1494 1495 1496
    return op.desc.has_attr("op_namescope") and op.desc.attr(
        "op_namescope"
    ).startswith("/gradient_clip")
J
JZ-LIANG 已提交
1497 1498 1499


def _is_weight_decay_op(op):
1500 1501 1502
    return op.desc.has_attr("op_namescope") and op.desc.attr(
        "op_namescope"
    ).startswith("/regularization")
J
JZ-LIANG 已提交
1503 1504 1505 1506 1507


def _is_param_grad_fp32_cast_op(block, op):
    """True if ``op`` is a backward fp16->fp32 cast whose output is the
    gradient of a parameter in ``block``."""
    if not is_backward_op(op):
        return False
    if not _is_desired_cast_op(
        block, op, core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32
    ):
        return False
    # strip the "@..." suffix to recover the parameter name
    out_name = op.output_arg_names[0]
    base_name = out_name[: out_name.find("@")]
    if not block.has_var(base_name):
        return False
    return block.var(base_name).is_parameter


def _is_param_fp16_cast_op(block, op, params):
    """True if ``op`` is a (non-optimizer) fp32->fp16 cast whose input is a
    parameter listed in ``params``."""
    if is_optimize_op(op):
        return False
    if not _is_desired_cast_op(block, op):
        return False
    return op.input_arg_names[0] in params


1531 1532 1533 1534 1535 1536
def _is_desired_cast_op(
    block,
    op,
    src_var_type=core.VarDesc.VarType.FP32,
    dst_var_type=core.VarDesc.VarType.FP16,
):
    """True if ``op`` is a "cast" whose single input has ``src_var_type``
    and single output has ``dst_var_type``."""
    if op.type != "cast":
        return False
    assert len(op.input_arg_names) == 1
    assert len(op.output_arg_names) == 1
    in_var = block.var(op.input_arg_names[0])
    out_var = block.var(op.output_arg_names[0])
    return in_var.dtype == src_var_type and out_var.dtype == dst_var_type


def _get_base_name_from_grad_name(grad_name):
    base_name = None
    if ".cast_fp16@GRAD" in grad_name:
1553
        base_name = grad_name[: grad_name.find(".cast_fp16@GRAD")]
J
JZ-LIANG 已提交
1554
    elif "@GRAD" in grad_name:
1555
        base_name = grad_name[: grad_name.find("@GRAD")]
J
JZ-LIANG 已提交
1556 1557 1558
    return base_name


1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572
def _is_param_grad_allreduce_op(op, block):
    """True if ``op`` is a data-parallel gradient reduce whose output
    gradient belongs to a parameter of ``block``."""
    if not is_data_parallel_reduce_op(op):
        return False

    base_name = _get_base_name_from_grad_name(op.output_arg_names[0])
    if not block.has_var(base_name):
        return False
    return block.var(base_name).is_parameter


1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588
def _is_param_grad_sum_op(op, block):
    """True if ``op`` is a backward "sum" whose output gradient belongs to
    a parameter of ``block`` (gradient accumulation)."""
    if not is_backward_op(op) or op.type != "sum":
        return False

    base_name = _get_base_name_from_grad_name(op.output_arg_names[0])
    if not block.has_var(base_name):
        return False
    return block.var(base_name).is_parameter


J
JZ-LIANG 已提交
1589 1590 1591 1592
def _is_forward_op(op):
    return op.attr("op_role") == 0


1593 1594 1595 1596 1597 1598 1599 1600
def is_sharding_param_broadcast_op(op):
    """True for ``c_broadcast`` ops tagged with the DataParallel
    namescope, i.e. the parameter broadcasts inserted by this pass."""
    if op.type != "c_broadcast":
        return False
    if not op.desc.has_attr("op_namescope"):
        return False
    return ParallelMode.DataParallel in op.desc.attr("op_namescope")


J
JZ-LIANG 已提交
1601 1602 1603 1604
def _inference_data_parallel_group_for_operator(rank_id, op, dist_context):
    """Infer the data-parallel process group that ``op`` participates in.

    Scans ``op``'s non-parameter inputs; the first one whose batch axis
    (dims_mapping[0]) is sharded over a mesh dim of size > 1 determines
    the group. Returns ``None`` when no such input exists.
    """
    dp_group = None
    for input_name in op.input_arg_names:
        # TODO(zhaoyingli): maintain a dict in dist_context to record all variables which are renamed,
        # to solve the param@RESHARD cannot be identifed.
        if is_parameter_related(input_name, op.block, dist_context):
            continue

        op_dist_attr = dist_context.get_op_dist_attr_for_program(op)
        process_mesh = op_dist_attr.process_mesh
        input_dim_mapping = op_dist_attr.get_input_dims_mapping(input_name)
        mesh_shape = process_mesh.shape

        # TODO(JZ-LIANG) replace with specific batch size dimension
        batch_size_axis = input_dim_mapping[0]
        if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
            group_ranks = _get_comm_group(
                process_mesh.process_ids,
                process_mesh.shape,
                batch_size_axis,
                rank_id,
            )
            dp_group = new_process_group(group_ranks)
            break

    return dp_group


1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656
def partition_by_use_order(params, group_size):
    """Partition ``params`` into ``group_size`` contiguous shards of
    roughly equal total memory.

    Assumes ``params`` is already sorted by utilization order; keeping
    consecutive params on the same rank segments the forward/backward
    computation, which favors the later fuse pass.
    """
    sized_params = [(p, get_var_size(p)) for p in params]
    total_mem = sum(mem for _, mem in sized_params)

    mapping = {r: [] for r in range(group_size)}
    rank_idx = 0
    consumed = 0.0
    for param, mem in sized_params:
        # advance to the next rank once its proportional budget is exceeded
        if consumed > total_mem * 1.0 * (rank_idx + 1) / group_size:
            rank_idx += 1
        mapping[rank_idx].append(param)
        consumed += mem

    return mapping


def partition_by_greedy_even(params, group_size):
    """Greedily assign each param to the currently lightest rank so the
    per-rank element counts stay as even as possible."""
    mapping = {rank: [] for rank in range(group_size)}
    numel_per_rank = [0] * group_size

    for param in params:
        lightest = numel_per_rank.index(min(numel_per_rank))
        mapping[lightest].append(param)
        numel = reduce(lambda x, y: x * y, param.shape)
        assert (
            numel > 0
        ), "param [{}] should larger than 0, but it is [{}]".format(
            param.name, numel
        )
        numel_per_rank[lightest] += numel

    return mapping


1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687
def partition_parameters(params, group_size, algor="greedy_even"):
    """Partition ``params`` over ``group_size`` ranks.

    ``algor`` selects "greedy_even" (size-balanced) or any other value
    for use-order partitioning. Logs the resulting assignment and
    returns a dict mapping local rank -> list of params.
    """
    if algor == "greedy_even":
        rank_to_params = partition_by_greedy_even(params, group_size)
    else:
        rank_to_params = partition_by_use_order(params, group_size)

    _logger.info("Sharding Parameter Partition:")
    for rank, shard in rank_to_params.items():
        shard_mb = sum([get_var_size(var) for var in shard])
        _logger.info(
            "Rank:{}, Parameter Size:{} MB.".format(rank, shard_mb)
        )
        _logger.info(f"Params in this rank: {[var.name for var in shard]}.")

    return rank_to_params


def re_order_program(block, param_grads, dist_context):
    """Reorder the optimizer ops at the tail of ``block`` so they follow
    the order in which their parameters are first used by the program.

    Returns ``param_grads`` sorted by parameter first-use order.
    """
    # map param name -> (param, grad) so pairs can be recovered after sorting
    pg_by_name = {}
    for param, grad in param_grads:
        pg_by_name[param.name] = (param, grad)

    # single forward scan to record the first-use order of each param
    use_order = []
    for op in block.ops:
        for input_name in op.input_arg_names:
            if (input_name in pg_by_name) and (input_name not in use_order):
                use_order.append(input_name)
        if len(use_order) == len(pg_by_name):
            break

    # reorder the trailing optimizer ops
    last_op = block.ops[-1]
    opt_op_by_pname = {}
    num_ops = len(block.ops)
    stale_op_indices = []
    # TODO support case when optimizer is not the last op
    if is_optimize_op(last_op) and last_op.type in _supported_optimizer_type:
        # walk backwards over the contiguous run of optimizer ops
        for idx, op in reversed(list(enumerate(block.ops))):
            if op.type not in _supported_optimizer_type:
                break
            assert len(op.input("Param")) == 1
            opt_op_by_pname[op.input("Param")[0]] = op
            stale_op_indices.append(idx)
        assert len(use_order) == len(opt_op_by_pname)

        # append fresh copies in first-use order
        for pname in use_order:
            new_op = block.append_op(type='nop')
            new_op.desc.copy_from(opt_op_by_pname[pname].desc)
            dist_context.set_op_dist_attr_for_program(
                new_op,
                dist_context.get_op_dist_attr_for_program(
                    opt_op_by_pname[pname]
                ),
            )

        # remove stale originals; indices are descending, so removal is safe
        for idx in stale_op_indices:
            block._remove_op(idx, sync=False)

        block._sync_with_cpp()
        assert len(block.ops) == num_ops

    # TODO reorder gradient clip order
    _logger.info(f"Sharding the Order of param being used: {use_order}.")
    return [pg_by_name[p] for p in use_order]


def group_param(sharding_info, fuse_size):
    """Group parameters into :class:`VarGroup` buckets.

    Params join the current bucket while they share dtype and owning
    rank and the bucket stays under ``fuse_size`` elements; otherwise a
    new bucket is started. Returns
    (group -> [param names], param name -> group).
    """
    group_to_param_map = {}
    param_to_group_map = {}
    cur_group = VarGroup(fuse_size)

    for param in sharding_info.params:
        rank = sharding_info.get_var_rank(param.name)

        if cur_group.acceptable(param, rank):
            cur_group.collect(param, rank)
        else:
            cur_group = VarGroup(fuse_size)
            cur_group.collect(param, rank)

        cur_group.is_in_local_shard = sharding_info.is_in_local_shard(
            param.name
        )

        group_to_param_map.setdefault(cur_group, []).append(param.name)
        param_to_group_map[param.name] = cur_group

    return group_to_param_map, param_to_group_map


1781
class ShardingInfo:
    """Bookkeeping for one sharding group: which local rank owns which
    parameter, and the param/grad pairs being sharded.

    Args:
        group: the sharding process group (provides ``nranks``/``ranks``).
        rank: global rank of the current process.
        params_grads: list of (param, grad) pairs to shard.
        partition_algor: partition algorithm name, see
            ``partition_parameters``.
    """

    def __init__(self, group, rank, params_grads, partition_algor):
        self.group = group
        self.params_grads = {p.name: (p, g) for p, g in params_grads}
        # BUGFIX: the old check compared len(dict) with len(set(dict)),
        # which is always true; the dict comprehension silently drops
        # duplicates, so compare against the original list length instead.
        assert len(self.params_grads) == len(
            params_grads
        ), "found duplicated param in params_grads"

        self.params = [p for p, _ in params_grads]
        self.param_names = [p.name for p in self.params]
        self.group_size = group.nranks
        self.global_rank = rank
        self.local_rank = group.ranks.index(self.global_rank)
        self.partition_algor = partition_algor
        # rank in below mapping are local rank in this sharding group
        self.rank_to_params = partition_parameters(
            self.params, self.group_size, self.partition_algor
        )
        # include fp32 and fp16 param
        self.param_to_rank = {}
        self._map_param_to_rank()

    def _map_param_to_rank(self):
        """
        mapping parameters to the rank which holds it.
        """
        for rank, params in self.rank_to_params.items():
            for param in params:
                self.param_to_rank[param.name] = rank

    def get_var_rank(self, varname):
        """Return the local sharding rank owning ``varname``, or -1 if
        it is not a known parameter."""
        if varname in self.param_to_rank:
            return self.param_to_rank[varname]
        return -1

    # determine fp32 and fp16 (cast) param
    def is_in_local_shard(self, param_name):
        """True when the current local rank owns ``param_name``."""
        return self.get_var_rank(param_name) == self.local_rank

    # NOTE the follwo logic is designed for supporting AMP O1 when
    # the param would be cast to fp16 before used for caculation.
    # and sharding should only broadcast the casted fp16 param
    # instead of the origin fp32 version param.
    def get_broadcast_vars_and_param_usage(self, block):
        """Return (broadcast_vars, param_usage) for ``block``.

        ``broadcast_vars`` contains each fp16-cast output plus every fp32
        param still consumed directly (usage > 0 after discounting its
        cast op); ``param_usage`` counts non-optimizer uses per param.
        """
        broadcast_vars = set()
        fp16_params = set()
        fp16_to_fp32 = {}

        param_usage = {x: 0 for x in self.param_names}
        for op in block.ops:
            if is_optimize_op(op):
                continue
            for input_name in op.input_arg_names:
                if input_name in self.param_names:
                    param_usage[input_name] += 1

        for op in block.ops:
            if not _is_param_fp16_cast_op(block, op, self.param_names):
                continue
            input_name = op.input_arg_names[0]
            output_name = op.output_arg_names[0]
            broadcast_vars.add(output_name)
            fp16_params.add(output_name)
            fp16_to_fp32[output_name] = input_name
            # the cast itself is not a "real" use of the fp32 param
            param_usage[input_name] -= 1
            # fp16 copy lives on the same rank as its fp32 origin
            self.param_to_rank[output_name] = self.param_to_rank[input_name]

        for param, usage in param_usage.items():
            if usage > 0:
                broadcast_vars.add(param)
        return broadcast_vars, param_usage

    def get_param_grad(self, param_name):
        """Return the (param, grad) pair for ``param_name``.

        Raises ValueError when the param is not owned by this rank or is
        unknown.
        """
        if not self.is_in_local_shard(param_name):
            raise ValueError(f"param[{param_name}] not in current rank.")
        if param_name not in self.params_grads:
            raise ValueError(f'param[{param_name}] not in params_grads')
        # presence was just verified, so index directly (the old
        # .get(..., None) default was unreachable)
        return self.params_grads[param_name]
1859 1860


1861
class VarGroup:
    """A bucket of parameters sharing dtype and owner rank, collected until
    the total element count would exceed the size limit."""

    def __init__(self, max_size):
        # NOTE(review): 'max_siez' is a historical typo; the attribute name
        # is kept unchanged in case code outside this chunk reads it.
        self.max_siez = max_size
        self.dtype = None
        self.rank = -1
        self.numel = 0
        self.vars = []
        self.coalesce_var = None
        self.coalesce_dep_varname = None
        self.coalesce_op_idx = None
        self.reduce_op_indices = []
        self.allreduce_op_indices = []
        self.is_in_local_shard = False

    def acceptable(self, param, rank):
        """Whether ``param`` owned by ``rank`` may join this group."""
        if self.numel == 0:
            # an empty group accepts any param
            return True
        if param.dtype != self.dtype:
            return False
        if rank != self.rank:
            return False
        return self.numel + get_var_numel(param) <= self.max_siez

    def collect(self, param, rank):
        """Add ``param`` and update dtype/rank/size bookkeeping."""
        self.dtype = param.dtype
        self.rank = rank
        self.numel += get_var_numel(param)
        self.vars.append(param)

    def __len__(self):
        return len(self.vars)