auto_parallel_sharding.py 70.6 KB
Newer Older
J
JZ-LIANG 已提交
1
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
#
J
JZ-LIANG 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
6
#
J
JZ-LIANG 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
8
#
J
JZ-LIANG 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import logging
16
from functools import reduce
17 18

import paddle
19
from paddle.distributed.auto_parallel.static.operators.common import (
20
    ParallelMode,
21
    is_data_parallel_reduce_op,
22
    is_parameter_related,
23
)
24 25 26 27
from paddle.distributed.auto_parallel.static.process_group import (
    new_process_group,
)
from paddle.distributed.auto_parallel.static.utils import (
28
    _get_comm_group,
29 30
    get_logger,
    get_var_numel,
31 32 33 34 35
    insert_dependencies_for_vars,
    is_backward_op,
    is_dep_skip_op,
    is_loss_grad_op,
    is_optimize_op,
36 37
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
    set_var_dist_attr,
38 39 40
)
from paddle.distributed.fleet.meta_optimizers.sharding.utils import get_var_size
from paddle.framework import core
41 42
from paddle.static import default_main_program, default_startup_program
from paddle.utils import unique_name
43 44

from .pass_base import PassBase, register_pass
J
JZ-LIANG 已提交
45 46 47

# Op-role constants used to tag ops inserted/modified by this pass.
OpRole = core.op_proto_and_checker_maker.OpRole
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()

# Op types skipped when inferring data parallel groups from forward ops
# (readers / tensor-manipulation / send ops carry no useful dist info here).
_skip_ops = [
    'create_py_reader',
    'create_double_buffer_reader',
    'read',
    'slice',
    'split',
    'assign',
    "send_v2",
]
# update here to support new optimizers
_supported_optimizer_type = [
    "adam",
    "adamax",
    "adamw",
    "decayed_adagrad",
    "momentum",
    "dgc_momentum",
    "lars_momentum",
    "merged_momentum",
    "lamb",
    "sgd",
]

# Module-level logger shared by the sharding pass.
_logger = get_logger(logging.INFO)

J
JZ-LIANG 已提交
73

74
def _is_reshard_op(op):
75 76 77
    return op.desc.has_attr(
        "op_namescope"
    ) and "/auto_parallel/reshard" in op.desc.attr('op_namescope')
78 79


J
JZ-LIANG 已提交
80 81 82
# NOTE we add the "auto_parallel" prefix to the pass in order to
# indicate that this pass should obey some constrains by auto_parallel
# for example all ops and vars should has dist attr before and after pass
83
# should use dist op instead of custom comm op
J
JZ-LIANG 已提交
84 85 86
@register_pass("auto_parallel_sharding")
class ShardingPass(PassBase):
    def __init__(self):
        """Declare the pass attributes and initialize internal state.

        All attributes are validated later in ``_check_self``; ``None``
        defaults mean "must be provided by the caller".
        """
        super().__init__()
        self.set_attr("dist_context", None)
        self.set_attr("stage", None)  # sharding stage: 1, 2 or 3
        self.set_attr("sharding_degree", None)  # for parallelizer
        self.set_attr("degree", None)  # for parallelizer_v2
        self.set_attr("enable_overlap", None)
        self.set_attr("param_comm_stream_num", None)
        self.set_attr("grad_comm_stream_num", None)
        self.set_attr("param_bucket_size_numel", None)
        self.set_attr("grad_bucket_size_numel", None)
        self.set_attr("partition_algor", None)
        self.set_attr("enable_hierarchical_comm", None)
        self.set_attr("params_grads", [])
        self.set_attr("global_rank", -1)
        # Internal state populated while the pass runs:
        self.dp_groups = set()  # data parallel groups found in the network
        self.sharding_infos = []  # one ShardingInfo per sharding group
        self.varname_to_sharding_info = {}  # param name -> ShardingInfo
        self.sharding_hybrid_dp = False  # sharding nested inside a larger dp group
        self.outer_dp_group = None  # the outer dp group when hybrid dp is on
        self.shared_params_grads = []  # (param, grad) pairs kept in local shard
J
JZ-LIANG 已提交
107 108 109 110 111 112 113

    def _check_self(self):
        """Validate that every attribute required by the pass was supplied.

        Returns False on the first missing/invalid attribute, True when
        the pass is fully configured.
        """
        if self.get_attr("dist_context") is None:
            return False

        if self.get_attr("stage") not in [1, 2, 3]:
            return False

        # Either "sharding_degree" (parallelizer) or "degree"
        # (parallelizer_v2) must be an int greater than 1.
        degree = self.get_attr("sharding_degree")
        if degree is None:
            degree = self.get_attr("degree")
        if degree is None:
            return False
        if (not isinstance(degree, int)) or degree <= 1:
            return False

        if len(self.get_attr("params_grads")) <= 0:
            return False

        rank = self.get_attr("global_rank")
        if (not isinstance(rank, int)) or rank < 0:
            return False

        # The remaining knobs simply must be provided (non-None).
        for attr_name in (
            "enable_overlap",
            "param_comm_stream_num",
            "grad_comm_stream_num",
            "param_bucket_size_numel",
            "grad_bucket_size_numel",
            "partition_algor",
            "enable_hierarchical_comm",
        ):
            if self.get_attr(attr_name) is None:
                return False

        return True

    def _check_conflict(self, other_pass):
        # This pass declares no conflict with any other pass.
        return True

    def _apply_single_impl(self, main_program, startup_program, context):
        """Entry point of the pass: shard optimizer states, gradients and
        parameters over the sharding group, then run the fuse/overlap
        optimizations.
        """
        self._dist_context = self.get_attr("dist_context")
        # "sharding_degree" (parallelizer) takes precedence over "degree"
        # (parallelizer_v2); _check_self guarantees one of them is valid.
        self.sharding_world_size = int(
            self.get_attr("sharding_degree") or self.get_attr("degree")
        )
        self.stage = int(self.get_attr("stage"))
        self.global_rank = int(self.get_attr("global_rank"))
        self.enable_overlap = self.get_attr("enable_overlap")
        self.param_comm_stream_num = int(self.get_attr("param_comm_stream_num"))
        self.grad_comm_stream_num = int(self.get_attr("grad_comm_stream_num"))
        self.enable_hierarchical_comm = self.get_attr(
            "enable_hierarchical_comm"
        )
        if self.param_comm_stream_num > 1 or self.grad_comm_stream_num > 1:
            assert (
                self.enable_overlap
            ), "multiple comm stream need enable_overlap to be True"
        self.param_bucket_size_numel = int(
            self.get_attr("param_bucket_size_numel")
        )
        self.grad_bucket_size_numel = int(
            self.get_attr("grad_bucket_size_numel")
        )
        self.partition_algor = self.get_attr("partition_algor")

        params_grads = self.get_attr("params_grads")
        main_block, startup_block = (
            main_program.global_block(),
            startup_program.global_block(),
        )

        # NOTE Multi / Sub-Block Support
        # We assume that only parameters are present and partitioned in
        # main_block, there is NO new param in any sub_block, and all params
        # in sub_blocks follow the same partition as main_block. This
        # constraint fulfills the 3 most common sub_block use-cases in Paddle:
        # 1. sub-block for lr scheduler
        # 2. sub-block uses the same or partial network of main-block, e.g. GPT3 generation model
        # 3. sub-block used for double backward

        self._build_sharding_groups(main_block, params_grads)
        # Sharding is applied to every block; startup_block is shared.
        for block in main_program.blocks:
            self._shard_optimizer(block, startup_block, params_grads, context)
            self._shard_gradient_synchronization(block)
            self._shard_parameter(block, startup_block)

        # Expose the locally-kept (param, grad) pairs to downstream passes.
        context.set_attr("params_grads", self.shared_params_grads)
        self._optimization_pass(main_program, startup_program)
198

J
JZ-LIANG 已提交
199 200
    def _build_sharding_groups(self, main_block, params_grads):
        # First discover the data parallel group(s) from the forward ops,
        # then derive the sharding group(s) and partition the params.
        self._collective_data_parallel_groups(main_block)
        self._build_sharding_infos(main_block, params_grads)
J
JZ-LIANG 已提交
202 203 204

    def _collective_data_parallel_groups(self, main_block):
        """Scan forward ops and collect their data parallel process groups
        into ``self.dp_groups``; exactly one group is supported for now.
        """
        for op in main_block.ops:
            if not _is_forward_op(op) or op.type in _skip_ops:
                continue
            # NOTE: there aren't dist_attr in the ops which reshard insert,
            # and should be skip in sharding.
            if _is_reshard_op(op):
                continue
            group = _inference_data_parallel_group_for_operator(
                self.global_rank, op, self._dist_context
            )
            if group is not None:
                self.dp_groups.add(group)

        # TODO(JZ-LIANG) allow more than one dp group in network, to support
        # more general distributions generated by auto search.
        if len(self.dp_groups) != 1:
            raise NotImplementedError(
                "So far Only and Exactly one data parallel group in network are supported, but got [{}] different data parallel groups".format(
                    len(self.dp_groups)
                )
            )
J
JZ-LIANG 已提交
225

226 227 228 229 230 231
    def _build_sharding_infos(self, main_block, params_grads):
        """Partition params over the sharding group(s).

        Builds one ``ShardingInfo`` per dp group and fills
        ``self.varname_to_sharding_info``. When the dp group is larger than
        the sharding degree, the dp group is split into an outer dp group
        and an inner sharding group (hybrid sharding + data parallelism).
        """
        # order params deterministically so that every rank computes the
        # same partition
        params_grads = re_order_program(
            main_block, params_grads, self._dist_context
        )

        # partition
        for dp_group in self.dp_groups:
            assert (
                dp_group.nranks >= self.sharding_world_size
            ), "sharding world size [{}] should not larger than dp world size [{}]".format(
                self.sharding_world_size, dp_group.nranks
            )
            assert (
                dp_group.nranks % self.sharding_world_size == 0
            ), "sharding world size [{}] should be divisible by dp world size [{}]".format(
                self.sharding_world_size, dp_group.nranks
            )
            assert (
                self.global_rank in dp_group.ranks
            ), "current ranks [{}] does NOT belong to the data parallel group [{}]".format(
                self.global_rank, dp_group.ranks
            )
            assert (
                len(params_grads) >= self.sharding_world_size
            ), "number of parameters [{}] is not enough to be shard among [{}] ranks".format(
                len(params_grads), self.sharding_world_size
            )

            # sharding hybrid data parallel: partial sharding param within
            # the dp group, replication across the outer dp group
            if dp_group.nranks > self.sharding_world_size:
                self.sharding_hybrid_dp = True
                # multiple comm streams are not supported under hybrid dp
                assert self.param_comm_stream_num < 2
                assert self.grad_comm_stream_num < 2
                assert (
                    len(self.dp_groups) == 1
                ), "hybrid sharding and data parallelism are supported only when there is excatly one data parallel group in the network"
                outer_dp_group, sharding_group = _get_dp_and_sharding_groups(
                    dp_group.ranks, self.sharding_world_size, self.global_rank
                )
                sharding_group = new_process_group(sharding_group)
                self.outer_dp_group = new_process_group(outer_dp_group)
            else:
                sharding_group = dp_group

            self._dist_context._sharding_group = sharding_group
            # TODO(JZ-LIANG) when support multiple dp groups in future, should group param and bind them to corresponding dp group
            sharding_info = ShardingInfo(
                sharding_group,
                self.global_rank,
                params_grads,
                self.partition_algor,
            )
            self.sharding_infos.append(sharding_info)
            for param in sharding_info.params:
                self.varname_to_sharding_info[param.name] = sharding_info

285 286 287
    def _shard_optimizer(
        self, main_block, startup_block, params_grads, pass_context
    ):
        """
        Shard all optimizer-related ops and vars, including:
        gradient clip ops & vars
        weight decay ops & vars
        optimizer ops and states
        """
        self._shard_amp_related_op_and_vars(main_block, pass_context)
        self._shard_weight_decay(main_block)
        # self._shard_gradient_clip(main_block)
        self._shard_optimizer_ops_and_states(main_block, startup_block)
        self._insert_optimizer_broadcasts(main_block, startup_block)

    def _shard_amp_related_op_and_vars(self, main_block, pass_context):
        """Prune AMP bookkeeping ops/vars that belong to params outside the
        local shard (stage >= 2 only). Iterates in reverse so removals do
        not invalidate pending indices.
        """
        if self.stage < 2:
            return

        for idx, op in reversed(list(enumerate(main_block.ops))):
            # shard amp related param_grad cast: drop casts whose target
            # param is not kept on this rank
            if _is_param_grad_fp32_cast_op(main_block, op):
                output_name = op.output_arg_names[0]
                param_name = output_name[: output_name.find("@")]
                if not self._is_parameter_in_local_shard(param_name):
                    main_block._remove_op(idx, sync=False)
                    main_block._remove_var(output_name, sync=False)

            # shard check nan inf: keep only the inputs owned locally
            elif op.type in ["check_finite_and_unscale", "update_loss_scaling"]:
                reversed_x = []
                for input_name in op.desc.input('X'):
                    param_name = input_name[: input_name.find("@")]

                    if self._is_parameter_in_local_shard(param_name):
                        reversed_x.append(input_name)

                # NOTE: When `reversed_x` is [], check_finite_and_unscale will be replaced by `fill_constant` op.
                # The output of check_finite_and_unscale is be set False
                if reversed_x:
                    op.desc.set_input('X', reversed_x)
                    op.desc.set_output('Out', reversed_x)
                else:
                    if op.type == "check_finite_and_unscale":
                        op_role = op.attr('op_role')
                        out_name = op.output_arg_names[0]
                        out_var = main_block.vars[out_name]
                        main_block._remove_op(idx, sync=False)
                        main_block._insert_op_without_sync(
                            idx,
                            type="fill_constant",
                            outputs={"Out": out_var},
                            attrs={
                                "shape": out_var.shape,
                                "dtype": out_var.dtype,
                                "value": 0,
                                OP_ROLE_KEY: op_role,
                            },
                        )
                    else:
                        main_block._remove_op(idx, sync=False)

        main_block._sync_with_cpp()

    def _shard_gradient_clip(self, main_block):
        """Shard the global-norm gradient clipping computation: drop the
        per-grad norm ops for params outside the local shard, then
        allreduce the partial sum of squared norms across the sharding
        group(s). (Currently unused — see _shard_optimizer.)
        """
        if self.stage < 2:
            return

        # TODO (JZ-LIANG) support calculate global norm with tensor parallelism
        removed_op_type = ['elementwise_mul', 'squared_l2_norm', 'clip_by_norm']
        removed_op_idx = set()
        removed_tmp_var = set()

        # Phase 1: mark norm ops (and their tmp outputs) that belong to
        # params not kept on this rank.
        for idx, op in list(enumerate(main_block.ops)):
            if not _is_gradient_clip_op(op):
                continue

            if op.type in removed_op_type:
                input_name = op.input("X")[0]
                param_name = input_name[: input_name.find("@GRAD")]
                if not self._is_parameter_in_local_shard(param_name):
                    removed_op_idx.add(idx)
                    if op.type in ['squared_l2_norm', 'clip_by_norm']:
                        for output_name in op.output_arg_names:
                            removed_tmp_var.add(output_name)

        # Phase 2: remove the marked ops (reverse order keeps indices valid)
        # and their temporary vars.
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if not _is_gradient_clip_op(op):
                continue
            if idx in removed_op_idx:
                main_block._remove_op(idx, sync=False)

        for varname in removed_tmp_var:
            main_block._remove_var(varname, sync=False)

        # Phase 3: shrink the `sum` op to the surviving inputs and insert an
        # allreduce so every rank sees the full global norm.
        for idx, op in list(enumerate(main_block.ops)):
            if not _is_gradient_clip_op(op):
                continue
            if op.type == 'sum':
                reserved_vars = []
                for input_name in op.input_arg_names:
                    if input_name not in removed_tmp_var:
                        reserved_vars.append(input_name)
                op.desc.set_input("X", reserved_vars)

                sum_op_output = op.output_arg_names[0]
                for i, sharding_info in enumerate(self.sharding_infos):
                    new_op = main_block._insert_op(
                        idx + i + 1,
                        type='c_allreduce_sum',
                        inputs={'X': [sum_op_output]},
                        outputs={'Out': [sum_op_output]},
                        attrs={
                            'ring_id': sharding_info.group.id,
                            'op_namescope': "/gradient_clip_model_parallelism",
                            'use_calc_stream': True,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )
                    dist_attr = (
                        self._dist_context.get_tensor_dist_attr_for_program(
                            main_block.var(sum_op_output)
                        )
                    )
                    # assert dist_attr is not None
                    # naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                    #     new_op, dist_attr.process_mesh, dist_attr.dims_mapping,
                    #     self._dist_context)
                break

        main_block._sync_with_cpp()

    def _shard_weight_decay(self, main_block):
        """Shard weight-decay ops (stage >= 2). Currently any weight-decay
        op in the program is rejected outright.
        """
        if self.stage < 2:
            return

        for op in reversed(list(main_block.ops)):
            if _is_weight_decay_op(op):
                raise NotImplementedError(
                    "weight decay is NOT supported by now"
                )
        main_block._sync_with_cpp()

    def _shard_optimizer_ops_and_states(self, main_block, startup_block):
        """Remove optimizer ops (and their state vars) for parameters that
        are not in the local shard; record the (param, grad) pairs that
        are kept locally.
        """
        should_removed_optimizer_states = []
        # Optimizer ops sit at the tail of the block; walk backwards and
        # stop at the first non-optimize op.
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if not is_optimize_op(op):
                break

            if op.type in _supported_optimizer_type:
                assert "Param" in op.input_names
                assert len(op.input("Param")) == 1
                param_name = op.input("Param")[0]
                if not self._is_parameter_in_local_shard(param_name):
                    # Every output except the param itself is an optimizer
                    # state (moments, beta pows, ...) that can be dropped.
                    should_removed_optimizer_states.extend(
                        [
                            varname
                            for varname in op.output_arg_names
                            if varname != param_name
                        ]
                    )
                    main_block._remove_op(idx, sync=False)
                else:
                    self.shared_params_grads.append(
                        self._get_param_grad(param_name)
                    )

        # Drop the startup-block initializers of the removed states.
        for idx, op in reversed(list(enumerate(startup_block.ops))):
            if (
                len(op.output_arg_names) == 1
                and op.output_arg_names[0] in should_removed_optimizer_states
            ):
                startup_block._remove_op(idx, sync=False)

        for varname in should_removed_optimizer_states:
            if main_block.has_var(varname):
                main_block._remove_var(varname, sync=False)
            if startup_block.has_var(varname):
                startup_block._remove_var(varname, sync=False)

        main_block._sync_with_cpp()
        startup_block._sync_with_cpp()

    def _insert_optimizer_broadcasts(self, main_block, startup_block):
        """After the optimizer step, broadcast every updated parameter from
        its owner rank to the rest of the sharding group (stage 1/2, only
        when parameter-bucket fusion is off).
        """
        if self.stage > 2 or self.param_bucket_size_numel > 1:
            return

        for sharding_info in self.sharding_infos:
            for param in sharding_info.params:
                assert main_block.has_var(param.name)
                assert startup_block.has_var(param.name)

                new_op = main_block.append_op(
                    type='c_broadcast',
                    inputs={'X': param},
                    outputs={'Out': param},
                    attrs={
                        'ring_id': sharding_info.group.id,
                        'root': sharding_info.get_var_rank(param.name),
                        'use_calc_stream': True,
                        OP_ROLE_KEY: OpRole.Optimize,
                    },
                )
                new_op._set_attr(
                    'op_namescope', '/' + ParallelMode.DataParallel
                )
                param_dist_attr = (
                    self._dist_context.get_tensor_dist_attr_for_program(param)
                )
                assert param_dist_attr is not None
                # Propagate the param's dist attr onto the new broadcast op.
                naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
                    new_op,
                    param_dist_attr.process_mesh,
                    param_dist_attr.dims_mapping,
                    self._dist_context,
                )
        main_block._sync_with_cpp()

    def _is_parameter_in_local_shard(self, param_name):
        """Return True if *param_name* is assigned to this rank's shard."""
        assert param_name in self.varname_to_sharding_info
        info = self.varname_to_sharding_info[param_name]
        return info.is_in_local_shard(param_name)

515 516 517 518 519 520 521
    def _get_param_grad(self, param_name):
        """Look up and return the (param, grad) pair for *param_name*."""
        assert param_name in self.varname_to_sharding_info
        info = self.varname_to_sharding_info[param_name]
        pair = info.get_param_grad(param_name)
        assert pair is not None
        return pair

J
JZ-LIANG 已提交
522 523 524 525 526 527 528
    def _shard_gradient_synchronization(self, main_block):
        """Rewrite gradient synchronization for stage >= 2.

        Each data-parallel grad allreduce is replaced with a reduce to the
        grad's owner rank; under hybrid sharding+dp the original allreduce
        is kept (re-ringed to the outer dp group) for locally-owned grads.
        Redundant grad `sum` ops for non-local grads are removed.
        """
        if self.stage < 2:
            return

        # NOTE: the original code built `dp_ring_ids` and bound the return
        # value of `_insert_reduce_op`; neither was ever used, so both dead
        # bindings are removed here.
        for idx, op in reversed(list(enumerate(main_block.ops))):
            if _is_param_grad_allreduce_op(op, main_block):
                input_name = op.input_arg_names[0]
                base_name = _get_base_name_from_grad_name(input_name)
                sharding_info = self.varname_to_sharding_info[base_name]
                # Reduce the grad to its owner rank within the sharding group.
                _insert_reduce_op(
                    main_block,
                    idx,
                    input_name,
                    sharding_info.group.id,
                    sharding_info.get_var_rank(base_name),
                    self._dist_context,
                )
                if (
                    not self.sharding_hybrid_dp
                    or not sharding_info.is_in_local_shard(base_name)
                ):
                    # The old allreduce (now at idx + 1) is redundant.
                    main_block._remove_op(idx + 1, sync=False)
                else:
                    # Hybrid dp: keep the allreduce but run it on the outer
                    # dp ring for grads owned by this rank.
                    op._set_attr("ring_id", self.outer_dp_group.id)
                    op._set_attr(
                        'op_namescope', '/' + ParallelMode.DataParallel
                    )

            # NOTE:
            # var@GRAD = sum(var@GRAD@RENAME@0, var@GRAD@RENAME@1)
            # If the var is not in local rank and it is output of many ops, or the var is renamed in another words,
            # the sum op should be removed.
            if _is_param_grad_sum_op(op, main_block):
                out_name = op.output_arg_names[0]
                base_name = _get_base_name_from_grad_name(out_name)
                sharding_info = self.varname_to_sharding_info[base_name]
                if not sharding_info.is_in_local_shard(base_name):
                    main_block._remove_op(idx, sync=False)

        main_block._sync_with_cpp()

    def _shard_parameter(self, main_block, startup_block):
        """Stage-3 parameter sharding: keep only locally-owned params,
        insert init+broadcast ops so consumers of remote params receive a
        temporary broadcast copy, and prune initializers / casts / vars of
        params owned by other ranks.

        (Fixes the original local-variable typo `not_used_param_nane` and
        drops the never-read `out_var_dist_attr` binding; the
        `set_var_dist_attr` call is kept for its side effect.)
        """
        if self.stage < 3:
            return

        dp_ring_ids = [group.id for group in self.dp_groups]
        for sharding_info in self.sharding_infos:
            (
                need_broadcast_vars,
                param_usage,
            ) = sharding_info.get_broadcast_vars_and_param_usage(main_block)
            # Params never consumed in main_block and not owned locally:
            # their fp16 casts can be removed entirely.
            not_used_param_name = []
            for param_name in param_usage:
                if (
                    param_usage[param_name] == 0
                    and sharding_info.get_var_rank(param_name)
                    != sharding_info.local_rank
                ):
                    not_used_param_name.append(param_name)

            for idx, op in reversed(list(enumerate(main_block.ops))):
                if is_optimize_op(op):
                    continue

                for input_name in op.input_arg_names:
                    # NOTE hack for embedding op when AMP 02-3
                    # paddle amp force embedding (lookup table) to be run on fp32
                    if _is_param_fp16_cast_op(
                        main_block, op, sharding_info.param_names
                    ):
                        continue
                    if input_name not in need_broadcast_vars:
                        continue
                    root_rank = sharding_info.get_var_rank(input_name)
                    if root_rank == sharding_info.local_rank:
                        # Owner rank broadcasts the param in place.
                        broadcast_varname = input_name
                    else:
                        # Non-owner ranks receive into a fresh temp var.
                        broadcast_varname = unique_name.generate(
                            input_name + "@BroadCast"
                        )
                        input_var = main_block.var(input_name)
                        new_var = main_block.create_var(
                            name=broadcast_varname,
                            shape=input_var.shape,
                            dtype=input_var.dtype,
                            persistable=False,
                        )
                        ref_dist_attr = (
                            self._dist_context.get_tensor_dist_attr_for_program(
                                input_var
                            )
                        )
                        set_var_dist_attr(
                            self._dist_context,
                            new_var,
                            ref_dist_attr.dims_mapping,
                            ref_dist_attr.process_mesh,
                        )
                        op._rename_input(input_name, broadcast_varname)

                    _insert_init_and_broadcast_op(
                        main_block,
                        idx,
                        broadcast_varname,
                        sharding_info.local_rank,
                        root_rank,
                        sharding_info.group.id,
                        op.attr('op_role'),
                        self._dist_context,
                    )

            # Remove fp16 casts of params that are neither used nor local.
            for idx, op in reversed(list(enumerate(main_block.ops))):
                if op.type != "cast":
                    continue
                input_name = op.input_arg_names[0]
                output_name = op.output_arg_names[0]
                if input_name in not_used_param_name:
                    main_block._remove_op(idx, sync=False)
                    main_block._remove_var(output_name, sync=False)

            # Prune/retarget startup-block ops for non-local params.
            for idx, op in reversed(list(enumerate(startup_block.ops))):
                assert len(op.output_arg_names) == 1
                output_name = op.output_arg_names[0]

                if (
                    op.type == "c_broadcast"
                    and op.attr("ring_id") in dp_ring_ids
                ):
                    if (
                        self.outer_dp_group
                        and sharding_info.get_var_rank(output_name)
                        == sharding_info.local_rank
                    ):
                        op._set_attr("ring_id", self.outer_dp_group.id)
                    else:
                        startup_block._remove_op(idx, sync=False)
                    continue

                if (
                    op.type != "c_broadcast"
                    and output_name in param_usage
                    and sharding_info.get_var_rank(output_name)
                    != sharding_info.local_rank
                ):
                    startup_block._remove_op(idx, sync=False)

            # Finally drop the variables of params owned by other ranks.
            for param_name in param_usage:
                if (
                    sharding_info.get_var_rank(param_name)
                    != sharding_info.local_rank
                ):
                    main_block._remove_var(param_name, sync=False)
                    startup_block._remove_var(param_name, sync=False)

        main_block._sync_with_cpp()
        startup_block._sync_with_cpp()

682 683
    def _optimization_pass(self, main_program, startup_program):
        """Run the fuse/overlap communication optimizations (stage >= 2):
        gradient grouping+overlap, then parameter comm fusion per stage.
        """
        if self.stage <= 1:
            return

        self.grad_coalesce_prefix = 'sharding_coalesce_grad_'
        self.param_coalesce_prefix = 'sharding_coalesce_param_'
        # NOTE PR#49275 for detail
        self.comm_op_scheduling_priority = -1

        # TODO support multiple sub_blocks
        assert (
            len(self.sharding_infos) == 1
        ), "gradient synchronization optimization only support one sharding group right now, but got [{}].".format(
            len(self.sharding_infos)
        )
        sharding_info = self.sharding_infos[0]

        with paddle.static.program_guard(main_program, startup_program):
            self._gradient_sync_optimization(sharding_info)
            # TODO independent the logic of fuse and overlap
            # support overlap when no fuse
            if self.param_bucket_size_numel > 1:
                if self.stage == 2:
                    self._fuse_overlap_parameter_comm_stage_two(sharding_info)
                elif self.stage == 3:
                    self._fuse_overlap_parameter_comm_stage_three(sharding_info)

    def _gradient_sync_optimization(self, sharding_info):
        """Fuse gradient reduce ops into buckets and overlap them with
        computation.

        No-op when neither gradient bucketing nor comm/calc overlap is
        enabled.
        """
        if self.grad_bucket_size_numel <= 1 and (not self.enable_overlap):
            return

        main_block = default_main_program().global_block()
        # _group_grads returns None for inference programs (no backward
        # ops); bail out instead of failing to unpack a 2-tuple.
        grouped = self._group_grads(main_block, sharding_info)
        if grouped is None:
            return
        coalesce_to_group_map, grad_name_to_group_map = grouped
        self._overlap_grad_comm(
            main_block,
            sharding_info,
            coalesce_to_group_map,
            grad_name_to_group_map,
        )

    def _fuse_overlap_parameter_comm_stage_two(self, sharding_info):
        """Stage-2 parameter communication optimization.

        Fuses the sharded parameters into coalesced buckets (one
        `coalesce_tensor` per bucket), broadcasts each bucket from its owner
        rank, and — when overlap is enabled — spreads the broadcasts over
        multiple named communication streams. Explicit dependency (nop) ops
        are inserted so the standalone executor preserves ordering.
        """
        main_block = default_main_program().global_block()
        startup_block = default_startup_program().global_block()

        group_to_param_map, param_to_group_map = group_param(
            sharding_info, self.param_bucket_size_numel
        )
        _logger.info("Sharding Stage2 Optimization:")
        _logger.info(
            "Param Bucket size is [{}], [{}] Parameters are fused into [{}] Buckets".format(
                self.param_bucket_size_numel,
                len(param_to_group_map.keys()),
                len(group_to_param_map.keys()),
            )
        )
        broadcast_var_to_group_map = {}

        if self.enable_overlap:
            # if the communication is cross node, comm will be slow and calc will therefore
            # wait for comm. enable multi-comm-stream
            # TODO revise me in future
            # 1. manager the comm and corresponding stream
            # 2. allow more than two streams and open to be config
            self.param_comm_group_stream_pairs = []
            ranks = sharding_info.group.ranks
            for i in range(self.param_comm_stream_num):
                if i == 0:
                    group = sharding_info.group
                else:
                    group = new_process_group(ranks, force_new_group=True)
                # NOTE here stream is just a presentation with different name,
                # it is up to executor to create the exact streams given the name.
                stream = f"sharding_param_comm_stream{i}"
                self.param_comm_group_stream_pairs.append(
                    {
                        "comm_group": group,
                        "comm_stream": stream,
                    }
                )
            _logger.info(
                "Parameter Communication would use [{}] streams.".format(
                    self.param_comm_stream_num
                )
            )
            self.op_to_stream_idx = {}

        for i, param_group in enumerate(group_to_param_map.keys()):
            assert len(param_group) >= 1
            if len(param_group) > 1:
                # multi-param bucket: create the coalesced storage var in both
                # blocks and fuse the params into it at startup.
                coalesce_var_name = unique_name.generate(
                    self.param_coalesce_prefix + str(i)
                )
                startup_block.create_var(
                    name=coalesce_var_name,
                    dtype=param_group.dtype,
                    persistable=True,
                    stop_gradient=True,
                )
                param_group.coalesce_var = main_block.create_var(
                    name=coalesce_var_name,
                    dtype=param_group.dtype,
                    persistable=True,
                    stop_gradient=True,
                )
                startup_block.append_op(
                    type="coalesce_tensor",
                    inputs={"Input": param_group.vars},
                    outputs={
                        "Output": param_group.vars,
                        "FusedOutput": param_group.coalesce_var,
                    },
                    attrs={
                        "copy_data": True,
                        "use_align": True,
                        "dtype": param_group.dtype,
                        OP_ROLE_KEY: OpRole.Forward,
                    },
                )
            else:
                # singleton bucket: broadcast the parameter directly.
                param_group.coalesce_var = param_group.vars[0]
            _logger.info(
                "Bucket[{}] size [{}]MB.".format(
                    i,
                    sum([get_var_size(p) for p in param_group.vars]),
                )
            )
            _logger.debug(
                "Bucket[{}] parameters: {}.".format(
                    i,
                    [p.name for p in param_group.vars],
                )
            )

            broadcast_var_to_group_map[
                param_group.coalesce_var.name
            ] = param_group

            # TODO revise me to manager stream and comm
            # NOTE(review): param_comm_group_stream_pairs is only built in
            # the enable_overlap branch above — confirm callers always enable
            # overlap when this pass runs.
            comm_stream_idx = i % self.param_comm_stream_num
            comm_group = self.param_comm_group_stream_pairs[comm_stream_idx][
                'comm_group'
            ]
            comm_stream = self.param_comm_group_stream_pairs[comm_stream_idx][
                'comm_stream'
            ]
            new_op = main_block.append_op(
                type='c_broadcast',
                inputs={'X': param_group.coalesce_var},
                outputs={'Out': param_group.coalesce_var},
                attrs={
                    'ring_id': comm_group.id,
                    'root': param_group.rank,
                    'use_calc_stream': True,
                    OP_ROLE_KEY: OpRole.Optimize,
                },
            )
            self.op_to_stream_idx[new_op] = comm_stream_idx
            new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
            if self.enable_overlap:
                new_op.dist_attr.execution_stream = comm_stream
                new_op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

            # NOTE the current dist context lack the presentation for bucket tensor which
            # composes many tensor with different dims_mapping. we DO NOT assign dist attr
            # for it currently.

        # add dependencies:
        # 1. all broadcast depend on its pre collective
        # 2. coalesce broadcast add nop to resolute data flow dependencies
        dep_map = {}
        for i, op in enumerate(main_block.ops):
            if is_sharding_param_broadcast_op(op):
                broadcast_varname = op.output("Out")[0]
                broadcast_var = main_block.vars[broadcast_varname]
                param_group = broadcast_var_to_group_map[broadcast_varname]
                comm_stream = None
                if self.enable_overlap:
                    comm_stream = op.dist_attr.execution_stream

                # FIXME remove me when upgrade to multi-comm version
                if len(dep_map.keys()) < self.param_comm_stream_num:
                    # first broadcast on each stream depends on the optimizer
                    op = _get_broadcast_first_depend_op(main_block)
                    prior_var = main_block.vars[op.output("ParamOut")[0]]
                else:
                    # later broadcasts depend on the previous one on the
                    # same stream (stream_num ops back).
                    pre_op = main_block.ops[i - self.param_comm_stream_num]
                    assert is_sharding_param_broadcast_op(
                        pre_op
                    ), "Unexpected: sharding broadcast pre op should be broadcast."
                    prior_var = main_block.vars[pre_op.output("Out")[0]]
                # broadcast order dependencies
                dep_map[i] = [(i, [prior_var], [broadcast_var], comm_stream)]

                if len(param_group.vars) > 1:
                    # in shard coalesce depend to optimizer
                    if param_group.is_in_local_shard:
                        last_grad = param_group.vars[-1]
                        dep_map[i].append(
                            (i, [last_grad], [broadcast_var], comm_stream)
                        )
                    # coalesce resolution post deps
                    dep_map[i].append(
                        (i + 1, [broadcast_var], param_group.vars, comm_stream)
                    )

        # insert deps (reverse index order so insertions don't shift
        # not-yet-processed positions)
        indice = sorted(dep_map.keys(), reverse=True)
        for i in indice:
            for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
                depend_op = insert_dependencies_for_vars(
                    main_block,
                    idx,
                    prior_vars,
                    post_vars,
                    self._dist_context,
                    OpRole.Optimize,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_stage2_broadcast_dep",
                )
                if self.enable_overlap:
                    depend_op.dist_attr.execution_stream = comm_stream
                    depend_op.dist_attr.scheduling_priority = (
                        self.comm_op_scheduling_priority
                    )

        main_block._sync_with_cpp()

    def _fuse_overlap_parameter_comm_stage_three(self, sharding_info):
        """Placeholder: stage-3 parameter communication fuse/overlap is not
        implemented yet (dispatched from _optimization_pass for stage 3)."""
        pass

    def _group_grads(
        self,
        block,
        sharding_info,
    ):
        """
        conditions for gradients to be grouped:
            1. group size < grad_bucket_size_numel
            2. same dp group (TODO)
            3. same src rank
            4. same dtype
            5. dependency: grad would NOT be used by other ops within group segment

        main logic:
            1. record coalesce group
            2. record all dp allreduce/reduce op idx

            3. insert coalesce op
            4. insert coalesce dependency (avoid allocate memory too early)
            5. modify and remove allreduce/reduce op
            6. ensure sharding-dp hybrid parallel logic

        gradients inside same group would be fuse into one coalesce tensor

        Returns (coalesce_to_group_map, grad_name_to_group_map), or None when
        the program has no backward ops (inference).
        """
        ops = block.ops
        if self.grad_bucket_size_numel < 1:
            # numel for transformer layer
            # h = 4096 + 1
            # ffn_numel = 2 * (4 * h) * h
            # mha_numel = 3 * h * h + h * h
            # max_fuse_numel = ffn_numel + mha_numel
            self.grad_bucket_size_numel = 1

        first_backward_op = None
        for op in ops:
            if is_loss_grad_op(op):
                first_backward_op = op
        # not backward op, sharding for inference
        if first_backward_op is None:
            return
        first_backward_varname = first_backward_op.output_arg_names[0]

        cur_group = VarGroup(self.grad_bucket_size_numel)
        grad_groups = []
        grouped_grad_names = set()

        def op_depend_on_group(op, group):
            # True when op reads or writes any grad already in the group.
            vars_ = set(op.input_arg_names + op.output_arg_names)
            var_names = {var.name for var in group.vars}
            return len(vars_.intersection(var_names)) > 0

        # analyze groups
        i = 0
        while i < len(ops):
            op = ops[i]
            if is_data_parallel_reduce_op(op):
                assert (
                    op.type == "c_reduce_sum"
                ), "Sharding should reduce grad first and than allreduce if Hybrid Sharding with Data-Parallel"

                grad_name = op.output_arg_names[0]
                param_name = _get_base_name_from_grad_name(grad_name)
                rank = sharding_info.get_var_rank(param_name)
                grad_var = block.var(grad_name)

                if cur_group.acceptable(grad_var, rank):
                    assert grad_name not in grouped_grad_names
                    cur_group.collect(grad_var, rank)
                else:
                    grad_groups.append(cur_group)
                    cur_group = VarGroup(self.grad_bucket_size_numel)
                    cur_group.collect(grad_var, rank)

                if len(cur_group.vars) == 1:
                    cur_group.coalesce_op_idx = i - 1
                    # NOTE coalesce dependency: control when allocate memory for gradients
                    # too early would increase the peak memory requirement, too later would hurt the performance
                    j = 2
                    while is_dep_skip_op(ops[i - j]):
                        j += 1
                    dep_op = ops[i - j]
                    dep_varname = dep_op.output_arg_names[0]
                    cur_group.coalesce_dep_varname = dep_varname

                grouped_grad_names.add(grad_name)
                cur_group.reduce_op_indices.append(i)

                if self.sharding_hybrid_dp and sharding_info.is_in_local_shard(
                    param_name
                ):
                    cur_group.is_in_local_shard = True
                    assert (
                        ops[i + 1].type == "c_allreduce_sum"
                    ), "Sharding should reduce grad first and than allreduce if Hybrid Sharding with Data-Parallel"
                    assert (
                        ops[i + 1].output_arg_names[0] == grad_name
                    ), "Hybrid Sharding with Data-Parallel should sync same gradient var"
                    cur_group.allreduce_op_indices.append(i + 1)
                    i += 1
            elif op_depend_on_group(op, cur_group):
                # a consumer of a grouped grad closes the current group
                grad_groups.append(cur_group)
                cur_group = VarGroup(self.grad_bucket_size_numel)

            i += 1
        # some grad not in this rank may not be used after dp reduced
        if len(cur_group.vars) >= 1:
            grad_groups.append(cur_group)

        _logger.info("Sharding Gradient Communication Optimization:")
        _logger.info(
            "Gradient Bucket size is [{}], [{}] Gradients are fused into [{}] Buckets.".format(
                self.grad_bucket_size_numel,
                len(grouped_grad_names),
                len(grad_groups),
            )
        )

        # create coalesce tesnor and record op idx
        grad_name_to_group_map = {}
        coalesce_to_group_map = {}
        modify_reduce_op_map = {}
        coalesce_op_map = {}
        remove_reduce_op_indices = []

        for i, group in enumerate(grad_groups):
            if len(group.vars) > 1:
                group.coalesce_var = block.create_var(
                    name=unique_name.generate(
                        self.grad_coalesce_prefix + str(i)
                    ),
                    dtype=group.dtype,
                    persistable=False,
                    stop_gradient=True,
                )
                coalesce_op_map[group.coalesce_op_idx] = group
                # keep only the last reduce (retargeted to the coalesced
                # var); the rest are removed.
                last_reduce_op_idx = group.reduce_op_indices.pop()
                modify_reduce_op_map[last_reduce_op_idx] = group
                remove_reduce_op_indices.extend(group.reduce_op_indices)
                if group.is_in_local_shard:
                    last_allreduce_op_idx = group.allreduce_op_indices.pop()
                    modify_reduce_op_map[last_allreduce_op_idx] = group
                    remove_reduce_op_indices.extend(group.allreduce_op_indices)
            else:
                group.coalesce_var = group.vars[0]
            for grad in group.vars:
                grad_name_to_group_map[grad.name] = group
            coalesce_to_group_map[group.coalesce_var.name] = group

        # sanity: an op index belongs to at most one action set
        coalesce_op_set = set(coalesce_op_map.keys())
        modify_op_set = set(modify_reduce_op_map.keys())
        remove_op_set = set(remove_reduce_op_indices)
        confilct = coalesce_op_set.intersection(modify_op_set)

        assert len(confilct) == 0
        confilct = coalesce_op_set.intersection(remove_op_set)
        assert len(confilct) == 0
        confilct = modify_op_set.intersection(remove_op_set)
        assert len(confilct) == 0

        # update block (reverse order so removals/insertions keep indices valid)
        for idx, op in reversed(list(enumerate(block.ops))):
            if idx in modify_reduce_op_map:
                group = modify_reduce_op_map[idx]
                grad_name = op.output_arg_names[0]
                assert (
                    grad_name == group.vars[-1].name
                ), "Unexpected: it is supposed to sync [{}] but got [{}]".format(
                    group.vars[-1].name, grad_name
                )
                op._rename_input(grad_name, group.coalesce_var.name)
                op._rename_output(grad_name, group.coalesce_var.name)

            if idx in remove_reduce_op_indices:
                block._remove_op(idx, sync=False)

            if idx in coalesce_op_map:
                group = coalesce_op_map[idx]
                first_grad_name = group.vars[0].name
                assert (
                    first_grad_name in op.output_arg_names
                ), "Unexpected: op is supposed to generate grad [{}] but got [{}]".format(
                    first_grad_name, str(op)
                )
                grad_names = [grad.name for grad in group.vars]

                concated_shapes = []
                concated_ranks = []
                for grad_ in group.vars:
                    shape = grad_.shape
                    concated_shapes.extend(shape)
                    concated_ranks.append(len(shape))

                coalesce_op = block._insert_op_without_sync(
                    idx,
                    type="coalesce_tensor",
                    inputs={"Input": grad_names},
                    outputs={
                        "Output": grad_names,
                        "FusedOutput": group.coalesce_var,
                    },
                    attrs={
                        "copy_data": False,
                        "use_align": True,
                        "dtype": group.dtype,
                        "concated_shapes": concated_shapes,
                        "concated_ranks": concated_ranks,
                        OP_ROLE_KEY: OpRole.Backward,
                    },
                )
                depend_op = insert_dependencies_for_vars(
                    block,
                    idx,
                    block.var(group.coalesce_dep_varname),
                    group.coalesce_var,
                    self._dist_context,
                    OpRole.Backward,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_grad_coalesce_dep",
                )
        block._sync_with_cpp()

        return coalesce_to_group_map, grad_name_to_group_map

    def _overlap_grad_comm(
        self,
        block,
        sharding_info,
        coalesce_to_group_map,
        grad_name_to_group_map,
    ):
        """
        overlap gradient communication with backward & optimizer computation.

        1. assign gradient communications to grad comm stream
        2. for coalesce gradient communication:
            2.1 insert before communication dependencies
            2.2 insert after communication dependencies only when need
        3. there is not need to add explicit dependencies for non-coalesce gradient communication

        P.S. this overlap pass is ONLY adapted for standalone executor (graph based) and stream awared allocator.
        """
        if not self.enable_overlap:
            return

        self.grad_comm_group_stream_pairs = []
        ranks = sharding_info.group.ranks
        # NOTE since the gradient synchronization has calculation, there would be computation
        # competition between backward calculation. therefore should limit the number of stream used.
        for i in range(self.grad_comm_stream_num):
            if i == 0:
                group = sharding_info.group
            else:
                group = new_process_group(ranks, force_new_group=True)
            # NOTE here stream is just a presentation with different name,
            # it is up to executor to create the exact streams given the name.
            stream = f"sharding_grad_comm_stream{i}"
            self.grad_comm_group_stream_pairs.append(
                {
                    "comm_group": group,
                    "comm_stream": stream,
                }
            )

        ops = block.ops
        # analyze dependencies
        dep_map = {}
        reduce_op_count = 0
        grad_comm_op_to_stream_idx = {}
        for idx, op in enumerate(ops):
            if is_data_parallel_reduce_op(op):
                if op.type == "c_allreduce_sum":
                    # paired allreduce is handled alongside its c_reduce_sum
                    continue
                # round-robin the reduce ops over the comm streams
                stream_idx = reduce_op_count % self.grad_comm_stream_num
                grad_comm_op_to_stream_idx[op] = stream_idx
                comm_group = self.grad_comm_group_stream_pairs[stream_idx][
                    "comm_group"
                ]
                comm_stream = self.grad_comm_group_stream_pairs[stream_idx][
                    "comm_stream"
                ]

                reduce_varname = op.output("Out")[0]
                grad_group = coalesce_to_group_map[reduce_varname]
                assert grad_group.coalesce_var.name == reduce_varname

                # coalesce deps
                if len(grad_group.vars) > 1:
                    # NOTE should prior vars to be all grads ?
                    # when the grad_ops' order is random
                    # prior dep
                    dep_map[idx] = [
                        (
                            idx,
                            grad_group.vars[-1],
                            grad_group.coalesce_var,
                            comm_stream,
                        )
                    ]
                    # post dep
                    post_idx = idx + 1
                    if self.sharding_hybrid_dp and grad_group.is_in_local_shard:
                        post_idx += 1
                    dep_map[idx].append(
                        (
                            post_idx,
                            grad_group.coalesce_var,
                            grad_group.vars,
                            comm_stream,
                        )
                    )

                # assign stream
                op.dist_attr.execution_stream = comm_stream
                op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

                op._set_attr("ring_id", comm_group.id)
                if self.sharding_hybrid_dp and grad_group.is_in_local_shard:
                    next_op = ops[idx + 1]
                    assert next_op.type == "c_allreduce_sum"
                    assert next_op.output("Out")[0] == reduce_varname
                    # FIXME hybrid sharding-dp support multi comm & stream in feature
                    # next_op._set_attr("ring_id", comm_group.id)
                    next_op.dist_attr.execution_stream = comm_stream
                    next_op.dist_attr.scheduling_priority = (
                        self.comm_op_scheduling_priority
                    )
                    # NOTE the paired c_allreduce_sum op is revisited on the
                    # next enumerate iteration but skipped by the type guard
                    # above; mutating `idx` here would have no effect anyway.

                reduce_op_count += 1

        # insert deps (reverse index order keeps pending indices valid)
        indice = sorted(dep_map.keys(), reverse=True)
        for i in indice:
            for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
                depend_op = insert_dependencies_for_vars(
                    block,
                    idx,
                    prior_vars,
                    post_vars,
                    self._dist_context,
                    OpRole.Backward,
                    process_mesh=[
                        -1
                    ],  # hack to avoid initialize the dist attr for coalesce var
                    is_recompute=False,
                    sync=False,
                    op_namescope="sharding_grad_comm_dep",
                )
                depend_op.dist_attr.execution_stream = comm_stream
                depend_op.dist_attr.scheduling_priority = (
                    self.comm_op_scheduling_priority
                )

        # hierarchical grad comm
        if self.enable_hierarchical_comm:
            # NOTE so far we only support Isomorphic cluster with 8 ranks per node
            # TODO unifiy here create communicators
            # create communicators
            nranks_per_node = 8
            assert self.sharding_world_size % nranks_per_node == 0
            global_group = sharding_info.group
            global_ranks = global_group.ranks
            relative_idx_in_node = self.global_rank % nranks_per_node
            node_idx = self.global_rank // nranks_per_node
            inter_node_ranks = [
                rank
                for rank in global_ranks
                if rank % nranks_per_node == relative_idx_in_node
            ]
            _logger.info(
                "Sharding Gradient Hierarchical Communication Optimization."
            )
            _logger.info(f"current global rank idx: {self.global_rank}.")
            _logger.info(f"local inter node ranks idx: {inter_node_ranks}.")
            assert (
                len(inter_node_ranks)
                == self.sharding_world_size // nranks_per_node
            )
            intra_node_ranks = [
                rank
                for rank in global_ranks
                if rank // nranks_per_node == node_idx
            ]
            assert len(intra_node_ranks) == nranks_per_node
            _logger.info(f"local intra node ranks idx: {intra_node_ranks}.")
            inter_node_groups = []
            intra_node_groups = []
            for _ in range(self.grad_comm_stream_num):
                # TODO re-use one origin communicator
                inter_node_groups.append(
                    new_process_group(inter_node_ranks, force_new_group=True)
                )
                intra_node_groups.append(
                    new_process_group(intra_node_ranks, force_new_group=True)
                )

            # update program: split each global reduce into an intra-node
            # reduce followed (on peer ranks) by an inter-node reduce.
            for idx, op in reversed(list(enumerate(block.ops))):
                if is_data_parallel_reduce_op(op):
                    assert op.type == "c_reduce_sum"
                    grad_comm_stream_idx = grad_comm_op_to_stream_idx[op]
                    inter_node_group = inter_node_groups[grad_comm_stream_idx]
                    intra_node_group = intra_node_groups[grad_comm_stream_idx]

                    reduce_varname = op.output("Out")[0]
                    if self.enable_overlap:
                        comm_stream = op.dist_attr.execution_stream
                    dst_rank = int(op.attr("root_id"))

                    in_peer = False
                    if dst_rank % nranks_per_node == relative_idx_in_node:
                        in_peer = True
                    intra_node_dst = dst_rank % nranks_per_node

                    op._set_attr('ring_id', intra_node_group.id)
                    op._set_attr('root_id', intra_node_dst)

                    if in_peer:
                        inter_node_dst = dst_rank // nranks_per_node
                        new_op = block._insert_op_without_sync(
                            idx + 1,
                            type='c_reduce_sum',
                            inputs={"X": reduce_varname},
                            outputs={
                                "Out": reduce_varname,
                            },
                            attrs={
                                'ring_id': inter_node_group.id,
                                'root_id': inter_node_dst,
                                'use_calc_stream': True,
                                OP_ROLE_KEY: OpRole.Backward,
                            },
                        )
                        new_op._set_attr(
                            'op_namescope', '/' + ParallelMode.DataParallel
                        )

                        if self.enable_overlap:
                            new_op.dist_attr.execution_stream = comm_stream
                            new_op.dist_attr.scheduling_priority = (
                                self.comm_op_scheduling_priority
                            )

        block._sync_with_cpp()


def _get_broadcast_first_depend_op(block):
    """Return the first optimizer op in ``block``.

    Raises:
        Exception: when ``block`` contains no supported optimizer op.
    """
    first_opt = next(
        (op for op in block.ops if op.type in _supported_optimizer_type), None
    )
    if first_opt is None:
        raise Exception("Could not find optimizer op.")
    return first_opt

def _insert_init_and_broadcast_op(
    block,
    insert_idx,
    varname,
    local_rank,
    root_rank,
    ring_id,
    op_role,
    dist_context,
):
    """Insert a ``c_broadcast`` of ``varname`` at ``insert_idx`` and, on
    non-root ranks, an ``empty`` op before it to allocate the receive buffer.

    Note: ops are inserted at the same index, so the ``empty`` op (inserted
    second) ends up *before* the broadcast in program order.
    """
    broadcast_var = block.var(varname)
    broadcast_var_dist_attr = dist_context.get_tensor_dist_attr_for_program(
        broadcast_var
    )

    new_op = block._insert_op_without_sync(
        insert_idx,
        type='c_broadcast',
        inputs={'X': varname},
        outputs={'Out': varname},
        attrs={
            'ring_id': ring_id,
            'root': root_rank,
            'use_calc_stream': True,
            OP_ROLE_KEY: op_role,
        },
    )
    new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
        new_op,
        broadcast_var_dist_attr.process_mesh,
        broadcast_var_dist_attr.dims_mapping,
        dist_context,
    )

    if local_rank != root_rank:
        # Non-root ranks receive the value: materialize the variable first.
        new_op = block._insert_op_without_sync(
            insert_idx,
            type="empty",
            outputs={"Out": broadcast_var.name},
            attrs={
                "shape": broadcast_var.shape,
                "dtype": broadcast_var.dtype,
                OP_ROLE_KEY: op_role,
            },
        )
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            new_op,
            broadcast_var_dist_attr.process_mesh,
            broadcast_var_dist_attr.dims_mapping,
            dist_context,
        )
    return

def _insert_reduce_op(
    block,
    insert_idx,
    reduce_var,
    ring_id,
    root_id,
    dist_context,
    op_role=OpRole.Backward,
    use_calc_stream=True,
):
    """Insert a ``c_reduce_sum`` on ``reduce_var`` at ``insert_idx`` and tag it
    with the variable's dist attr and the data-parallel namescope.

    Returns the inserted op.
    """
    # root_id == 0 is a valid root rank, so the check is non-negative.
    assert (
        root_id >= 0
    ), f"root id should be a non-negative int, but now root id is {root_id}"
    new_op = block._insert_op_without_sync(
        insert_idx,
        type='c_reduce_sum',
        inputs={'X': [reduce_var]},
        outputs={'Out': [reduce_var]},
        attrs={
            'ring_id': ring_id,
            'root_id': root_id,
            'use_calc_stream': use_calc_stream,
            OP_ROLE_KEY: op_role,
        },
    )

    dist_attr = dist_context.get_tensor_dist_attr_for_program(
        block.var(reduce_var)
    )
    naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
        new_op, dist_attr.process_mesh, dist_attr.dims_mapping, dist_context
    )
    new_op._set_attr('op_namescope', '/' + ParallelMode.DataParallel)
    return new_op



def _get_dp_and_sharding_groups(origin_group, sharding_group_size, rank):
    """Split ``origin_group`` into a [dp, sharding] 2-D mesh and return the
    communication groups of ``rank`` along each axis."""
    mesh_shape = [len(origin_group) // sharding_group_size, sharding_group_size]
    # axis 0 -> data parallel, axis 1 -> sharding
    dp_group = _get_comm_group(origin_group, mesh_shape, 0, rank)
    sharding_group = _get_comm_group(origin_group, mesh_shape, 1, rank)
    return dp_group, sharding_group


def _is_gradient_clip_op(op):
1496 1497 1498
    return op.desc.has_attr("op_namescope") and op.desc.attr(
        "op_namescope"
    ).startswith("/gradient_clip")
J
JZ-LIANG 已提交
1499 1500 1501


def _is_weight_decay_op(op):
1502 1503 1504
    return op.desc.has_attr("op_namescope") and op.desc.attr(
        "op_namescope"
    ).startswith("/regularization")
J
JZ-LIANG 已提交
1505 1506 1507 1508 1509


def _is_param_grad_fp32_cast_op(block, op):
    """Whether ``op`` is the backward cast that converts a parameter gradient
    from fp16 back to fp32 (AMP)."""
    if not is_backward_op(op):
        return False
    if not _is_desired_cast_op(
        block, op, core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32
    ):
        return False
    output_name = op.output_arg_names[0]
    # Strip the grad suffix (e.g. "@GRAD") to recover the parameter name.
    base_name = output_name[: output_name.find("@")]
    if not block.has_var(base_name):
        return False
    return block.var(base_name).is_parameter


def _is_param_fp16_cast_op(block, op, params):
    """Whether ``op`` is the fp32->fp16 cast of a parameter in ``params``
    (AMP O1 forward cast). Optimizer ops are excluded."""
    if is_optimize_op(op):
        return False
    if not _is_desired_cast_op(block, op):
        return False
    return op.input_arg_names[0] in params


def _is_desired_cast_op(
    block,
    op,
    src_var_type=core.VarDesc.VarType.FP32,
    dst_var_type=core.VarDesc.VarType.FP16,
):
    """Whether ``op`` is a ``cast`` whose single input has ``src_var_type``
    and single output has ``dst_var_type``."""
    if op.type != "cast":
        return False
    assert len(op.input_arg_names) == 1
    assert len(op.output_arg_names) == 1
    input_var = block.var(op.input_arg_names[0])
    output_var = block.var(op.output_arg_names[0])

    if input_var.dtype != src_var_type or output_var.dtype != dst_var_type:
        return False

    return True


def _get_base_name_from_grad_name(grad_name):
    base_name = None
    if ".cast_fp16@GRAD" in grad_name:
1555
        base_name = grad_name[: grad_name.find(".cast_fp16@GRAD")]
J
JZ-LIANG 已提交
1556
    elif "@GRAD" in grad_name:
1557
        base_name = grad_name[: grad_name.find("@GRAD")]
J
JZ-LIANG 已提交
1558 1559 1560
    return base_name


def _is_param_grad_allreduce_op(op, block):
    """True iff ``op`` is a data-parallel reduce whose output gradient belongs
    to a parameter of ``block``."""
    if not is_data_parallel_reduce_op(op):
        return False

    grad_name = op.output_arg_names[0]
    param_name = _get_base_name_from_grad_name(grad_name)

    if not block.has_var(param_name):
        return False

    return block.var(param_name).is_parameter


def _is_param_grad_sum_op(op, block):
    """True iff ``op`` is a backward ``sum`` that accumulates the gradient of
    a parameter of ``block``."""
    if not is_backward_op(op):
        return False
    if op.type != "sum":
        return False

    grad_name = op.output_arg_names[0]
    param_name = _get_base_name_from_grad_name(grad_name)

    if not block.has_var(param_name):
        return False

    return block.var(param_name).is_parameter


def _is_forward_op(op):
    return op.attr("op_role") == 0


def is_sharding_param_broadcast_op(op):
    """True iff ``op`` is a ``c_broadcast`` tagged with the data-parallel
    namescope (i.e. a sharding parameter broadcast)."""
    if op.type != "c_broadcast":
        return False
    desc = op.desc
    if not desc.has_attr("op_namescope"):
        return False
    return ParallelMode.DataParallel in desc.attr("op_namescope")


def _inference_data_parallel_group_for_operator(rank_id, op, dist_context):
    """Infer the data-parallel process group of ``op`` for ``rank_id`` from
    the dims mapping of its first non-parameter input, or None when the op is
    not data-parallel along the batch axis."""
    dp_group = None
    for input_name in op.input_arg_names:
        # TODO(zhaoyingli): maintain a dict in dist_context to record all variables which are renamed,
        # to solve the param@RESHARD cannot be identified.
        if not is_parameter_related(input_name, op.block, dist_context):
            dist_attr = dist_context.get_op_dist_attr_for_program(op)
            process_mesh = dist_attr.process_mesh
            input_dim_mapping = dist_attr.get_input_dims_mapping(input_name)
            mesh_shape = process_mesh.shape
            # TODO(JZ-LIANG) replace with specific batch size dimension
            batch_size_axis = input_dim_mapping[0]
            if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                group_ranks = _get_comm_group(
                    process_mesh.process_ids,
                    process_mesh.shape,
                    batch_size_axis,
                    rank_id,
                )
                dp_group = new_process_group(group_ranks)
                break

    return dp_group


def partition_by_use_order(params, group_size):
    """
    Shard continuous params onto the same rank and divide the forward &
    backward computation into segments, which favors the later fuse pass.

    We assume ``params`` is already sorted by utilization order.
    """
    total_param_mem = 0.0
    param2mem = []
    for param in params:
        mem = get_var_size(param)
        total_param_mem += mem
        param2mem.append((param, mem))
    mapping = {x: [] for x in range(group_size)}
    cur_rank = 0
    mem_accu = 0.0
    for param, mem in param2mem:
        # Advance to the next rank once its proportional memory quota is full.
        if mem_accu > total_param_mem * 1.0 * (cur_rank + 1) / group_size:
            cur_rank += 1
        mapping[cur_rank].append(param)
        mem_accu += mem

    return mapping


def partition_by_greedy_even(params, group_size):
    """
    Use a greedy algorithm to partition parameters as evenly as possible:
    each param goes to the rank with the smallest accumulated element count.
    """
    mapping = {}
    for rank_ in range(group_size):
        mapping[rank_] = []
    sizes = [0] * group_size
    for param in params:
        rank = sizes.index(min(sizes))
        mapping[rank].append(param)
        numel = reduce(lambda x, y: x * y, param.shape, 1)
        assert (
            numel > 0
        ), f"param [{param.name}] should larger than 0, but it is [{numel}]"
        sizes[rank] += numel

    return mapping


def partition_parameters(params, group_size, algor="greedy_even"):
    """Partition ``params`` across ``group_size`` ranks.

    ``algor`` selects the strategy: "greedy_even" balances element counts;
    any other value partitions by utilization order.
    """
    if algor == "greedy_even":
        rank_to_params = partition_by_greedy_even(params, group_size)
    else:
        rank_to_params = partition_by_use_order(params, group_size)

    _logger.info("Sharding Parameter Partition:")
    for k, v in rank_to_params.items():
        _logger.info(
            "Rank:{}, Parameter Size:{} MB.".format(
                k, sum(get_var_size(var) for var in v)
            )
        )
        _logger.info(f"Params in this rank: {[var.name for var in v]}.")

    return rank_to_params


def re_order_program(block, param_grads, dist_context):
    """Reorder the optimizer ops at the tail of ``block`` to match the order
    in which parameters are first used, and return ``param_grads`` pairs in
    that use order."""
    # record order
    pname_to_pg_pairs = {}
    for p, g in param_grads:
        pname_to_pg_pairs[p.name] = (p, g)

    use_order = []
    for op in block.ops:
        for input_name in op.input_arg_names:
            if (input_name in pname_to_pg_pairs) and (
                input_name not in use_order
            ):
                use_order.append(input_name)
        if len(use_order) == len(pname_to_pg_pairs):
            break

    # reorder optimizer
    last_op = block.ops[-1]
    pname_to_op = {}
    num_ops = len(block.ops)
    remove_op_indices = []
    # TODO support case when optimizer is not the last op
    if is_optimize_op(last_op) and last_op.type in _supported_optimizer_type:
        # record optimizer
        for idx, op in reversed(list(enumerate(block.ops))):
            if op.type not in _supported_optimizer_type:
                break
            assert len(op.input("Param")) == 1
            pname_to_op[op.input("Param")[0]] = op
            remove_op_indices.append(idx)
        assert len(use_order) == len(pname_to_op)

        # append new opts in use order
        for pname in use_order:
            new_op = block.append_op(type='nop')
            new_op.desc.copy_from(pname_to_op[pname].desc)
            dist_context.set_op_dist_attr_for_program(
                new_op,
                dist_context.get_op_dist_attr_for_program(pname_to_op[pname]),
            )

        # remove old opts (indices were collected in descending order)
        for idx in remove_op_indices:
            block._remove_op(idx, sync=False)

        block._sync_with_cpp()
        assert len(block.ops) == num_ops

    # TODO reorder gradient clip order
    _logger.info(f"Sharding the Order of param being used: {use_order}.")
    return [pname_to_pg_pairs[p] for p in use_order]


def group_param(sharding_info, fuse_size):
    """
    Group params into VarGroups keyed by: owner rank, fuse_size cap, dtype.

    Returns (group -> [param names], param name -> group).
    """
    group_to_param_map = {}
    param_to_group_map = {}
    cur_group = VarGroup(fuse_size)
    for param in sharding_info.params:
        rank = sharding_info.get_var_rank(param.name)

        if cur_group.acceptable(param, rank):
            cur_group.collect(param, rank)
        else:
            # Current bucket is full / incompatible: start a fresh one.
            cur_group = VarGroup(fuse_size)
            cur_group.collect(param, rank)

        cur_group.is_in_local_shard = sharding_info.is_in_local_shard(
            param.name
        )

        # VarGroup has no __hash__/__eq__, so this is identity-based lookup.
        if cur_group in group_to_param_map:
            group_to_param_map[cur_group].append(param.name)
        else:
            group_to_param_map[cur_group] = [param.name]

        param_to_group_map[param.name] = cur_group

    return group_to_param_map, param_to_group_map


class ShardingInfo:
    """Bookkeeping for one sharding group: which rank owns which parameter."""

    def __init__(self, group, rank, params_grads, partition_algor):
        self.group = group
        self.params_grads = {p.name: (p, g) for p, g in params_grads}
        # NOTE: compare against the *input* length — the dict above already
        # deduplicates keys, so comparing it with set(self.params_grads)
        # would never fire.
        assert len(self.params_grads) == len(
            params_grads
        ), "found duplicated param in params_grads"

        self.params = [p for p, _ in params_grads]
        self.param_names = [p.name for p in self.params]
        self.group_size = group.nranks
        self.global_rank = rank
        self.local_rank = group.ranks.index(self.global_rank)
        self.partition_algor = partition_algor
        # rank in below mapping are local rank in this sharding group
        self.rank_to_params = partition_parameters(
            self.params, self.group_size, self.partition_algor
        )
        # include fp32 and fp16 param
        self.param_to_rank = {}
        self._map_param_to_rank()

    def _map_param_to_rank(self):
        """
        mapping parameters to the rank which holds it.
        """
        for rank, params in self.rank_to_params.items():
            for param in params:
                self.param_to_rank[param.name] = rank

    def get_var_rank(self, varname):
        """Local rank that owns ``varname``, or -1 if it is not a parameter."""
        if varname in self.param_to_rank:
            return self.param_to_rank[varname]
        return -1

    # determine fp32 and fp16 (cast) param
    def is_in_local_shard(self, param_name):
        return self.get_var_rank(param_name) == self.local_rank

    # NOTE the following logic is designed for supporting AMP O1 when
    # the param would be cast to fp16 before used for calculation.
    # and sharding should only broadcast the casted fp16 param
    # instead of the origin fp32 version param.
    def get_broadcast_vars_and_param_usage(self, block):
        broadcast_vars = set()
        param_usage = {x: 0 for x in self.param_names}
        for op in block.ops:
            if is_optimize_op(op):
                continue
            for input_name in op.input_arg_names:
                if input_name in self.param_names:
                    param_usage[input_name] += 1

        for op in block.ops:
            if not _is_param_fp16_cast_op(block, op, self.param_names):
                continue
            input_name = op.input_arg_names[0]
            output_name = op.output_arg_names[0]
            # The fp16 copy is what gets broadcast; it is owned by the same
            # rank as its fp32 source.
            broadcast_vars.add(output_name)
            param_usage[input_name] -= 1
            self.param_to_rank[output_name] = self.param_to_rank[input_name]

        # fp32 params still used directly (outside the cast) must also be
        # broadcast.
        for param, usage in param_usage.items():
            if usage > 0:
                broadcast_vars.add(param)
        return broadcast_vars, param_usage

    def get_param_grad(self, param_name):
        """Return the (param, grad) pair for a locally-sharded param.

        Raises ValueError when the param is not held by this rank or unknown.
        """
        if not self.is_in_local_shard(param_name):
            raise ValueError(f"param[{param_name}] not in current rank.")
        if param_name not in self.params_grads:
            raise ValueError(f'param[{param_name}] not in params_grads')
        return self.params_grads[param_name]

class VarGroup:
    """A bucket of parameters that share dtype and owner rank, fused up to a
    maximum total element count."""

    def __init__(self, max_size):
        # NOTE(review): attribute keeps the historical misspelling
        # "max_siez" because code outside this chunk may read it.
        self.max_siez = max_size  # capacity cap, in number of elements
        self.dtype = None  # dtype of collected params (None while empty)
        self.rank = -1  # owner rank of collected params
        self.numel = 0  # total element count collected so far
        self.vars = []  # collected parameters, in insertion order
        self.coalesce_var = None
        self.coalesce_dep_varname = None
        self.coalesce_op_idx = None
        self.reduce_op_indices = []
        self.allreduce_op_indices = []
        self.is_in_local_shard = False

    def acceptable(self, param, rank):
        """Whether ``param`` owned by ``rank`` may join this group."""
        if self.numel == 0:
            # An empty group accepts anything.
            return True
        else:
            if param.dtype != self.dtype:
                return False
            if rank != self.rank:
                return False
            if self.numel + get_var_numel(param) > self.max_siez:
                return False
            return True

    def collect(self, param, rank):
        """Add ``param`` (owned by ``rank``) to this group."""
        self.dtype = param.dtype
        self.rank = rank
        self.numel += get_var_numel(param)
        self.vars.append(param)

    def __len__(self):
        return len(self.vars)