# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

from functools import reduce

import paddle
import paddle.fluid.core as core
from paddle.utils import unique_name
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import Program, OpProtoHolder
from paddle.distributed.fleet.meta_optimizers.common import OpRole
import paddle.fluid.layers.utils as utils
from .dist_context import DistributedContext
from .dist_attribute import TensorDistributedAttribute
from .process_group import new_process_group
from .cost import build_comm_desc, CommContext
from .cost import AllgatherOpCost, SendOpCost
from .cost import SliceOpCost, SplitOpCost, ConcatOpCost
from .utils import is_gradient_clip_op

# NOTE: If op in _g_special_ops or _g_gradient_clip_ops, it will not be resharded.
_g_special_ops = ['check_finite_and_unscale', 'update_loss_scaling']
_g_gradient_clip_ops = [
    "sum",
    "sqrt",
    "fill_constant",
    "elementwise_max",
    "elementwise_div",
]
_g_subblock_ops = ["while", "conditional_block"]


def get_var_with_recursion(var_name, block, program):
    """Get var in the parent block if not found in the current block"""
    var = None
    if var_name in block.vars:
        var = block.vars[var_name]
    else:
        var = block._var_recursive(var_name)
        # parent_block = program.blocks[block.parent_idx]
        # if var_name in parent_block.vars:
        #     var = parent_block.vars[var_name]
    assert var is not None, "{} is not found".format(var_name)

    return var


class AllGatherOpDesc:
    """
    Describe the allgather op in the reshard phase.

    Args:
        group (list): Process group.
        shape (list): The tensor shape.
        is_bool (bool): Whether the data to all-gather is bool. Default: False.
    """

    def __init__(self, group, shape, is_bool=False):
        self._group = group
        self._desc = "all_gather"
        self._shape = shape
        self._is_bool = is_bool

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def group(self):
        return self._group

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, group: {self._group}, shape: {self._shape}, is_bool: {self._is_bool}."


class AllGatherConcatOpDesc:
    """
    Describe the c_concat op in the reshard phase.

    Args:
        group (list): Process group.
        shape (list): The tensor shape.
        is_bool (bool): Whether the data to concat is bool. Default: False.
    """

    def __init__(self, group, shape, is_bool=False):
        self._group = group
        self._desc = "c_concat"
        self._shape = shape
        self._is_bool = is_bool

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def group(self):
        return self._group

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, group: {self._group}, shape: {self._shape}, is_bool: {self._is_bool}."


class SendOpDesc:
    """
    Describe the send op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process that sends the tensor.
        dst (int): The destination process that receives the tensor.
        is_bool (bool): Whether the data to send is bool. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._dst = dst
        self._partition_index = partition_index
        self._desc = "send"
        self._shape = []
        self._is_bool = is_bool
        self._src = src

    @property
    def src(self):
        return self._src

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def dst(self):
        return self._dst

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class RecvOpDesc:
    """
    Describe the recv op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process that sends the tensor.
        dst (int): The destination process that receives the tensor.
        is_bool (bool): Whether the received data is bool. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._src = src
        self._partition_index = partition_index
        self._desc = "recv"
        self._shape = []
        self._is_bool = is_bool
        self._dst = dst

    @property
    def dst(self):
        return self._dst

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def src(self):
        return self._src

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class SliceOpDesc:
    """
    Describe the slice op in the reshard phase.

    Args:
        starts (list): It represents start indices of corresponding axis in ``axes``.
        ends (list):  It represents end indices of corresponding axis in ``axes``.
        axes (list):  Axes that `starts` and `ends` apply to.
        shape (list): The shape of the tensor to be sliced.
    """

    def __init__(self, starts, ends, axes, shape=None):
        self._starts = starts
        self._ends = ends
        self._axes = axes
        self._desc = "slice"
        self._shape = shape

    @property
    def starts(self):
        return self._starts

    @property
    def ends(self):
        return self._ends

    @property
    def axes(self):
        return self._axes

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        if self._shape is not None:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}, shape: {self._shape}."
        else:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}."


class ConcatOpDesc:
    """
    Describe the concat op in the reshard phase.

    Args:
        partition_index_list (list): The list contains all partition index.
    """

    def __init__(self, partition_index_list):
        self._partition_index_list = partition_index_list
        self._desc = "concat"

    @property
    def partition_index_list(self):
        return self._partition_index_list

    @property
    def desc(self):
        return self._desc

    def __repr__(self):
        return f"op: {self._desc}, partition_index_list: {self._partition_index_list}."


class Inserter:
    """Insert op required in the reshard process."""

    @staticmethod
    def insert_cast_op(block, idx, tensor, op_role, tensor_type):
        # to avoid name conflict with framework
        new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key(
            ".".join(["cast@RESHARD", 'tmp'])
        )
        out = block.create_var(
            name=new_var_name,
            dtype=tensor_type,
            type=tensor.type,
            lod_level=tensor.lod_level,
        )
        cast_op = block._insert_op(
            idx,
            type='cast',
            inputs={'X': [tensor]},
            outputs={'Out': [out]},
            attrs={
                'in_dtype': tensor.dtype,
                'out_dtype': out.dtype,
                'op_role': op_role,
            },
        )
        cast_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_send_op(block, idx, tensor, src, dst, op_role):
        """Insert send op into block at the given index."""
        op_type = 'send_v2'
        # use pair comm group
        process_group = new_process_group([src, dst])
        send_op = block._insert_op(
            idx,
            type=op_type,
            inputs={'X': [tensor]},
            attrs={
                'ring_id': process_group.id,
                'peer': process_group.ranks.index(dst),
                'use_calc_stream': True,
                'op_role': op_role,
                'dynamic_shape': True,
            },
        )
        send_op._set_attr('op_namescope', "/auto_parallel/reshard")
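    # Note (a hedged reading of the attrs above): send/recv always build a
    # two-rank pair group, so 'peer' is the index of the destination inside
    # that pair rather than a global rank, e.g. for src=2, dst=5 the group
    # ranks are [2, 5] and peer == 1.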

    @staticmethod
    def insert_recv_op(block, idx, tensor, src, dst, op_role):
        """Insert recv op into block at the given index."""
        op_type = 'recv_v2'
        # use pair group
        process_group = new_process_group([src, dst])
        recv_op = block._insert_op(
            idx,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [tensor]},
            attrs={
                'ring_id': process_group.id,
                'peer': process_group.ranks.index(src),
                'out_shape': tensor.shape,
                'dtype': tensor.dtype,
                'use_calc_stream': True,
                'op_role': op_role,
                'dynamic_shape': True,
            },
        )
        recv_op._set_attr('op_namescope', "/auto_parallel/reshard")

    @staticmethod
    def insert_reset_lod_op(block, idx, X, Y, op_role):
        """Insert reset_lod op into block at the given index."""

        new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key(
            ".".join(["reset_lod@RESHARD", 'tmp'])
        )
        reset_lod_out = block.create_var(
            name=new_var_name,
            shape=X.shape,
            type=X.type,
            dtype=X.dtype,
            lod_level=X.lod_level,
        )

        reset_op = block._insert_op(
            idx,
            type="lod_reset",
            inputs={'X': X, 'Y': Y},
            outputs={'Out': reset_lod_out},
            attrs={'op_role': op_role},
        )
        reset_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return reset_lod_out

    @staticmethod
    def insert_concat_op(block, idx, tensors, axis, op_role):
        """Insert concat op into block at the given block."""
        inputs = {'X': tensors}
        attrs = {}
        attrs['axis'] = axis
        attrs['op_role'] = op_role
        # to avoid name conflict with framework
        helper = LayerHelper('concat@RESHARD', **locals())
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensors[0].dtype,
                shape=None,
                lod_level=tensors[0].lod_level,
                type=tensors[0].type,
                persistable=False,
                stop_gradient=False,
            )
        concat_op = block._insert_op(
            idx,
            type='concat',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
        )
        concat_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_slice_op(
        block, idx, tensor, starts, ends, axes, new_var_name, op_role
    ):
        """Insert slice op into block at the given block."""
        # This is a hack that inserts split/assign ops to get the sliced tensor:
        # 1. [128, 128] => [64, 128]: split
        # 2. [128, 128] => [128, 128]: assign
        # 3. [128, 128] => [64, 64]: slice, which will be replaced by multiple split ops
        global_shape = tensor.shape
        slice_shape = [ends[i] - starts[i] for i in range(len(starts))]
        diff_dims = []
        for index, item in enumerate(slice_shape):
            if item != global_shape[index]:
                diff_dims.append(index)

        # use assign
        if len(diff_dims) == 0:
            out = block.create_var(
                name=new_var_name,
                dtype=tensor.dtype,
                type=tensor.type,
                shape=slice_shape,
                lod_level=tensor.lod_level,
            )
            inputs = {'X': [tensor]}
            outputs = {"Out": [out]}
            attrs = {"in_place": False}
            slice_op = block._insert_op(
                idx, type="assign", inputs=inputs, outputs=outputs, attrs=attrs
            )
            slice_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use split once
        elif len(diff_dims) == 1:
            diff_dim = diff_dims[0]
            num_or_sections = global_shape[diff_dim] // slice_shape[diff_dim]
            axis = diff_dim
            cur_idx = starts[diff_dim] // slice_shape[diff_dim]
            input_shape = global_shape
            inputs = {'X': tensor}
            attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
            new_shape = []
            for index, item in enumerate(tensor.shape):
                if index != axis:
                    new_shape.append(item)
                else:
                    new_shape.append(item // num_or_sections)
            with paddle.static.program_guard(block.program):
                outs = [
                    block.create_var(
                        name=paddle.fluid.unique_name.generate_with_ignorable_key(
                            ".".join(['split@RESHARD', 'tmp'])
                        ),
                        dtype=tensor.dtype,
                        shape=None,
                        type=tensor.type,
                        persistable=False,
                        lod_level=tensor.lod_level,
                        stop_gradient=False,
                    )
                    for i in range(num_or_sections)
                ]
                out = outs[cur_idx]
            split_op = block._insert_op(
                idx,
                type="split",
                inputs=inputs,
                outputs={'Out': outs},
                attrs=attrs,
            )
            split_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use slice
        else:
            inputs = {'Input': tensor}
            infer_flags = list(1 for i in range(len(axes)))
            attrs = {
                "axes": axes,
                "starts": starts,
                "ends": ends,
                "infer_flags": infer_flags,
                'op_role': op_role,
            }
            out = block.create_var(
                name=new_var_name,
                dtype=tensor.dtype,
                type=tensor.type,
                lod_level=tensor.lod_level,
            )
            slice_op = block._insert_op(
                idx,
                type="slice",
                inputs=inputs,
                outputs={'Out': [out]},
                attrs=attrs,
            )
            slice_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

    @staticmethod
    def insert_split_op(block, idx, tensor, num_or_sections, op_role, axis=0):
        """Insert split op into block at the given index."""
        helper = LayerHelper('split@RESHARD', **locals())
        input_shape = tensor.shape
        inputs = {'X': tensor}
        attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
        new_shape = []
        for index, item in enumerate(tensor.shape):
            if index != axis:
                new_shape.append(item)
            else:
                new_shape.append(item // num_or_sections)
        with paddle.static.program_guard(block.program):
            outs = [
                block.create_var(
                    name=paddle.fluid.unique_name.generate_with_ignorable_key(
                        ".".join([helper.name, 'tmp'])
                    ),
                    dtype=tensor.dtype,
                    shape=None,
                    lod_level=tensor.lod_level,
                    type=tensor.type,
                    persistable=False,
                    stop_gradient=False,
                )
                for i in range(num_or_sections)
            ]
        split_op = block._insert_op(
            idx, type="split", inputs=inputs, outputs={'Out': outs}, attrs=attrs
        )
        split_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return outs

    @staticmethod
    def insert_fill_constant_op(block, idx, op_role):
        """Insert fill constant op into block at the given index."""
        # to avoid name conflict with framework
        helper = LayerHelper('fill_constant@RESHARD', **locals())
        # use paddle.int64 as dtype
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=paddle.int64,
                shape=None,
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False,
            )
        inputs = {}
        attrs = {'force_cpu': False}
        attrs['str_value'] = str(int("1"))
        attrs['value'] = int("1")
        attrs['dtype'] = out.dtype
        attrs['op_role'] = op_role
        utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=[0], op_type='fill_constant'
        )
        fillconstant_op = block._insert_op(
            idx,
            type='fill_constant',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
        )
        out.stop_gradient = True
        fillconstant_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_allgather_op(block, idx, tensor, ranks, op_role):
        """Insert allgather op into block at the given index."""
        tensor_list = []
        group = new_process_group(ranks)
        idx_offset = 0

        # instantiate the process group before inserting the allgather op.
        if not group.is_instantiate():
            # insert fill_constant op
            fill_constant_out = Inserter.insert_fill_constant_op(
                block, idx, op_role
            )
            fill_constant_out.stop_gradient = True

            # insert c_allreduce_sum op
617 618 619 620 621 622 623 624
            allreduce_op = block._insert_op(
                idx + 1,
                type="c_allreduce_sum",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={
                    'ring_id': 0,
                    'use_calc_stream': True,
                    'op_role': op_role,
                },
            )
            allreduce_op._set_attr('op_namescope', "/auto_parallel/reshard")
            # insert c_sync_calc_stream op
            sync_calc_op = block._insert_op(
                idx + 2,
                type="c_sync_calc_stream",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={'op_role': op_role},
            )
            sync_calc_op._set_attr('op_namescope', "/auto_parallel/reshard")
            idx_offset = 3

        # insert c_allgather op
        op_type = 'c_allgather'
        # to avoid name conflict with framework
        helper = LayerHelper(op_type + "@RESHARD", **locals())
        with paddle.static.program_guard(block.program):
            allgather_out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensor.dtype,
                shape=None,
                lod_level=tensor.lod_level,
                type=tensor.type,
                persistable=False,
                stop_gradient=False,
            )
        allgather_op = block._insert_op(
            idx + idx_offset,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [allgather_out]},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'nranks': group.nranks,
                'op_role': op_role,
            },
        )
        allgather_op._set_attr('op_namescope', "/auto_parallel/reshard")
        idx_offset += 1

        # insert split op
        split_out = Inserter.insert_split_op(
            block, idx + idx_offset, allgather_out, group.nranks, op_role
        )
        idx_offset += 1
        tensor_list.extend(split_out)
        return tensor_list, idx_offset

    @staticmethod
    def insert_c_concat_op(block, idx, tensor, ranks, op_role):
        """Insert c_concat op into block at the given index."""
        group = new_process_group(ranks)
        idx_offset = 0

        # insert c_concat op
        op_type = 'c_concat'
        # to avoid name conflict with framework
        helper = LayerHelper(op_type + "@RESHARD", **locals())
        with paddle.static.program_guard(block.program):
            c_concat_out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensor.dtype,
                shape=None,
                lod_level=tensor.lod_level,
                type=tensor.type,
                persistable=False,
                stop_gradient=False,
            )
        cur_rank = paddle.distributed.get_rank()
        c_concat_op = block._insert_op(
            idx + idx_offset,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [c_concat_out]},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'use_model_parallel': True,
                'nranks': group.nranks,
                'op_role': op_role,
                'rank': group.ranks.index(cur_rank) if cur_rank in ranks else 0,
            },
        )
        c_concat_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return c_concat_out

    @staticmethod
    def concat_partitions_with_op(
        partition_tensor_list, tensor, partition_index, block, idx, op_role
    ):
        """Concat the tensors and insert concat op."""
        if not partition_tensor_list:
            partition_tensor_list.append((tensor, partition_index))
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                (
                    concat_axis,
                    first_order,
                    new_partition,
                ) = Resharder.compute_concat_info(
                    partition_tensor_list[i][1], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    _ = (
                        Inserter.insert_concat_op(
                            block,
                            idx[0],
                            [partition_tensor_list[i][0], tensor],
                            concat_axis,
                            op_role,
                        )
                        if first_order == 0
                        else Inserter.insert_concat_op(
                            block,
                            idx[0],
                            [tensor, partition_tensor_list[i][0]],
                            concat_axis,
                            op_role,
                        )
                    )
                    partition_tensor_list.pop(i)
                    idx[0] += 1
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list,
                        _,
                        new_partition,
                        block,
                        idx,
                        op_role,
                    )
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append((tensor, partition_index))


class Remover:
    """Remove var and op in the reshard process."""

    @staticmethod
    def remove_no_need_ops(auto_parallel_main_prog, dist_context, rank_id):
        """Remove no need ops in the main program"""
        not_remove_op_ref = [
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
        ]

        # NOTE: The nested sub block is not supported now.
        remove_block_order = []
        for block_idx in Resharder.while_block_info:
            remove_block_order.append(block_idx)

        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            if block_idx not in remove_block_order:
                remove_block_order.append(block_idx)

        # the sub block should be removed first
        for block_idx in remove_block_order:
            remove_op_idx = []
            block = auto_parallel_main_prog.blocks[block_idx]
            ops = block.ops
            vars = block.vars
            for idx, op in enumerate(ops):
                if op.type == "read":
                    dim_list = []
                    for var_name in op.output_arg_names:
                        dim_list.extend(
                            get_var_with_recursion(
                                var_name, block, auto_parallel_main_prog
                            ).shape
                        )
                    for i in range(idx, -1, -1):
                        if ops[i].type == "create_py_reader":
                            ops[i]._set_attr("shape_concat", dim_list)
                            break
                    continue

                # replace the input and output of the c_sync_comm_stream op in the pipeline scenario.
                if op.type == "c_sync_comm_stream":
                    need_save = []
                    for var_name in op.input_arg_names:
                        process_mesh = (
                            dist_context.get_tensor_dist_attr_for_program(
                                get_var_with_recursion(
                                    var_name, block, auto_parallel_main_prog
                                )
                            ).process_mesh
                        )
                        if rank_id in process_mesh.processes:
                            need_save.append(var_name)
                    if not need_save:
                        remove_op_idx.append(idx)
                        continue

                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, need_save)
                    op.desc.set_output(proto.outputs[0].name, need_save)
                    continue

                # judge whether the other ops should be removed.
                op_dist_attr = dist_context.get_op_dist_attr_for_program(op)
                if op_dist_attr is not None:
                    op_process_mesh = op_dist_attr.process_mesh
                    if (
                        rank_id not in op_process_mesh.processes
                        and op.type not in not_remove_op_ref
                    ):
                        remove_op_idx.append(idx)

            for idx in remove_op_idx[::-1]:
                block._remove_op(idx)

    @staticmethod
    def remove_no_need_vars(
        auto_parallel_main_prog, dist_params_grads, feed_var_names
    ):
        """Remove no need vars in the main program"""
        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            remove_vars = set()
            ops = block.ops
            vars = block.vars
            need_vars = set()
            for op in ops:
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
                for var_name in op.output_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
            for var in vars:
                if var not in need_vars:
                    remove_vars.add(var)

            # change dist_params_grads; the optimize ops are only in block 0.
            if block_idx == 0:
                param_grad_map = {}
                for op in ops:
                    if int(op.attr('op_role')) == int(OpRole.Optimize):
                        if (
                            "Param" in op.input_names
                            and "Grad" in op.input_names
                        ):
                            param_name = op.input("Param")[0]
                            grad_name = op.input("Grad")[0]
                            param_grad_map[param_name] = grad_name

                need_remove_idx = []
                for idx, item in enumerate(dist_params_grads):
                    if item[0].name not in param_grad_map.keys():
                        need_remove_idx.append(idx)

                for idx in need_remove_idx[::-1]:
                    dist_params_grads.pop(idx)

                idx = 0
                while idx < len(dist_params_grads):
                    param_name = dist_params_grads[idx][0].name
                    grad_name = dist_params_grads[idx][1].name
                    if grad_name != param_grad_map[param_name]:
                        dist_params_grads[idx] = (
                            vars[param_name],
                            vars[param_grad_map[param_name]],
                        )
                    idx += 1

            for var in remove_vars:
                if var in feed_var_names:
                    continue
                block._remove_var(var)

    @staticmethod
    def remove_no_need_in_main(
        auto_parallel_main_prog, dist_context, rank_id, dist_params_grads
    ):
        """Remove no need vars and ops in the main program."""
        Remover.remove_no_need_ops(
            auto_parallel_main_prog, dist_context, rank_id
        )
        Resharder.change_while_op_input_and_output(
            auto_parallel_main_prog, dist_context
        )
        # 'feed_var_names' cannot be removed from auto_parallel_main_prog
        feed_var_names = []
        for var in sum(list(dist_context.serial_feed_vars.values()), []):
            feed_var_names.append(var.name)
        Remover.remove_no_need_vars(
            auto_parallel_main_prog, dist_params_grads, feed_var_names
        )

    @staticmethod
    def remove_no_need_in_startup(
        auto_parallel_main_prog, auto_parallel_startup_prog
    ):
        """Remove no need vars and ops in the startup program."""
        main_input_vars = set()
        main_ops = auto_parallel_main_prog.global_block().ops
        for op in main_ops:
            for var_name in op.input_arg_names:
                main_input_vars.add(var_name)

        startup_block = auto_parallel_startup_prog.global_block()
        startup_output_vars = set()
        startup_ops = startup_block.ops
        for op in startup_ops:
            # skip c_sync_comm_stream op
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                startup_output_vars.add(var_name)

        need_vars = set()
        for var_name in startup_output_vars:
            if var_name in main_input_vars:
                need_vars.add(var_name)

        startup_ops = startup_block.ops
        actual_need_vars = set()
        for idx, op in enumerate(startup_ops):
            is_need_op = False
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                if var_name in need_vars:
                    is_need_op = True
                    break
            if is_need_op:
                for var_name in op.output_arg_names:
                    actual_need_vars.add(var_name)
                for var_name in op.input_arg_names:
                    actual_need_vars.add(var_name)

        remove_vars = set()
        for var_name in startup_block.vars:
            if var_name not in actual_need_vars:
                remove_vars.add(var_name)
        for var in remove_vars:
            startup_block._remove_var(var)

        remove_op_idx = []
        vars = startup_block.vars
        for idx, op in enumerate(startup_block.ops):
            is_no_need_op = False
            if op.type == "c_sync_comm_stream":
                var_names = []
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        var_names.append(var_name)
                if not var_names:
                    remove_op_idx.append(idx)
                else:
                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, var_names)
                    op.desc.set_output(proto.outputs[0].name, var_names)
                continue

            for var_name in op.output_arg_names:
                if var_name not in vars:
                    is_no_need_op = True
                    break
            if is_no_need_op:
                remove_op_idx.append(idx)
        for idx in remove_op_idx[::-1]:
            startup_block._remove_op(idx)


class Resharder:
    """
    Reshard tensor in the program according to its distributed attribute and corresponding op distributed attribute.

    Args:
        auto_parallel_main_prog (Program): An auto parallel main program.
        auto_parallel_startup_prog (Program): An auto parallel startup program.
        rank_id (int): The process id.
        dist_context (DistributedContext): The distributed context of this rank.
        dist_params_grads (list): The list contains the tuple of param and grad.
        batch_size (int): The batch size. Default: None.
    """

    while_block_info = {}

    def __init__(
        self,
        auto_parallel_main_prog,
        auto_parallel_startup_prog,
        rank_id,
        dist_context,
        dist_params_grads,
        batch_size=None,
    ):
        assert isinstance(auto_parallel_main_prog, Program), (
            "The type of auto_parallel_main_prog should be Program, "
            "but got {}.".format(type(auto_parallel_main_prog))
        )
        if auto_parallel_startup_prog is not None:
            assert isinstance(auto_parallel_startup_prog, Program), (
                "The type of auto_parallel_startup_prog should be Program or None, "
                "but got {}.".format(type(auto_parallel_startup_prog))
            )
        assert isinstance(
            rank_id, int
        ), "The type of rank_id should be int, " "but got {}.".format(
            type(rank_id)
        )
        assert isinstance(dist_context, DistributedContext), (
            "The type of dist_context should be DistributedContext, "
            "but got {}.".format(type(dist_context))
        )

        if batch_size is not None:
            assert isinstance(
                batch_size, int
            ), "The type of batch_size should be int, " "but got {}.".format(
                type(batch_size)
            )

        self._auto_parallel_main_prog = auto_parallel_main_prog
        self._auto_parallel_startup_prog = auto_parallel_startup_prog
        self._rank_id = rank_id
        self._dist_context = dist_context
        self._dist_params_grads = dist_params_grads
        self._batch_size = batch_size
        self._has_sent = {}
        self._has_recv = {}
        self._has_allgather = {}
        # to avoid resharding repeatedly
        self._has_resharded = {}

    @property
    def auto_parallel_main_prog(self):
        return self._auto_parallel_main_prog

    @property
    def auto_parallel_startup_prog(self):
        return self._auto_parallel_startup_prog

    @property
    def rank_id(self):
        return self._rank_id

    @property
    def dist_context(self):
        return self._dist_context

    @property
    def dist_params_grads(self):
        return self._dist_params_grads

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def has_sent(self):
        return self._has_sent

    @property
    def has_recv(self):
        return self._has_recv

    @property
    def has_allgather(self):
        return self._has_allgather

    @staticmethod
    def compute_partition_shape(complete_shape, dims_mapping, process_shape):
        """Compute the shape of partition."""
        partition_shape = []
        for idx, item in enumerate(complete_shape):
            if dims_mapping[idx] == -1:
                partition_shape.append(item)
            else:
                partition_shape.append(item // process_shape[dims_mapping[idx]])

        return partition_shape
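    # Hedged worked example (hypothetical values): with complete_shape [8, 12],
    # dims_mapping [0, -1] and process_shape [2], dim 0 is split across two
    # processes and dim 1 is replicated, so
    #   Resharder.compute_partition_shape([8, 12], [0, -1], [2])  # -> [4, 12]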

    @staticmethod
    def compute_process_index(process, process_group, process_shape):
        """Compute the index of process_shape corresponding to the process."""
        relative_process = process_group.index(process)
        process_index = []
        product = reduce(lambda x, y: x * y, process_shape)

        for i in range(len(process_shape)):
            idx = relative_process // (product // process_shape[i])
            product = product // process_shape[i]
            relative_process = (
                relative_process - relative_process // product * product
            )
            process_index.append(idx)

        return process_index
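    # Hedged worked example (hypothetical values): with process_group
    # [4, 5, 6, 7] laid out row-major on process_shape [2, 2], process 6 is the
    # third process, so
    #   Resharder.compute_process_index(6, [4, 5, 6, 7], [2, 2])  # -> [1, 0]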

    @staticmethod
    def compute_partition_index(
        process, complete_shape, dims_mapping, process_shape, process_group
    ):
        """Compute the partition index in complete tensor."""
        partition_shape = Resharder.compute_partition_shape(
            complete_shape, dims_mapping, process_shape
        )
        process_index = Resharder.compute_process_index(
            process, process_group, process_shape
        )
        partition_index = []

        for i in range(len(complete_shape)):
            if dims_mapping[i] == -1:
                partition_index.append([0, partition_shape[i]])
            else:
                partition_index.append(
                    [
                        process_index[dims_mapping[i]] * partition_shape[i],
                        (process_index[dims_mapping[i]] + 1)
                        * partition_shape[i],
                    ]
                )

        return partition_index
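    # Hedged worked example (hypothetical values): for complete_shape [8, 4],
    # dims_mapping [0, -1], process_shape [2] and process_group [0, 1],
    # process 1 owns the second half of dim 0 and all of dim 1, so
    #   Resharder.compute_partition_index(1, [8, 4], [0, -1], [2], [0, 1])
    #   # -> [[4, 8], [0, 4]]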

    @staticmethod
    def compute_concat_info(partition_index_x, partition_index_y):
        """Judge whether two partition can be concatenated and compute concatenated partition index."""
        differ_count = 0
        concat_axis = -1
        first_order = 0
        new_partition = []

        for idx, item in enumerate(partition_index_x):
            if item != partition_index_y[idx]:
                differ_count += 1
                if (
                    item[1] == partition_index_y[idx][0]
                    and item[0] < partition_index_y[idx][1]
                ):
                    concat_axis = idx
                    new_partition.append([item[0], partition_index_y[idx][1]])
                elif (
                    item[0] == partition_index_y[idx][1]
                    and item[1] > partition_index_y[idx][0]
                ):
                    first_order = 1
                    concat_axis = idx
                    new_partition.append([partition_index_y[idx][0], item[1]])
            else:
                new_partition.append(item)

        if differ_count == 1:
            return concat_axis, first_order, new_partition
        else:
            return -1, first_order, new_partition
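    # Hedged worked example (hypothetical values): [[0, 4], [0, 8]] and
    # [[4, 8], [0, 8]] differ only on dim 0 and are adjacent there, so
    #   Resharder.compute_concat_info([[0, 4], [0, 8]], [[4, 8], [0, 8]])
    #   # -> (0, 0, [[0, 8], [0, 8]]), i.e. concat on axis 0 with x first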

    @staticmethod
    def compute_complete_shape(slice_shape, process_shape, dims_mapping):
        """compute the complete shape of the slice tensor  with its process mesh and dims mapping"""
        complete_shape = []
        for idx, item in enumerate(slice_shape):
            if dims_mapping[idx] == -1:
                complete_shape.append(item)
            else:
                complete_shape.append(item * process_shape[dims_mapping[idx]])
        return complete_shape
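    # Hedged worked example (hypothetical values): a local slice of shape
    # [4, 12] that is sharded on dim 0 across two processes came from a
    # complete tensor of shape [8, 12], so
    #   Resharder.compute_complete_shape([4, 12], [2], [0, -1])  # -> [8, 12]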

    @staticmethod
    def concat_partitions(partition_index_list, partition_index):
        """Concat the given partitions without inserting concat op."""
        if not partition_index_list:
            partition_index_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_index_list):
                concat_axis, _, new_partition = Resharder.compute_concat_info(
                    partition_index_list[i], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    partition_index_list.pop(i)
                    Resharder.concat_partitions(
                        partition_index_list, new_partition
                    )
                    break
                i += 1
            if not has_concat:
                partition_index_list.append(partition_index)

    @staticmethod
    def change_while_op_input_and_output(auto_parallel_main_prog, dist_context):
        """Change while op input and output after the corresponding sub block ops removed"""
        for sub_block_idx in Resharder.while_block_info:
            sub_block = auto_parallel_main_prog.blocks[sub_block_idx]
            parent_while_op_id = Resharder.while_block_info[sub_block_idx][
                "op_id"
            ]
            parent_block = auto_parallel_main_prog.blocks[sub_block.parent_idx]

            sub_block_op_inputs = set()
            sub_block_op_outputs = []
            for op in sub_block.ops:
                # skip the input and output of operators inserted in the reshard phase
                dist_op = dist_context.get_dist_op_for_program(op)
                if (
                    dist_op
                    or (op.type == "slice" and not dist_op)
                    or (op.type == "split" and not dist_op)
                    or (op.type == "assign" and not dist_op)
                ):
                    for var_name in op.output_arg_names:
                        if var_name not in sub_block_op_outputs:
                            sub_block_op_outputs.append(var_name)
                    for var_name in op.input_arg_names:
                        sub_block_op_inputs.add(var_name)

            # find the while op
            while_op = None
            for op in parent_block.ops:
                if op.desc.id() == parent_while_op_id and op.type == "while":
                    while_op = op
                    break

            if while_op is None:
                continue

            # find the actual input and output of while op
            proto = OpProtoHolder.instance().get_op_proto(while_op.type)
            new_X = []
            for var_name in while_op.input("X"):
                if var_name in sub_block_op_inputs:
                    new_X.append(var_name)
            assert new_X
            new_X.sort()
            while_op.desc.set_input(proto.inputs[0].name, new_X)

            new_Out = []
            for var_name in while_op.output("Out"):
                for output_name in sub_block_op_outputs[::-1]:
                    if output_name.find(var_name) != -1 and (
                        len(var_name) == len(output_name)
                        or "@RESHARD" in output_name
                    ):
                        if output_name not in new_Out:
                            new_Out.append(output_name)
            assert new_Out
            while_op.desc.set_output(proto.outputs[0].name, new_Out)

    def is_overlapped(self, shape_x, shape_y):
        """Judge whether two partitions intersect on the specified dimension."""
        overlapped = False
        if (shape_y[0] <= shape_x[0] < shape_y[1]) or (
            shape_x[0] <= shape_y[0] < shape_x[1]
        ):
            overlapped = True
        return overlapped
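    # Hedged example (hypothetical values): the ranges are half-open, so
    #   self.is_overlapped([0, 4], [2, 6])  # -> True
    #   self.is_overlapped([0, 4], [4, 8])  # -> False, they only touch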

    def is_unshard(self, dims_mapping):
        for dim in dims_mapping:
            if dim != -1:
                return False
        return True

    def is_special_op(self, op):
        global _g_special_ops, _g_gradient_clip_ops
        if op.type in _g_special_ops:
            return True
        if is_gradient_clip_op(op) and op.type in _g_gradient_clip_ops:
            return True
        return False

    def is_condition_replicative(self, op):
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]

        if op.type == "while":
            input_cond = op.input("Condition")
        elif op.type == "conditional_block":
            input_cond = op.input("Cond")

        # the dims mapping of condition tensor should be replicative
        for var_name in input_cond:
            var = get_var_with_recursion(
                var_name, sub_block, self.auto_parallel_main_prog
            )
            dist_tensor = self.dist_context.get_dist_tensor_for_program(var)
            tensor_dist_attr = dist_tensor.dist_attr
            var_dims_mapping = tensor_dist_attr.dims_mapping
            for dim in var_dims_mapping:
                if dim != -1:
                    return False

        return True

    def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
        """Judge the tensor whether needs to be resharded."""
        is_reshard = False
        tensor_dist_attr = dist_tensor.dist_attr
        tensor_dims_mapping = tensor_dist_attr.dims_mapping
        tensor_process_mesh = tensor_dist_attr.process_mesh

        # dist_attr is [process_mesh, dims_mapping] and process_mesh is not a union
        op_process_mesh = dist_attr[0]

1336
        if op_input:
1337
            op_input_dims_mapping = dist_attr[1]
1338
            if all(
1339 1340 1341 1342 1343 1344 1345 1346 1347 1348
                map(
                    lambda x: x,
                    [
                        tensor_dims_mapping,
                        tensor_process_mesh,
                        op_input_dims_mapping,
                        op_process_mesh,
                    ],
                )
            ):
                # judge whether need reshard by dims_mapping
                if tensor_dims_mapping != op_input_dims_mapping:
                    if (
                        tensor_process_mesh
                        not in self.dist_context.process_meshes
                    ):
                        # every dim must be -1 when the tensor process mesh is a union.
                        for item in tensor_dims_mapping:
                            if item != -1:
                                raise ValueError(
                                    "The dim must be -1 when tensor process mesh is a union."
                                )
                        # tensor process_mesh: [0, 1, 2, 3], dims_mapping: [-1, -1]
                        # op process_mesh: [4, 5], dims_mapping: [0, -1]
                        # reshard is not supported such as above
                        if not is_reshard:
                            return is_reshard
                        else:
                            raise ValueError(
                                "it is not supported that tensor process mesh is a union and needs reshard."
                            )
                    is_reshard = True

                # judge whether need reshard by process_mesh
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True
        else:
            op_output_dims_mapping = dist_attr[1]
            if all(
                map(
                    lambda x: x,
                    [
                        tensor_dims_mapping,
                        tensor_process_mesh,
                        op_output_dims_mapping,
                        op_process_mesh,
                    ],
                )
            ):
                if tensor_dims_mapping != op_output_dims_mapping:
                    raise ValueError(
                        "It is not supported that tensor dims mapping is different from op output dims mapping."
                    )
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True

        return is_reshard

    def get_op_process_meshes(self, op):
        """Get sub process meshes of the given op if op process mesh is a union."""
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        op_process_mesh = dist_op.dist_attr.process_mesh

        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.processes) & (
                set(op_process_mesh.processes)
            ) and len(process_mesh.processes) < len(op_process_mesh.processes):
                process_meshes.append(process_mesh)

        # it means the process mesh is not a union when process meshes is null
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        return process_meshes

    def find_op_desc_seq(self, dist_tensor, dist_attr, serial=False):
        """
        Find the op description sequence that reshards the source tensor to match the op requirement.

        Args:
            dist_tensor (DistributedTensor): A distributed tensor.
            dist_attr (list): A list containing the process_mesh and dims_mapping, such as [process_mesh, dims_mapping].
            serial (bool): If serial is true, the dist tensor and dist op come from serial program. Otherwise, they come from auto program.

        Returns:
            Dict, the required op description sequence for each process. The key of the dict is
            a process and the value is a list of op descriptions.
        """
        tensor_dist_attr = dist_tensor.dist_attr
        source_tensor = dist_tensor.serial_tensor
        tensor_name = source_tensor.name

        source_dims_mapping = tensor_dist_attr.dims_mapping
        source_process_mesh = tensor_dist_attr.process_mesh
        source_process_group = source_process_mesh.processes
        source_process_shape = source_process_mesh.topology

        target_process_mesh = dist_attr[0]
        target_dims_mapping = dist_attr[1]
        target_process_group = target_process_mesh.processes
        target_process_shape = target_process_mesh.topology

        if source_tensor.shape[0] < 0:
            assert source_tensor.shape[0] == -1
            new_shape = list(source_tensor.shape)
            new_shape[0] = self.batch_size
            source_tensor.desc.set_shape(new_shape)

        complete_shape = (
            Resharder.compute_complete_shape(
                source_tensor.shape, source_process_shape, source_dims_mapping
            )
            if not serial
            else source_tensor.shape
        )
        op_desc_seq = {}

        # TODO: support the case where the target process group intersects with but differs from the source process group
        if set(target_process_group).intersection(
            set(source_process_group)
        ) and set(target_process_group).difference(set(source_process_group)):
            pass

        elif target_process_group != source_process_group:
            partition_process_mapping_list = []
            for source_process in source_process_group:
                # get partition index of source process
                source_partition_index = Resharder.compute_partition_index(
                    source_process,
                    complete_shape,
                    source_dims_mapping,
                    source_process_shape,
                    source_process_group,
                )
                if not partition_process_mapping_list:
                    # each item in partition_process_mapping_list is [source_partition_index, the processes holding it, whether each process has been used]
                    partition_process_mapping_list.append(
                        [source_partition_index, [source_process], [False]]
                    )
                else:
                    partition_list = list(
                        [item[0] for item in partition_process_mapping_list]
                    )
                    process_list = list(
                        [item[1] for item in partition_process_mapping_list]
                    )
                    has_used = list(
                        [item[2] for item in partition_process_mapping_list]
                    )

                    if partition_list.count(source_partition_index) == 1:
                        index = partition_list.index(source_partition_index)
                        process_list[index].append(source_process)
                        has_used[index].append(False)
                    else:
                        partition_process_mapping_list.append(
                            [source_partition_index, [source_process], [False]]
                        )

            for target_process in target_process_group:
                # has_sent records the source partition indexes that have already been sent to target_process
                has_sent = []
                target_partition_index = Resharder.compute_partition_index(
                    target_process,
                    complete_shape,
                    target_dims_mapping,
                    target_process_shape,
                    target_process_group,
                )
                partition_index_list = []
                all_partition_index_list = []
                for source_process in source_process_group:
                    source_partition_index = Resharder.compute_partition_index(
                        source_process,
                        complete_shape,
                        source_dims_mapping,
                        source_process_shape,
                        source_process_group,
                    )
                    to_send_process = None
                    if (
                        all(
                            _
                            for _ in list(
                                map(
                                    self.is_overlapped,
                                    source_partition_index,
                                    target_partition_index,
                                )
                            )
                        )
                        and source_partition_index not in has_sent
                    ):
                        idx = list(
                            [item[0] for item in partition_process_mapping_list]
                        ).index(source_partition_index)
                        has_used = list(
                            [item[2] for item in partition_process_mapping_list]
                        )[idx]
                        process_list = list(
                            [item[1] for item in partition_process_mapping_list]
                        )[idx]
                        i = 0
                        while i < len(has_used):
                            if not has_used[i]:
                                to_send_process = process_list[i]
                                has_used[i] = True
                                break
                            i += 1

                        if i == len(has_used):
                            has_used = list(map(lambda x: False, has_used))
                            to_send_process = process_list[0]
                            has_used[0] = True
                        assert (
                            to_send_process is not None
                        ), "Failed to find the send process."

                        if to_send_process not in op_desc_seq.keys():
                            op_desc_seq[to_send_process] = []
                        if target_process not in op_desc_seq.keys():
                            op_desc_seq[target_process] = []
                        all_partition_index_list.append(source_partition_index)

                        # append send and recv op desc
                        is_bool = dist_tensor.serial_tensor.dtype == paddle.bool
                        send_op_desc = SendOpDesc(
                            source_partition_index,
                            to_send_process,
                            target_process,
                            is_bool=is_bool,
                        )
                        recv_op_desc = RecvOpDesc(
                            source_partition_index,
                            to_send_process,
                            target_process,
                            is_bool=is_bool,
                        )
                        op_desc_seq[to_send_process].append(send_op_desc)
                        op_desc_seq[target_process].append(recv_op_desc)
                        has_sent.append(source_partition_index)
                        Resharder.concat_partitions(
                            partition_index_list, source_partition_index
                        )

                # append concat op desc
                op_desc_seq[target_process].append(
                    ConcatOpDesc(all_partition_index_list)
                )

                # append slice op desc
                slice_starts = []
                slice_ends = []
                slices_axes = []
                concatenated_partition_index = partition_index_list[0]
                to_slice_tensor_shape = []

                for idx, item in enumerate(concatenated_partition_index):
                    slice_starts.append(
                        target_partition_index[idx][0] - item[0]
                    )
                    slice_ends.append(target_partition_index[idx][1] - item[0])
                    slices_axes.append(idx)
                    to_slice_tensor_shape.append(item[1] - item[0])

                op_desc_seq[target_process].append(
                    SliceOpDesc(
                        slice_starts,
                        slice_ends,
                        slices_axes,
                        shape=to_slice_tensor_shape,
                    )
                )

        # In the same process group, allgather and slice ops are used.
        else:
            # NOTE: Only the evenly partitioned scene is supported.
            partition_index_list = []
            all_partition_index_list = []
            process_index = []
            for source_process in source_process_group:
                source_partition_index = Resharder.compute_partition_index(
                    source_process,
                    complete_shape,
                    source_dims_mapping,
                    source_process_shape,
                    source_process_group,
                )
                if source_partition_index not in partition_index_list:
                    partition_index_list.append(source_partition_index)
                    process_index.append(
                        [
                            [
                                source_process,
                            ],
                            source_partition_index,
                        ]
                    )
                else:
                    process_index[
                        partition_index_list.index(source_partition_index)
                    ][0].append(source_process)

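            # each group takes one process per distinct partition, so an
            # allgather within the group can rebuild the complete tensor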
            for i in range(len(process_index[0][0])):
                group = []
                for j in range(len(process_index)):
                    group.append(process_index[j][0][i])
                    if i == 0:
                        all_partition_index_list.append(process_index[j][1])
                for process in group:
                    # append slice op desc
                    slice_starts = []
                    slice_ends = []
                    slices_axes = []
                    target_partition_index = Resharder.compute_partition_index(
                        process,
                        complete_shape,
                        target_dims_mapping,
                        target_process_shape,
                        target_process_group,
                    )
                    for idx, item in enumerate(target_partition_index):
                        slice_starts.append(item[0])
                        slice_ends.append(item[1])
                        slices_axes.append(idx)

                    to_slice_tensor_shape = dist_tensor.global_sizes()
                    slice_op_desc = SliceOpDesc(
                        starts=slice_starts,
                        ends=slice_ends,
                        axes=slices_axes,
                        shape=to_slice_tensor_shape,
                    )
                    allgather_shape = (
                        None
                        if not serial
                        else dist_tensor.local_sizes(rank=process)
                    )
                    # use c_concat when the target is fully replicated and only the
                    # last dim of the source is sharded, e.g. source dims_mapping
                    # [-1, 0] -> target dims_mapping [-1, -1]
                    if (
                        target_dims_mapping.count(-1)
                        == len(target_dims_mapping)
                        and source_dims_mapping[:-1].count(-1)
                        == len(source_dims_mapping[:-1])
                        and source_dims_mapping[-1] != -1
                    ):
                        op_desc_seq[process] = [
                            AllGatherConcatOpDesc(
                                group=group, shape=allgather_shape
                            )
                        ]
                    else:
                        op_desc_seq[process] = (
                            [
                                AllGatherOpDesc(
                                    group=group,
                                    shape=allgather_shape,
                                    is_bool=(
                                        source_tensor.dtype == paddle.bool
                                    ),
                                ),
                                ConcatOpDesc(
                                    partition_index_list=all_partition_index_list
                                ),
                                slice_op_desc,
                            ]
                            if len(group) > 1
                            else [slice_op_desc]
                        )

        return op_desc_seq

    def parse_op_desc(
        self, block, op_desc_seq, var_name, reshard_op, dist_attr
    ):
        """Parse the op description sequence and insert the corresponding ops into the block."""
        tensor_list = []
        partition_tensor_list = []
        if self.rank_id not in op_desc_seq.keys():
            return
        op_desc_list = op_desc_seq[self.rank_id]

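        # locate the index of the op that triggers this reshard in the block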
        idx = None
        for index, op in list(enumerate(block.ops)):
            if op.desc.id == reshard_op.desc.id:
                idx = index
                break
        assert (
            idx is not None
        ), "The op for reshard cannot be found in the rank {} program.".format(
            self.rank_id
        )

        matched_op = block.ops[idx]
        source_tensor = get_var_with_recursion(
            var_name, block, self.auto_parallel_main_prog
        )
        for op_desc in op_desc_list:
            if isinstance(op_desc, AllGatherOpDesc):  # noqa: F401
                if var_name not in self.has_allgather.keys():
                    self.has_allgather[var_name] = []
                if not self.has_allgather[
                    var_name
                ] or op_desc.group not in list(
                    map(lambda x: x[0], self.has_allgather[var_name])
                ):
                    if op_desc.is_bool:
                        # for bool data allgather, cast to int64 -> allgather -> cast bool
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx,
                            source_tensor,
                            reshard_op.attr('op_role'),
                            paddle.int64,
                        )
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block,
                            idx + 1,
                            out_cast,
                            op_desc.group,
                            reshard_op.attr('op_role'),
                        )
                        idx += idx_offset
                        tensor_name_list = []
                        for var in tensor_list:
                            out_cast = Inserter.insert_cast_op(
                                block,
                                idx,
                                var,
                                reshard_op.attr('op_role'),
                                paddle.bool,
                            )
                            tensor_name_list.append(out_cast.name)
                            idx += 1
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list]
                        )
                    else:
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block,
                            idx,
                            source_tensor,
                            op_desc.group,
                            reshard_op.attr('op_role'),
                        )
                        idx += idx_offset
                        tensor_name_list = [var.name for var in tensor_list]
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list]
                        )
                else:
                    for item in self.has_allgather[var_name]:
                        if op_desc.group == item[0]:
                            tensor_list = [
                                get_var_with_recursion(
                                    var_name,
                                    block,
                                    self.auto_parallel_main_prog,
                                )
                                for var_name in item[1]
                            ]
                            break
                assert (
                    tensor_list
                ), "The result of parsing allgather op should not be None."

            elif isinstance(op_desc, SendOpDesc):
                if var_name not in self.has_sent.keys():
                    self.has_sent[var_name] = []
                if op_desc.dst not in self.has_sent[var_name]:
                    if op_desc.is_bool:
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx,
                            source_tensor,
                            reshard_op.attr('op_role'),
                            paddle.int64,
                        )
                        Inserter.insert_send_op(
                            block,
                            idx + 1,
                            out_cast,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        idx += 2
                    else:
                        Inserter.insert_send_op(
                            block,
                            idx,
                            source_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        idx += 1
                    self.has_sent[var_name].append(op_desc.dst)

            elif isinstance(op_desc, RecvOpDesc):
                if var_name not in self.has_recv.keys():
                    self.has_recv[var_name] = {}
                if op_desc.src not in self.has_recv[var_name].keys():
                    partition_index = op_desc.partition_index
                    shape = []
                    for index in partition_index:
                        shape.append(index[1] - index[0])
                    if op_desc.is_bool:
                        # for bool data, recv int64 -> cast to bool
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=paddle.int64,
                            type=source_tensor.type,
                        )
                        Inserter.insert_recv_op(
                            block,
                            idx,
                            recv_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx + 1,
                            recv_tensor,
                            reshard_op.attr('op_role'),
                            paddle.bool,
                        )
                        tensor_list.append(out_cast)
                        idx += 2
                        self.has_recv[var_name][op_desc.src] = out_cast
                    else:
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=source_tensor.dtype,
                            type=source_tensor.type,
                        )
                        Inserter.insert_recv_op(
                            block,
                            idx,
                            recv_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )

                        # for a lod tensor, the lod must be reset after it is received
                        if recv_tensor.lod_level != 0:
                            set_lod = False
                            # use data lod to reset tensor lod
                            for (
                                tmp_block
                            ) in self.auto_parallel_main_prog.blocks:
                                for tmp_var_name in tmp_block.vars:
                                    tmp_var = tmp_block.vars[tmp_var_name]
                                    if (
                                        tmp_var.is_data
                                        and tmp_var.lod_level
                                        == recv_tensor.lod_level
                                    ):
                                        reset_lod_out = (
                                            Inserter.insert_reset_lod_op(
                                                block,
                                                idx + 1,
                                                recv_tensor,
                                                tmp_var,
                                                reshard_op.attr('op_role'),
                                            )
                                        )
                                        tensor_list.append(reset_lod_out)
                                        idx += 2
                                        self.has_recv[var_name][
                                            op_desc.src
                                        ] = reset_lod_out
                                        set_lod = True
                                        break
                                if set_lod:
                                    break
                            assert set_lod is True
                        else:
                            tensor_list.append(recv_tensor)
                            idx += 1
                            self.has_recv[var_name][op_desc.src] = recv_tensor
                else:
                    tensor_list.append(self.has_recv[var_name][op_desc.src])

            elif isinstance(op_desc, ConcatOpDesc):
                partition_index_list = op_desc.partition_index_list
                idx_list = [idx]
                for index, tensor in enumerate(tensor_list):
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list,
                        tensor,
                        partition_index_list[index],
                        block,
                        idx_list,
                        reshard_op.attr('op_role'),
                    )
                idx = idx_list[0]

            elif isinstance(op_desc, SliceOpDesc) or isinstance(
                op_desc, AllGatherConcatOpDesc
            ):
                target_tensor = None
                if isinstance(op_desc, SliceOpDesc):
                    assert (
                        len(partition_tensor_list) == 1
                        or not partition_tensor_list
                    )
                    to_slice_tensor = (
                        partition_tensor_list[0][0]
                        if len(partition_tensor_list) == 1
                        else source_tensor
                    )
                    new_name = unique_name.generate(var_name + "@RESHARD")
                    target_tensor = Inserter.insert_slice_op(
                        block,
                        idx,
                        to_slice_tensor,
                        starts=op_desc.starts,
                        ends=op_desc.ends,
                        axes=op_desc.axes,
                        new_var_name=new_name,
                        op_role=reshard_op.attr('op_role'),
                    )
                else:
                    target_tensor = Inserter.insert_c_concat_op(
                        block,
                        idx,
                        source_tensor,
                        op_desc.group,
                        reshard_op.attr('op_role'),
                    )

                assert target_tensor is not None
                process_mesh = dist_attr[0]
                dims_mapping = dist_attr[1]

                tensor_attr = TensorDistributedAttribute()
                tensor_attr.dims_mapping = dims_mapping
                tensor_attr.process_mesh = process_mesh
                self.dist_context.set_tensor_dist_attr_for_program(
                    target_tensor, tensor_attr
                )

                if matched_op.type == "while":
                    # var_reshard_mapping records the new names that the while op inputs should be renamed to
                    if (
                        "var_reshard_mapping"
                        not in Resharder.while_block_info[
                            op.attr("sub_block").id
                        ].keys()
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id][
                            "var_reshard_mapping"
                        ] = {}
                    if (
                        var_name
                        not in Resharder.while_block_info[
                            op.attr("sub_block").id
                        ]["var_reshard_mapping"].keys()
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id][
                            "var_reshard_mapping"
                        ][var_name] = []
                    Resharder.while_block_info[op.attr("sub_block").id][
                        "var_reshard_mapping"
                    ][var_name].append([dist_attr, target_tensor.name])

                # rename op input name according to new name
                for op in block.ops:
                    # just for while op
                    while_op_X_append = []
                    for name in op.input_arg_names:
                        op_dist_attr = (
                            self.dist_context.get_op_dist_attr_for_program(op)
                        )
                        if name == var_name and op_dist_attr is not None:
                            if op.desc.id() == matched_op.desc.id():
                                if matched_op.type == "while":
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = (
                                        op_dist_attr.get_input_dist_attr(
                                            old_name
                                        )
                                    )
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr
                                    )
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping
                                    )
                                    if (
                                        old_name
                                        in op_dist_attr._inputs_dist_attrs
                                    ):
                                        op_dist_attr.del_input_dist_attr(
                                            old_name
                                        )
                                    while_op_X_append.append(new_name)
                                    continue
                                else:
                                    op.desc._rename_input(
                                        name, target_tensor.name
                                    )
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = (
                                        op_dist_attr.get_input_dist_attr(
                                            old_name
                                        )
                                    )
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr
                                    )
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping
                                    )
                                    op_dist_attr.del_input_dist_attr(old_name)
                                    continue

                            op_process_mesh = op_dist_attr.process_mesh
                            op_input_dims_mapping = (
                                op_dist_attr.get_input_dims_mapping(var_name)
                            )
                            # NOTE: For an op whose process mesh is a union, its input is not renamed by the reshard result of other ops for now, which means it incurs more reshard operations.
                            if (
                                op_process_mesh == process_mesh
                                and op_input_dims_mapping == dims_mapping
                            ):
                                op.desc._rename_input(name, target_tensor.name)
                                old_name = name
                                new_name = target_tensor.name
                                assert old_name != new_name
                                op_input_dist_attr = (
                                    op_dist_attr.get_input_dist_attr(old_name)
                                )
                                op_dist_attr.set_input_dist_attr(
                                    new_name, op_input_dist_attr
                                )
                                op_dist_attr.set_input_dims_mapping(
                                    new_name, dims_mapping
                                )
                                op_dist_attr.del_input_dist_attr(old_name)

                    # for the while op, the input X should be reset
                    if while_op_X_append:
                        proto = OpProtoHolder.instance().get_op_proto(op.type)
                        op.desc.set_input(
                            proto.inputs[0].name,
                            op.input("X") + while_op_X_append,
                        )

    def _get_subblock_input_attrs(self, op, var_name):
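        """Get the [process_mesh, dims_mapping] attrs of var_name required by the ops in this op's sub block."""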
        # NOTE: Multiple while loops are not supported
        assert op.type in _g_subblock_ops
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]
        ops = sub_block.ops
        input_attrs = []

        for op in ops:
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if not dist_op:
                continue
            dist_attr = dist_op.dist_attr
            for name in op.input_arg_names:
                if name == var_name:
                    process_mesh = dist_attr.process_mesh
                    input_dims_mapping = dist_attr.get_input_dims_mapping(
                        var_name
                    )
                    has_exist = False
                    for input_attr in input_attrs:
                        if (
                            process_mesh == input_attr[0]
                            and input_dims_mapping == input_attr[1]
                        ):
                            has_exist = True
                            break
                    if not has_exist:
                        input_attrs.append([process_mesh, input_dims_mapping])
        return input_attrs

    def _get_common_op_input_attrs(self, op, var_name):
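        """Get the [process_mesh, dims_mapping] attrs of var_name for a non-subblock op, one per sub process mesh when the op process mesh is a union."""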
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        dist_attr = dist_op.dist_attr
        op_process_mesh = dist_attr.process_mesh
        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.processes) & (
                set(op_process_mesh.processes)
            ) and len(process_mesh.processes) < len(op_process_mesh.processes):
                process_meshes.append(process_mesh)

        # if process_meshes is empty, the op process mesh is not a union
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        input_dims_mapping = dist_attr.get_input_dims_mapping(var_name)
        input_attrs = []
        for process_mesh in process_meshes:
            input_attrs.append([process_mesh, input_dims_mapping])

        return input_attrs

    def get_op_input_attrs(self, op, var_name):
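        """Get the input dist attrs of var_name for op, dispatching to the sub block or the common op handler."""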
        op_input_attrs = []

        if op.type in _g_subblock_ops:
            op_input_attrs = self._get_subblock_input_attrs(op, var_name)
        else:
            op_input_attrs = self._get_common_op_input_attrs(op, var_name)

        assert op_input_attrs

        return op_input_attrs

    def _remove_global_process_mesh(self):
        """Remove global process mesh from dist_context.process_meshes"""
        processes = set()
        process_mesh_count = len(self.dist_context.process_meshes)
        if process_mesh_count > 1:
            global_process_mesh_idx = None
            for process_mesh in self.dist_context.process_meshes:
                for process in process_mesh.processes:
                    processes.add(process)
            for idx, process_mesh in enumerate(
                self.dist_context.process_meshes
            ):
                if len(set(process_mesh.processes)) == len(processes):
                    global_process_mesh_idx = idx
                    break

            if global_process_mesh_idx is not None:
                is_removed = False
                global_mesh = self.dist_context.process_meshes[idx]
                for i, mesh in enumerate(self.dist_context.process_meshes):
                    if i == idx:
                        continue
                    if set(mesh.processes) < set(global_mesh.processes):
                        is_removed = True

                if is_removed:
                    self.dist_context.process_meshes.pop(idx)

    def _change_subblock_op_input_and_output(self, block_idx, block):
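        """Rename the sub block ops' inputs and outputs according to the recorded var_reshard_mapping."""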
        if "var_reshard_mapping" in Resharder.while_block_info[block_idx]:
            var_reshard_mapping = Resharder.while_block_info[block_idx][
                "var_reshard_mapping"
            ]
            for op in block.ops:
                for var_name in op.input_arg_names:
                    if var_name in var_reshard_mapping:
                        # in the while sub block, the union process mesh has not been split before resharding the sub block
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        dist_attr = dist_op.dist_attr
                        target_name = None
                        for item in var_reshard_mapping[var_name]:
                            if (
                                dist_attr.process_mesh == item[0][0]
                                and dist_attr.get_input_dims_mapping(var_name)
                                == item[0][1]
                            ):
                                target_name = item[1]
                                break
                        if target_name is None:
                            continue
                        else:
                            op.desc._rename_input(var_name, target_name)
                            dist_op = self.dist_context.get_dist_op_for_program(
                                op
                            )
                            op_dist_attr = dist_op.dist_attr
                            old_name = var_name
                            new_name = target_name
                            assert old_name != new_name
                            op_input_dist_attr = (
                                op_dist_attr.get_input_dist_attr(old_name)
                            )
                            op_dist_attr.set_input_dist_attr(
                                new_name, op_input_dist_attr
                            )
                            op_dist_attr.del_input_dist_attr(old_name)

                # the outputs also need to be renamed when the output name is the same as the input name in an inplace op
                for var_name in op.output_arg_names:
                    # if the tensor has been resharded multiple times, it is not supported now
                    if var_name in var_reshard_mapping:
                        if len(var_reshard_mapping[var_name]) > 1:
                            raise ValueError(
                                "The scene where the output is inplaced and its tensor has been resharded multiple times as an input is not supported."
                            )
                        target_name = var_reshard_mapping[var_name][0][1]

                        op.desc._rename_output(var_name, target_name)
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        op_dist_attr = dist_op.dist_attr
                        old_name = var_name
                        new_name = target_name
                        assert old_name != new_name
                        op_output_dist_attr = op_dist_attr.get_output_dist_attr(
                            old_name
                        )
                        op_dist_attr.set_output_dist_attr(
                            new_name, op_output_dist_attr
                        )
                        op_dist_attr.del_output_dist_attr(old_name)

    def _reshard_input(self, block):
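        """Insert reshard ops for the inputs of ops whose dist attr does not match the input tensor's dist attr."""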
        idx = 0
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]

            if self.is_special_op(op):
                idx += 1
                continue

            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None:
                op_input_dist_attrs = (
                    []
                )  # [(op_process_mesh, op_input_dims_mapping), (op_process_mesh, op_input_dims_mapping)]
                if op.type in _g_subblock_ops:
                    if not self.is_condition_replicative(op):
                        raise ValueError(
                            "Please check the condition due to the dims mapping is not replicative."
                        )
                    if (
                        op.attr("sub_block").id
                        not in Resharder.while_block_info
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id] = {}
                    Resharder.while_block_info[op.attr("sub_block").id][
                        "op_id"
                    ] = op.desc.id()

                if op.type == "while":
                    # the condition var's process mesh is the same as the op's and its dims_mapping is replicative, so it does not need reshard
                    input_var_names = op.input("X")
                elif op.type == "conditional_block":
                    input_var_names = op.input("Input")
                else:
                    input_var_names = op.input_arg_names
                # sort to avoid a different ordering of the while op input X
                input_var_names.sort()

                idx_offset = 0
                for var_name in input_var_names:
                    # skip lod_tensor_blocking_queue_? name
                    if "lod_tensor_blocking_queue" in var_name:
                        continue
                    var = get_var_with_recursion(
                        var_name, block, self.auto_parallel_main_prog
                    )
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var
                    )

                    # judge whether the union tensor's dims_mapping is all -1
                    is_union_process_mesh_tensor = False
                    if (
                        dist_tensor.dist_attr.process_mesh
                        not in self.dist_context.process_meshes
                        and self.dist_context.process_meshes
                    ):
                        is_union_process_mesh_tensor = True
                        assert dist_tensor.dist_attr.dims_mapping.count(
                            -1
                        ) == len(dist_tensor.dist_attr.dims_mapping)

                    op_input_attrs = self.get_op_input_attrs(op, var_name)
                    for input_attr in op_input_attrs:
                        input_process_mesh = None

                        # deal with union tensor
                        if is_union_process_mesh_tensor:
                            # if the op process mesh is a subset of the union tensor process mesh, no reshard is needed
                            if set(input_attr[0].processes) <= set(
                                dist_tensor.dist_attr.process_mesh.processes
                            ):
                                continue

                        if dist_tensor is not None and self.need_reshard(
                            dist_tensor, input_attr
                        ):
                            reshard_op_desc = self.find_op_desc_seq(
                                dist_tensor, input_attr
                            )
                            self.parse_op_desc(
                                block, reshard_op_desc, var_name, op, input_attr
                            )
                            cur_op_count = len(block.ops)
                            idx_offset = (
                                idx_offset + cur_op_count - pre_op_count
                            )
                            pre_op_count = cur_op_count
                idx = idx + idx_offset + 1
            else:
                idx += 1

    def _hadnle_recv(self, block, idx, var, op, send_rank, recv_rank):
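        """Insert a recv op on the receiver rank, with a cast for bool data and a lod_reset for lod tensors."""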
        if self.rank_id == recv_rank:
            # if recv bool data, recv then cast
            if var.dtype == paddle.bool:
                recv_cast_out = block.create_var(
                    name=unique_name.generate(var.name + "@recv"),
                    shape=var.shape,
                    lod_level=var.lod_level,
                    dtype=paddle.int64,
                    type=var.type,
                )
                Inserter.insert_recv_op(
                    block,
                    idx + 1,
                    recv_cast_out,
                    send_rank,
                    recv_rank,
                    op.attr('op_role'),
                )
                reset_lod_out = None
                if var.lod_level != 0:
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if (
                                tmp_var.is_data
                                and tmp_var.lod_level == var.lod_level
                            ):
                                reset_lod_out = block.create_var(
                                    name=unique_name.generate(
                                        var.name + "@RESETLOD"
                                    ),
                                    shape=recv_cast_out.shape,
                                    type=recv_cast_out.type,
                                    dtype=recv_cast_out.dtype,
                                    lod_level=recv_cast_out.lod_level,
                                )
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={'X': recv_cast_out, 'Y': tmp_var},
                                    outputs={'Out': reset_lod_out},
                                    attrs={'op_role': op.attr("op_role")},
                                )
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True

                # cast int64 to bool
                block._insert_op(
                    idx + 2,
                    type='cast',
                    inputs={
                        'X': [recv_cast_out]
                        if reset_lod_out is None
                        else [reset_lod_out]
                    },
                    outputs={'Out': [var]},
                    attrs={
                        'in_dtype': recv_cast_out.dtype,
                        'out_dtype': var.dtype,
                        'op_role': op.attr('op_role'),
                    },
                )
            else:
                if var.lod_level != 0:
                    recv_out = block.create_var(
                        name=unique_name.generate(var.name + "@recv"),
                        shape=var.shape,
                        lod_level=var.lod_level,
                        dtype=var.dtype,
                        type=var.type,
                    )
                    Inserter.insert_recv_op(
                        block,
                        idx + 1,
                        recv_out,
                        send_rank,
                        recv_rank,
                        op.attr('op_role'),
                    )
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if (
                                tmp_var.is_data
                                and tmp_var.lod_level == var.lod_level
                            ):
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={'X': recv_out, 'Y': tmp_var},
                                    outputs={'Out': var},
                                    attrs={'op_role': op.attr("op_role")},
                                )
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True
                else:
                    Inserter.insert_recv_op(
                        block,
                        idx + 1,
                        var,
                        send_rank,
                        recv_rank,
                        op.attr('op_role'),
                    )

    def _handle_send(self, block, idx, var, op, send_rank, recv_rank):
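        """Insert a send op on the sender side, casting bool data to int64 before sending."""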
        if var.dtype == paddle.bool:
            cast_out = Inserter.insert_cast_op(
                block, idx + 1, var, op.attr('op_role'), paddle.int64
            )
            Inserter.insert_send_op(
                block,
                idx + 2,
                cast_out,
                send_rank,
                recv_rank,
                op.attr('op_role'),
            )
        else:
            Inserter.insert_send_op(
                block, idx + 1, var, send_rank, recv_rank, op.attr('op_role')
            )

    def _reshard_output(self, block):
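        """Insert send/recv ops when an op output's process mesh differs from the tensor's process mesh."""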
        # insert send and recv op if output process mesh is different from tensor process mesh
        idx = 0
        # skip reader and ops whose process mesh is union
        skip_ops = [
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
            "write_to_array",
            "read_from_array",
        ]
        global _g_special_ops
        skip_ops += _g_special_ops
        skip_ops += _g_subblock_ops
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None and op.type not in skip_ops:
                idx_offset = 0
                for var_name in op.output_arg_names:
                    var = get_var_with_recursion(
                        var_name, block, self.auto_parallel_main_prog
                    )
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var
                    )
                    tensor_process_mesh = dist_tensor.dist_attr.process_mesh
                    output_attr = [
                        dist_op.dist_attr.process_mesh,
                        dist_op.dist_attr.get_output_dims_mapping(var_name),
                    ]
                    if dist_tensor is not None and self.need_reshard(
                        dist_tensor, output_attr, False
                    ):
                        tensor_processes = set(
                            tensor_process_mesh.processes
                        ) - (
                            set(tensor_process_mesh.processes)
                            & set(output_attr[0].processes)
                        )
                        if tensor_processes:
                            if len(tensor_processes) != len(
                                output_attr[0].processes
                            ):
                                if dist_tensor.dist_attr.dims_mapping.count(
                                    -1
                                ) != len(
                                    dist_tensor.dist_attr.dims_mapping
                                ) or output_attr[
                                    1
                                ].count(
                                    -1
                                ) != len(
                                    output_attr[1]
                                ):
                                    raise ValueError(
                                        "The dims_mapping must be -1"
                                    )
                                else:
                                    for index, tensor_process in enumerate(
                                        tensor_processes
                                    ):
                                        recv_rank = tensor_process
                                        actual_index = index
                                        if index >= len(
                                            output_attr[0].processes
                                        ):
                                            actual_index = (
                                                index
                                                - len(output_attr[0].processes)
                                            ) % len(output_attr[0].processes)
                                        item = output_attr[0].processes[
                                            actual_index
                                        ]
                                        if recv_rank == item:
                                            continue
                                        if self.rank_id == item:
                                            # if send bool data, cast then send
                                            self._handle_send(
                                                block,
                                                idx,
                                                var,
                                                op,
                                                item,
                                                recv_rank,
                                            )
                                        if self.rank_id == recv_rank:
                                            # if recv bool data, recv then cast
                                            self._hadnle_recv(
                                                block,
                                                idx,
                                                var,
                                                op,
                                                item,
                                                recv_rank,
                                            )
                            else:
                                for index, tensor_process in enumerate(
                                    tensor_processes
                                ):
                                    recv_rank = tensor_process
                                    item = output_attr[0].processes[index]
                                    if recv_rank == item:
                                        continue
                                    if self.rank_id == item:
                                        # if send bool data, cast then send
                                        self._handle_send(
                                            block, idx, var, op, item, recv_rank
                                        )
                                    if self.rank_id == recv_rank:
                                        # if recv bool data, recv then cast
                                        self._hadnle_recv(
                                            block, idx, var, op, item, recv_rank
                                        )

                            cur_op_count = len(block.ops)
                            idx_offset = (
                                idx_offset + cur_op_count - pre_op_count
                            )
                            pre_op_count = cur_op_count

                idx = idx + idx_offset + 1
            else:
                idx += 1

    def reshard(self):
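        """
        Reshard the main program block by block: rename sub-block vars where
        needed, reshard op inputs and outputs, and finally remove the vars
        and ops that are no longer needed in the main and startup programs.
        """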
        self._remove_global_process_mesh()
        for block_idx, block in enumerate(self.auto_parallel_main_prog.blocks):
            # change the var names of sub-block ops before resharding the sub-block
            if block_idx in Resharder.while_block_info:
                self._change_subblock_op_input_and_output(block_idx, block)

            # reshard input
            self._reshard_input(block)

            # reshard output
            # NOTE: Only supports inserting send and recv ops when the output process mesh differs from the tensor process mesh
            self._reshard_output(block)

        # remove unneeded vars and ops from the main program
        Remover.remove_no_need_in_main(
            self.auto_parallel_main_prog,
            self.dist_context,
            self.rank_id,
            self.dist_params_grads,
        )

        # remove unneeded vars and ops from the startup program
        Remover.remove_no_need_in_startup(
            self.auto_parallel_main_prog, self.auto_parallel_startup_prog
        )

        # reset the shared while-block info once the removal passes finish
        Resharder.while_block_info = {}

    def get_cost(self, op, tensor, cluster):
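        """
        Estimate the cost of resharding the tensor when it is consumed as an
        input of the op on the given cluster.

        Returns None for unsupported ops and for tensors that have already
        been resharded with the same dist_attr; otherwise returns a tuple of
        (comm_costs, local_rank_comp_cost).
        """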
        # NOTE: The program should be the serial_program, which has not been partitioned
        global _g_special_ops
        not_supported_op_type = _g_special_ops + ["while"]
        reshard_op_cost = None
        if op.type in not_supported_op_type:
            return reshard_op_cost
        else:
            tensor_name = tensor.name
            if tensor_name == "lod_tensor_blocking_queue_0":
                return reshard_op_cost
            else:
                dist_tensor = self.dist_context.get_dist_tensor_for_program(
                    tensor
                )
                # simplified processing: ignore union process mesh and output reshard
                dist_op = self.dist_context.get_dist_op_for_program(op)
                dims_mapping = dist_op.dist_attr.get_input_dims_mapping(
                    tensor.name
                )
                process_mesh = dist_op.dist_attr.process_mesh
                dist_attr = [process_mesh, dims_mapping]
                if dist_tensor is not None and self.need_reshard(
                    dist_tensor, dist_attr
                ):
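                    # skip the cost if this tensor has already been resharded
                    # with the same dims_mapping and process mesh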
                    if tensor_name not in self._has_resharded:
                        self._has_resharded[tensor_name] = [dist_op]
                    else:
                        for item in self._has_resharded[tensor_name]:
                            item_dist_attr = item.dist_attr
                            item_dims_mapping = (
                                item_dist_attr.get_input_dims_mapping(
                                    tensor_name
                                )
                            )
                            item_process_mesh = item_dist_attr.process_mesh
                            if (
                                dims_mapping == item_dims_mapping
                                and item_process_mesh == process_mesh
                            ):
                                return reshard_op_cost
                        self._has_resharded[tensor_name].append(dist_op)

                    reshard_op_desc = self.find_op_desc_seq(
                        dist_tensor, dist_attr, serial=True
                    )
                    dtype = dist_tensor.serial_tensor.dtype
                    reshard_op_cost = self.parse_op_desc_for_cost(
                        reshard_op_desc, dtype, cluster
                    )

        return reshard_op_cost

    def _concat_partitions_for_cost(
        self,
        partition_tensor_list,
        partition_index,
        dtype,
        rank_id,
        local_rank_comp_cost,
        cluster,
    ):
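        """
        Merge partition_index into partition_tensor_list and record a
        ConcatOpCost on rank_id for every concat that is performed.
        """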
        if not partition_tensor_list:
            partition_tensor_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                (
                    concat_axis,
                    first_order,
                    new_partition,
                ) = Resharder.compute_concat_info(
                    partition_tensor_list[i], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    concat_desc = {}
                    concat_desc["op"] = "concat"
                    concat_desc["attrs"] = {"axis": concat_axis}
                    if first_order == 0:
                        concat_desc["inputs"] = {
                            "X": [
                                (dtype, partition_tensor_list[i]),
                                (dtype, partition_index),
                            ]
                        }
                    else:
                        concat_desc["inputs"] = {
                            "X": [
                                (dtype, partition_index),
                                (dtype, partition_tensor_list[i]),
                            ]
                        }
                    partition_tensor_list.pop(i)
                    if rank_id not in local_rank_comp_cost:
                        local_rank_comp_cost[rank_id] = []
                    local_rank_comp_cost[rank_id].append(
                        ConcatOpCost(op_desc=concat_desc, cluster=cluster)
                    )
                    self._concat_partitions_for_cost(
                        partition_tensor_list,
                        new_partition,
                        dtype,
                        rank_id,
                        local_rank_comp_cost,
                        cluster,
                    )
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append(partition_index)

    def parse_op_desc_for_cost(self, reshard_op_desc, dtype, cluster):
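        """
        Convert the op desc sequence produced by find_op_desc_seq into costs.

        Returns a tuple (comm_costs, local_rank_comp_cost): comm_costs groups
        the communication op costs by process group, and local_rank_comp_cost
        maps each rank to its computation op costs.
        """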
        def _get_idx(comm_ranks, group_ranks):
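            # find the first existing comm group that overlaps with
            # group_ranks; return its index (or None if there is no overlap)
            # and whether that group contains exactly the same ranks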
            res, is_the_same = None, False
            idx = 0
            while idx < len(comm_ranks):
                if comm_ranks[idx] == set(group_ranks):
                    is_the_same = True

                for rank in group_ranks:
                    if rank in comm_ranks[idx]:
                        res = idx
                        comm_ranks[idx].add(rank)
                if res is None:
                    idx += 1
                else:
                    break
            return res, is_the_same

        comm_context = CommContext(cluster)
        # run communication ops before computation ops
        # TODO: communication cost is not calculated when the var has already been transferred by the same group
        comm_costs = []
        comm_ranks = []
        local_rank_comp_cost = {}
        for key in reshard_op_desc:
            partition_tensor_list = []
            op_desc_list = reshard_op_desc[key]
            for op_desc in op_desc_list:
                if isinstance(op_desc, SendOpDesc):
                    group_ranks = [key, op_desc.dst]
                    shape = op_desc.shape
                    send_desc = build_comm_desc(
                        "send_v2", group_ranks, dtype, shape
                    )
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append(
                            [
                                (
                                    group_ranks,
                                    SendOpCost(
                                        op_desc=send_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            ]
                        )
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (
                                    group_ranks,
                                    SendOpCost(
                                        op_desc=send_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            )
                elif isinstance(op_desc, AllGatherOpDesc):
                    # NOTE: fill_constant and other auxiliary ops are not counted because their cost is negligible
                    group_ranks = op_desc.group
                    shape = op_desc.shape
                    allgather_desc = build_comm_desc(
                        "c_allgather", group_ranks, dtype, shape
                    )
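                    # the c_allgather result is split back along axis 0, so
                    # the split input shape is the gathered shape: dim 0 is
                    # scaled by the number of ranks in the group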
                    split_inputs_shape = []
                    for idx, dim in enumerate(shape):
                        if idx == 0:
                            split_inputs_shape.append(dim * len(group_ranks))
                        else:
                            split_inputs_shape.append(dim)
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append(
                            [
                                (
                                    group_ranks,
                                    AllgatherOpCost(
                                        op_desc=allgather_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            ]
                        )
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (
                                    group_ranks,
                                    AllgatherOpCost(
                                        op_desc=allgather_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            )
                    # calc the split op cost
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
                    split_desc = {}
                    split_desc["op"] = "split"
                    split_desc["inputs"] = {
                        "inputs": [(dtype, split_inputs_shape)]
                    }
                    split_desc["attrs"] = {"num": len(group_ranks), "axis": 0}
                    local_rank_comp_cost[key].append(
                        SplitOpCost(op_desc=split_desc, cluster=cluster)
                    )
                elif isinstance(op_desc, ConcatOpDesc):
                    partition_index_list = op_desc._partition_index_list
                    for idx, partition_index in enumerate(
                        partition_index_list
                    ):
                        self._concat_partitions_for_cost(
                            partition_tensor_list,
                            partition_index,
                            dtype,
                            key,
                            local_rank_comp_cost,
                            cluster,
                        )

                elif isinstance(op_desc, SliceOpDesc):
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
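                    # slice the concatenated partition if one has been built;
                    # otherwise slice the original tensor shape from op_desc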
                    assert (
                        len(partition_tensor_list) == 1
                        or not partition_tensor_list
                    )
                    to_slice_tensor_shape = []
                    if len(partition_tensor_list) == 1:
                        for item in partition_tensor_list[0]:
                            to_slice_tensor_shape.append(item[1] - item[0])
                    else:
                        to_slice_tensor_shape = op_desc.shape
                    slice_desc = {}
                    slice_desc["op"] = "slice"
                    infer_flags = list(1 for i in range(len(op_desc.axes)))
                    slice_desc["attrs"] = {
                        "axes": op_desc.axes,
                        "starts": op_desc.starts,
                        "ends": op_desc.ends,
                        "infer_flags": infer_flags,
                    }
                    slice_desc["inputs"] = {
                        "Input": [(dtype, to_slice_tensor_shape)]
                    }
                    local_rank_comp_cost[key].append(
                        SliceOpCost(op_desc=slice_desc, cluster=cluster)
                    )

        res = (comm_costs, local_rank_comp_cost)

        return res