# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import copy
from functools import reduce

import paddle
import paddle.fluid.core as core
from paddle.utils import unique_name
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import Program, OpProtoHolder
from paddle.distributed.fleet.meta_optimizers.common import OpRole
import paddle.fluid.layers.utils as utils
from ..collective import _get_global_env
from .dist_context import DistributedContext
from .dist_attribute import (
    OperatorDistributedAttribute,
    TensorDistributedAttribute,
)
from .process_group import new_process_group, ProcessGroup, _g_process_group_map
from .cost import build_comm_desc, CommContext
from .cost import AllgatherOpCost, SendOpCost
from .cost import SliceOpCost, SplitOpCost, ConcatOpCost
from .cluster import Cluster
from .utils import print_program_with_dist_attr, is_gradient_clip_op

# NOTE: If op in _g_special_ops or _g_gradient_clip_ops, it will not be resharded.
_g_special_ops = ['check_finite_and_unscale', 'update_loss_scaling']
_g_gradient_clip_ops = [
    "sum",
    "sqrt",
    "fill_constant",
    "elementwise_max",
    "elementwise_div",
]
_g_subblock_ops = ["while", "conditional_block"]


def get_var_with_recursion(var_name, block, program):
    """Get var in the parent block if not found in the current block"""
    var = None
    if var_name in block.vars:
        var = block.vars[var_name]
    else:
        var = block._var_recursive(var_name)
        # parent_block = program.blocks[block.parent_idx]
        # if var_name in parent_block.vars:
        #     var = parent_block.vars[var_name]
    assert var is not None, "{} is not found".format(var_name)

    return var


class AllGatherOpDesc:
    """
    Describe the allgather op in the reshard phase.

    Args:
        group (list): Process group.
        shape (list): The tensor shape.
        is_bool (bool): Whether allgather bool data. Default: False.
    """

    def __init__(self, group, shape, is_bool=False):
        self._group = group
        self._desc = "all_gather"
        self._shape = shape
        self._is_bool = is_bool

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def group(self):
        return self._group

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, group: {self._group}, shape: {self._shape}, is_bool: {self._is_bool}."


class SendOpDesc:
    """
    Describe the send op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process to send.
        dst (int): The destination process to receive.
        is_bool (bool): Whether send bool data. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._dst = dst
        self._partition_index = partition_index
        self._desc = "send"
        self._shape = []
        self._is_bool = is_bool
        self._src = src

    @property
    def src(self):
        return self._src

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def dst(self):
        return self._dst

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class RecvOpDesc:
    """
    Describe the recv op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process to send.
        dst (int): The destination process to receive.
        is_bool (bool): Whether receive bool data. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._src = src
        self._partition_index = partition_index
        self._desc = "recv"
        self._shape = []
        self._is_bool = is_bool
        self._dst = dst

    @property
    def dst(self):
        return self._dst

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def src(self):
        return self._src

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, src: {self._src}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class SliceOpDesc:
    """
    Describe the slice op in the reshard phase.

    Args:
        starts (list): It represents start indices of corresponding axis in ``axes``.
        ends (list):  It represents end indices of corresponding axis in ``axes``.
        axes (list):  Axes that `starts` and `ends` apply to.
        shape (list): The shape of the tensor to be sliced.
    """

    def __init__(self, starts, ends, axes, shape=None):
        self._starts = starts
        self._ends = ends
        self._axes = axes
        self._desc = "slice"
        self._shape = shape

    @property
    def starts(self):
        return self._starts

    @property
    def ends(self):
        return self._ends

    @property
    def axes(self):
        return self._axes

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        if self._shape is not None:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}, shape: {self._shape}."
        else:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}."


class ConcatOpDesc:
    """
    Describe the concat op in the reshard phase.

    Args:
        partition_index_list (list): The list containing all partition indices.
    """

    def __init__(self, partition_index_list):
        self._partition_index_list = partition_index_list
        self._desc = "concat"

    @property
    def partition_index_list(self):
        return self._partition_index_list

    @property
    def desc(self):
        return self._desc

    def __repr__(self):
        return f"op: {self._desc}, partition_index_list: {self._partition_index_list}."


class Inserter:
    """Insert op required in the reshard process."""

    @staticmethod
    def insert_cast_op(block, idx, tensor, op_role, tensor_type):
        # to avoid name conflict with framework
        new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key(
            ".".join(["cast@RESHARD", 'tmp'])
        )
        out = block.create_var(
            name=new_var_name,
            dtype=tensor_type,
            type=tensor.type,
            lod_level=tensor.lod_level,
        )
        cast_op = block._insert_op(
            idx,
            type='cast',
            inputs={'X': [tensor]},
            outputs={'Out': [out]},
            attrs={
                'in_dtype': tensor.dtype,
                'out_dtype': out.dtype,
                'op_role': op_role,
            },
        )
        cast_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_send_op(block, idx, tensor, src, dst, op_role):
        """Insert send op into block at the given index."""
        op_type = 'send_v2'
        # use pair comm group
        process_group = new_process_group([src, dst])
        send_op = block._insert_op(
            idx,
            type=op_type,
            inputs={'X': [tensor]},
            attrs={
                'ring_id': process_group.id,
                'peer': process_group.ranks.index(dst),
                'use_calc_stream': True,
                'op_role': op_role,
                'dynamic_shape': False,
            },
        )
        send_op._set_attr('op_namescope', "/auto_parallel/reshard")

    @staticmethod
    def insert_recv_op(block, idx, tensor, src, dst, op_role):
        """Insert recv op into block at the given index."""
        op_type = 'recv_v2'
        # use pair group
        process_group = new_process_group([src, dst])
        recv_op = block._insert_op(
            idx,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [tensor]},
            attrs={
                'ring_id': process_group.id,
                'peer': process_group.ranks.index(src),
                'out_shape': tensor.shape,
                'dtype': tensor.dtype,
                'use_calc_stream': True,
                'op_role': op_role,
                'dynamic_shape': False,
            },
        )
        recv_op._set_attr('op_namescope', "/auto_parallel/reshard")
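    # NOTE (illustrative example): insert_send_op/insert_recv_op always build a
    # two-rank process group, so 'peer' is the index of the remote rank inside
    # that pair group rather than the global rank. For example, sending from
    # global rank 3 to global rank 7 uses the group [3, 7] with peer=1 on the
    # sender and peer=0 on the receiver.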

    @staticmethod
    def insert_reset_lod_op(block, idx, X, Y, op_role):
        """Insert reset_lod op into block at the given index."""

        new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key(
            ".".join(["reset_lod@RESHARD", 'tmp'])
        )
        reset_lod_out = block.create_var(
            name=new_var_name,
            shape=X.shape,
            type=X.type,
            dtype=X.dtype,
            lod_level=X.lod_level,
        )

        reset_op = block._insert_op(
            idx,
            type="lod_reset",
            inputs={'X': X, 'Y': Y},
            outputs={'Out': reset_lod_out},
            attrs={'op_role': op_role},
        )
        reset_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return reset_lod_out

    @staticmethod
    def insert_concat_op(block, idx, tensors, axis, op_role):
        """Insert concat op into block at the given index."""
        inputs = {'X': tensors}
        attrs = {}
        attrs['axis'] = axis
        attrs['op_role'] = op_role
        # to avoid name conflict with framework
        helper = LayerHelper('concat@RESHARD', **locals())
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensors[0].dtype,
                shape=None,
                lod_level=tensors[0].lod_level,
                type=tensors[0].type,
                persistable=False,
                stop_gradient=False,
            )
        concat_op = block._insert_op(
            idx,
            type='concat',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
        )
        concat_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_slice_op(
        block, idx, tensor, starts, ends, axes, new_var_name, op_role
    ):
        """Insert slice op into block at the given index."""
        # This is a hack to insert split op to get slice tensor
        # 1. [128, 128] => [64, 128]: split
        # 2. [128, 128] => [128, 128]: assign
        # 3. [128, 128] => [64, 64]: slice, it will be replaced by multi split
        global_shape = tensor.shape
        slice_shape = [ends[i] - starts[i] for i in range(len(starts))]
        diff_dims = []
        for index, item in enumerate(slice_shape):
            if item != global_shape[index]:
                diff_dims.append(index)

        # use assign
        if len(diff_dims) == 0:
            out = block.create_var(
                name=new_var_name,
                dtype=tensor.dtype,
                type=tensor.type,
                shape=slice_shape,
                lod_level=tensor.lod_level,
            )
            inputs = {'X': [tensor]}
            outputs = {"Out": [out]}
            attrs = {"in_place": False, "op_role": op_role}
            assign_op = block._insert_op(
                idx, type="assign", inputs=inputs, outputs=outputs, attrs=attrs
            )
            assign_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use split once
        elif len(diff_dims) == 1:
            diff_dim = diff_dims[0]
            num_or_sections = global_shape[diff_dim] // slice_shape[diff_dim]
            axis = diff_dim
            cur_idx = starts[diff_dim] // slice_shape[diff_dim]
            input_shape = global_shape
            inputs = {'X': tensor}
            attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
            new_shape = []
            for index, item in enumerate(tensor.shape):
                if index != axis:
                    new_shape.append(item)
                else:
                    new_shape.append(item // num_or_sections)
            with paddle.static.program_guard(block.program):
                outs = [
                    block.create_var(
                        name=paddle.fluid.unique_name.generate_with_ignorable_key(
                            ".".join(['split@RESHARD', 'tmp'])
                        ),
                        dtype=tensor.dtype,
                        shape=None,
                        type=tensor.type,
                        persistable=False,
                        lod_level=tensor.lod_level,
                        stop_gradient=False,
                    )
                    for i in range(num_or_sections)
                ]
                out = outs[cur_idx]
            split_op = block._insert_op(
                idx,
                type="split",
                inputs=inputs,
                outputs={'Out': outs},
                attrs=attrs,
            )
            split_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use slice
        else:
            inputs = {'Input': tensor}
            infer_flags = list(1 for i in range(len(axes)))
            attrs = {
                "axes": axes,
                "starts": starts,
                "ends": ends,
                "infer_flags": infer_flags,
                'op_role': op_role,
            }
            out = block.create_var(
                name=new_var_name,
                dtype=tensor.dtype,
                type=tensor.type,
                lod_level=tensor.lod_level,
            )
            slice_op = block._insert_op(
                idx,
                type="slice",
                inputs=inputs,
                outputs={'Out': [out]},
                attrs=attrs,
            )
            slice_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

    @staticmethod
    def insert_split_op(block, idx, tensor, num_or_sections, op_role, axis=0):
        """Insert split op into block at the given index."""
        helper = LayerHelper('split@RESHARD', **locals())
        input_shape = tensor.shape
        inputs = {'X': tensor}
        attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
        new_shape = []
        for index, item in enumerate(tensor.shape):
            if index != axis:
                new_shape.append(item)
            else:
                new_shape.append(item // num_or_sections)
        with paddle.static.program_guard(block.program):
            outs = [
                block.create_var(
                    name=paddle.fluid.unique_name.generate_with_ignorable_key(
                        ".".join([helper.name, 'tmp'])
                    ),
                    dtype=tensor.dtype,
                    shape=None,
                    lod_level=tensor.lod_level,
                    type=tensor.type,
                    persistable=False,
                    stop_gradient=False,
                )
                for i in range(num_or_sections)
            ]
        split_op = block._insert_op(
            idx, type="split", inputs=inputs, outputs={'Out': outs}, attrs=attrs
        )
        split_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return outs

    @staticmethod
    def insert_fill_constant_op(block, idx, op_role):
        """Insert fill constant op into block at the given index."""
        # to avoid name conflict with framework
        helper = LayerHelper('fill_constant@RESHARD', **locals())
        # use paddle.int64 as dtype
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=paddle.int64,
                shape=None,
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False,
            )
        inputs = {}
        attrs = {'force_cpu': False}
        attrs['str_value'] = str(int("1"))
        attrs['value'] = int("1")
        attrs['dtype'] = out.dtype
        attrs['op_role'] = op_role
        utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=[0], op_type='fill_constant'
        )
        fillconstant_op = block._insert_op(
            idx,
            type='fill_constant',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
        )
        out.stop_gradient = True
        fillconstant_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_allgather_op(block, idx, tensor, ranks, op_role):
        """Insert allgather op into block at the given index."""
        tensor_list = []
        group = new_process_group(ranks)
        idx_offset = 0

        # instantiate the process group before inserting the allgather op.
        if not group.is_instantiate():
            # insert fill_constant op
            fill_constant_out = Inserter.insert_fill_constant_op(
                block, idx, op_role
            )
            fill_constant_out.stop_gradient = True

            # insert c_allreduce_sum op
            allreduce_op = block._insert_op(
                idx + 1,
                type="c_allreduce_sum",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={
                    'ring_id': 1000,
                    'use_calc_stream': True,
                    'op_role': op_role,
                },
            )
            allreduce_op._set_attr('op_namescope', "/auto_parallel/reshard")
            # insert c_sync_calc_stream op
            sync_calc_op = block._insert_op(
                idx + 2,
                type="c_sync_calc_stream",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={'op_role': op_role},
            )
            sync_calc_op._set_attr('op_namescope', "/auto_parallel/reshard")
            idx_offset = 3

        # insert c_allgather op
        op_type = 'c_allgather'
        # to avoid name conflict with framework
        helper = LayerHelper(op_type + "@RESHARD", **locals())
        with paddle.static.program_guard(block.program):
            allgather_out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensor.dtype,
                shape=None,
                lod_level=tensor.lod_level,
                type=tensor.type,
                persistable=False,
                stop_gradient=False,
            )
        allgather_op = block._insert_op(
            idx + idx_offset,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [allgather_out]},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'nranks': group.nranks,
                'op_role': op_role,
            },
        )
        allgather_op._set_attr('op_namescope', "/auto_parallel/reshard")
        idx_offset += 1

        # insert split op
        split_out = Inserter.insert_split_op(
            block, idx + idx_offset, allgather_out, group.nranks, op_role
        )
        idx_offset += 1
        tensor_list.extend(split_out)
        return tensor_list, idx_offset
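        # NOTE (illustrative summary): if the group is not instantiated yet, the
        # sequence inserted above is fill_constant(1) -> c_allreduce_sum ->
        # c_sync_calc_stream, which forces the new communicator to be created
        # before the real c_allgather; the gathered tensor is then split back
        # into group.nranks pieces and returned as a list.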

    @staticmethod
    def concat_partitions_with_op(
        partition_tensor_list, tensor, partition_index, block, idx, op_role
    ):
        """Concat the tensors and insert concat op."""
        if not partition_tensor_list:
            partition_tensor_list.append((tensor, partition_index))
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                (
                    concat_axis,
                    first_order,
                    new_partition,
                ) = Resharder.compute_concat_info(
                    partition_tensor_list[i][1], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    _ = (
                        Inserter.insert_concat_op(
                            block,
                            idx[0],
                            [partition_tensor_list[i][0], tensor],
                            concat_axis,
                            op_role,
                        )
                        if first_order == 0
                        else Inserter.insert_concat_op(
                            block,
                            idx[0],
                            [tensor, partition_tensor_list[i][0]],
                            concat_axis,
                            op_role,
                        )
                    )
                    partition_tensor_list.pop(i)
                    idx[0] += 1
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list,
                        _,
                        new_partition,
                        block,
                        idx,
                        op_role,
                    )
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append((tensor, partition_index))


class Remover:
    """Remove unneeded vars and ops in the reshard process."""

    @staticmethod
    def remove_no_need_ops(auto_parallel_main_prog, dist_context, rank_id):
        """Remove unneeded ops in the main program"""
        not_remove_op_ref = [
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
        ]

        # NOTE: The nested sub block is not supported now.
        remove_block_order = []
        for block_idx in Resharder.while_block_info:
            remove_block_order.append(block_idx)

        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            if block_idx not in remove_block_order:
                remove_block_order.append(block_idx)

        # the sub block should be removed first
        for block_idx in remove_block_order:
            remove_op_idx = []
            block = auto_parallel_main_prog.blocks[block_idx]
            ops = block.ops
            vars = block.vars
            for idx, op in enumerate(ops):
                if op.type == "read":
                    dim_list = []
                    for var_name in op.output_arg_names:
                        dim_list.extend(
                            get_var_with_recursion(
                                var_name, block, auto_parallel_main_prog
                            ).shape
                        )
                    for i in range(idx, -1, -1):
                        if ops[i].type == "create_py_reader":
                            ops[i]._set_attr("shape_concat", dim_list)
                            break
                    continue

                # replace the input and output of c_sync_comm_stream op when in the pipeline scenario.
                if op.type == "c_sync_comm_stream":
                    need_save = []
                    for var_name in op.input_arg_names:
                        process_mesh = (
                            dist_context.get_tensor_dist_attr_for_program(
                                get_var_with_recursion(
                                    var_name, block, auto_parallel_main_prog
                                )
                            ).process_mesh
                        )
                        if rank_id in process_mesh.processes:
                            need_save.append(var_name)
                    if not need_save:
                        remove_op_idx.append(idx)
                        continue

                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, need_save)
                    op.desc.set_output(proto.outputs[0].name, need_save)
                    continue

                # judge whether the other ops should be removed.
                op_dist_attr = dist_context.get_op_dist_attr_for_program(op)
                if op_dist_attr is not None:
                    op_process_mesh = op_dist_attr.process_mesh
                    if (
                        rank_id not in op_process_mesh.processes
                        and op.type not in not_remove_op_ref
                    ):
                        remove_op_idx.append(idx)

            for idx in remove_op_idx[::-1]:
                block._remove_op(idx, sync=False)
            block._sync_with_cpp()

    @staticmethod
    def remove_no_need_vars(
        auto_parallel_main_prog, dist_params_grads, feed_var_names
    ):
        """Remove unneeded vars in the main program"""
        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            remove_vars = set()
            ops = block.ops
            vars = block.vars
            need_vars = set()
            for op in ops:
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
                for var_name in op.output_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
            for var in vars:
                if var not in need_vars:
                    remove_vars.add(var)

            # change dist_params_grads, the optimize op just in block 0.
            if block_idx == 0:
                param_grad_map = {}
                for op in ops:
                    if int(op.attr('op_role')) == int(OpRole.Optimize):
                        if (
                            "Param" in op.input_names
                            and "Grad" in op.input_names
                        ):
                            param_name = op.input("Param")[0]
                            grad_name = op.input("Grad")[0]
                            param_grad_map[param_name] = grad_name

                need_remove_idx = []
                for idx, item in enumerate(dist_params_grads):
                    if item[0].name not in param_grad_map.keys():
                        need_remove_idx.append(idx)

                for idx in need_remove_idx[::-1]:
                    dist_params_grads.pop(idx)

                idx = 0
                while idx < len(dist_params_grads):
                    param_name = dist_params_grads[idx][0].name
                    grad_name = dist_params_grads[idx][1].name
                    if grad_name != param_grad_map[param_name]:
                        dist_params_grads[idx] = (
                            vars[param_name],
                            vars[param_grad_map[param_name]],
                        )
                    idx += 1

            for var in remove_vars:
                if var in feed_var_names:
                    continue
                block._remove_var(var)

    @staticmethod
    def remove_no_need_in_main(
        auto_parallel_main_prog, dist_context, rank_id, dist_params_grads
    ):
        """Remove unneeded vars and ops in the main program."""
        Remover.remove_no_need_ops(
            auto_parallel_main_prog, dist_context, rank_id
        )
        Resharder.change_while_op_input_and_output(
            auto_parallel_main_prog, dist_context
        )
        # 'feed_var_names' cannot be removed from auto_parallel_main_prog
        feed_var_names = []
        for var in sum(list(dist_context.serial_feed_vars.values()), []):
            feed_var_names.append(var.name)
        Remover.remove_no_need_vars(
            auto_parallel_main_prog, dist_params_grads, feed_var_names
        )

    @staticmethod
    def remove_no_need_in_startup(
        auto_parallel_main_prog, auto_parallel_startup_prog
    ):
        """Remove unneeded vars and ops in the startup program."""
        main_input_vars = set()
        main_ops = auto_parallel_main_prog.global_block().ops
        for op in main_ops:
            for var_name in op.input_arg_names:
                main_input_vars.add(var_name)

        startup_block = auto_parallel_startup_prog.global_block()
        startup_output_vars = set()
        startup_ops = startup_block.ops
        for op in startup_ops:
            # skip c_sync_comm_stream op
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                startup_output_vars.add(var_name)

        need_vars = set()
        for var_name in startup_output_vars:
            if var_name in main_input_vars:
                need_vars.add(var_name)

        startup_ops = startup_block.ops
        actual_need_vars = set()
        for idx, op in enumerate(startup_ops):
            is_need_op = False
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                if var_name in need_vars:
                    is_need_op = True
                    break
            if is_need_op:
                for var_name in op.output_arg_names:
                    actual_need_vars.add(var_name)
                for var_name in op.input_arg_names:
                    actual_need_vars.add(var_name)

        remove_vars = set()
        for var_name in startup_block.vars:
            if var_name not in actual_need_vars:
                remove_vars.add(var_name)
        for var in remove_vars:
            startup_block._remove_var(var)

        remove_op_idx = []
        vars = startup_block.vars
        for idx, op in enumerate(startup_block.ops):
            is_no_need_op = False
            if op.type == "c_sync_comm_stream":
                var_names = []
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        var_names.append(var_name)
                if not var_names:
                    remove_op_idx.append(idx)
                else:
                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, var_names)
                    op.desc.set_output(proto.outputs[0].name, var_names)
                continue

            for var_name in op.output_arg_names:
                if var_name not in vars:
                    is_no_need_op = True
                    break
            if is_no_need_op:
                remove_op_idx.append(idx)
        for idx in remove_op_idx[::-1]:
            startup_block._remove_op(idx, sync=False)
        startup_block._sync_with_cpp()


class Resharder:
    """
    Reshard tensor in the program according to its distributed attribute and corresponding op distributed attribute.

    Args:
        auto_parallel_main_prog (Program): An auto parallel main program.
        auto_parallel_startup_prog (Program): An auto parallel startup program.
        rank_id (int): The process id.
        dist_context (DistributedContext): The distributed context of this rank.
        dist_params_grads (list): The list of (param, grad) tuples.
        batch_size (int): The batch size. Default: None.
    """

    while_block_info = {}
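    # A typical driver looks like the sketch below (illustrative; it assumes the
    # reshard() entry point defined later in this file):
    #   resharder = Resharder(main_prog, startup_prog, rank_id, dist_context,
    #                         dist_params_grads)
    #   resharder.reshard()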

    def __init__(
        self,
        auto_parallel_main_prog,
        auto_parallel_startup_prog,
        rank_id,
        dist_context,
        dist_params_grads,
        batch_size=None,
    ):
        assert isinstance(auto_parallel_main_prog, Program), (
            "The type of auto_parallel_main_prog should be Program, "
            "but got {}.".format(type(auto_parallel_main_prog))
        )
        if auto_parallel_startup_prog is not None:
            assert isinstance(auto_parallel_startup_prog, Program), (
                "The type of auto_parallel_startup_prog should be Program or None, "
                "but got {}.".format(type(auto_parallel_startup_prog))
            )
        assert isinstance(
            rank_id, int
        ), "The type of rank_id should be int, " "but got {}.".format(
            type(rank_id)
        )
        assert isinstance(dist_context, DistributedContext), (
            "The type of dist_context should be DistributedContext, "
            "but got {}.".format(type(dist_context))
        )

        if batch_size is not None:
            assert isinstance(
                batch_size, int
            ), "The type of batch_size should be int, " "but got {}.".format(
                type(batch_size)
            )

        self._auto_parallel_main_prog = auto_parallel_main_prog
        self._auto_parallel_startup_prog = auto_parallel_startup_prog
        self._rank_id = rank_id
        self._dist_context = dist_context
        self._dist_params_grads = dist_params_grads
        self._batch_size = batch_size
        self._has_sent = {}
        self._has_recv = {}
        self._has_allgather = {}
        # to avoid resharding repeatedly
        self._has_resharded = {}

    @property
    def auto_parallel_main_prog(self):
        return self._auto_parallel_main_prog

    @property
    def auto_parallel_startup_prog(self):
        return self._auto_parallel_startup_prog

    @property
    def rank_id(self):
        return self._rank_id

    @property
    def dist_context(self):
        return self._dist_context

    @property
    def dist_params_grads(self):
        return self._dist_params_grads

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def has_sent(self):
        return self._has_sent

    @property
    def has_recv(self):
        return self._has_recv

    @property
    def has_allgather(self):
        return self._has_allgather

    @staticmethod
    def compute_partition_shape(complete_shape, dims_mapping, process_shape):
        """Compute the shape of partition."""
        partition_shape = []
        for idx, item in enumerate(complete_shape):
            if dims_mapping[idx] == -1:
                partition_shape.append(item)
            else:
                partition_shape.append(item // process_shape[dims_mapping[idx]])

        return partition_shape
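        # Worked example (illustrative): complete_shape=[8, 4],
        # dims_mapping=[0, -1], process_shape=[2] -> partition_shape=[4, 4].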

    @staticmethod
    def compute_process_index(process, process_group, process_shape):
        """Compute the index of process_shape corresponding to the process."""
        relative_process = process_group.index(process)
        process_index = []
        product = reduce(lambda x, y: x * y, process_shape)

        for i in range(len(process_shape)):
            idx = relative_process // (product // process_shape[i])
            product = product // process_shape[i]
            relative_process = (
                relative_process - relative_process // product * product
            )
            process_index.append(idx)

        return process_index
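        # Worked example (illustrative): process=5, process_group=[2, 3, 4, 5],
        # process_shape=[2, 2] -> relative process 3 -> process_index=[1, 1].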

    @staticmethod
    def compute_partition_index(
        process, complete_shape, dims_mapping, process_shape, process_group
    ):
        """Compute the partition index in complete tensor."""
        partition_shape = Resharder.compute_partition_shape(
            complete_shape, dims_mapping, process_shape
        )
        process_index = Resharder.compute_process_index(
            process, process_group, process_shape
        )
        partition_index = []

        for i in range(len(complete_shape)):
            if dims_mapping[i] == -1:
                partition_index.append([0, partition_shape[i]])
            else:
1078 1079 1080 1081 1082 1083 1084
                partition_index.append(
                    [
                        process_index[dims_mapping[i]] * partition_shape[i],
                        (process_index[dims_mapping[i]] + 1)
                        * partition_shape[i],
                    ]
                )

        return partition_index
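        # Worked example (illustrative): with complete_shape=[8, 4],
        # dims_mapping=[0, -1], process_shape=[2], process_group=[0, 1],
        # process 1 owns partition_index=[[4, 8], [0, 4]].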

    @staticmethod
    def compute_concat_info(partition_index_x, partition_index_y):
        """Judge whether two partitions can be concatenated and compute the concatenated partition index."""
        differ_count = 0
        concat_axis = -1
        first_order = 0
        new_partition = []

        for idx, item in enumerate(partition_index_x):
            if item != partition_index_y[idx]:
                differ_count += 1
                if (
                    item[1] == partition_index_y[idx][0]
                    and item[0] < partition_index_y[idx][1]
                ):
                    concat_axis = idx
                    new_partition.append([item[0], partition_index_y[idx][1]])
                elif (
                    item[0] == partition_index_y[idx][1]
                    and item[1] > partition_index_y[idx][0]
                ):
                    first_order = 1
                    concat_axis = idx
                    new_partition.append([partition_index_y[idx][0], item[1]])
            else:
                new_partition.append(item)

        if differ_count == 1:
            return concat_axis, first_order, new_partition
        else:
            return -1, first_order, new_partition
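        # Worked example (illustrative): x=[[0, 4], [0, 4]] and y=[[4, 8], [0, 4]]
        # differ only on axis 0 and touch at 4, so the result is
        # (concat_axis=0, first_order=0, new_partition=[[0, 8], [0, 4]]).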

    @staticmethod
    def compute_complete_shape(slice_shape, process_shape, dims_mapping):
        """Compute the complete shape of the sliced tensor with its process shape and dims mapping."""
        complete_shape = []
        for idx, item in enumerate(slice_shape):
            if dims_mapping[idx] == -1:
                complete_shape.append(item)
            else:
                complete_shape.append(item * process_shape[dims_mapping[idx]])
        return complete_shape
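        # Worked example (illustrative): slice_shape=[4, 4], process_shape=[2],
        # dims_mapping=[0, -1] -> complete_shape=[8, 4].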

    @staticmethod
    def concat_partitions(partition_index_list, partition_index):
        """Concat the given partitions without inserting concat op."""
        if not partition_index_list:
            partition_index_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_index_list):
                concat_axis, _, new_partition = Resharder.compute_concat_info(
                    partition_index_list[i], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    partition_index_list.pop(i)
                    Resharder.concat_partitions(
                        partition_index_list, new_partition
                    )
                    break
                i += 1
            if not has_concat:
                partition_index_list.append(partition_index)

    @staticmethod
    def change_while_op_input_and_output(auto_parallel_main_prog, dist_context):
        """Change while op input and output after the corresponding sub block ops are removed."""
        for sub_block_idx in Resharder.while_block_info:
            sub_block = auto_parallel_main_prog.blocks[sub_block_idx]
            parent_while_op_id = Resharder.while_block_info[sub_block_idx][
                "op_id"
            ]
            parent_block = auto_parallel_main_prog.blocks[sub_block.parent_idx]

            sub_block_op_inputs = set()
            sub_block_op_outputs = []
            for op in sub_block.ops:
                # skip the input and output of operators inserted in the reshard phase
                dist_op = dist_context.get_dist_op_for_program(op)
                if (
                    dist_op
                    or (op.type == "slice" and not dist_op)
                    or (op.type == "split" and not dist_op)
                    or (op.type == "assign" and not dist_op)
                ):
                    for var_name in op.output_arg_names:
                        if var_name not in sub_block_op_outputs:
                            sub_block_op_outputs.append(var_name)
                    for var_name in op.input_arg_names:
                        sub_block_op_inputs.add(var_name)

            # find the while op
            while_op = None
            for op in parent_block.ops:
                if op.desc.id() == parent_while_op_id and op.type == "while":
                    while_op = op
                    break

            if while_op is None:
                continue

            # find the actual input and output of while op
            proto = OpProtoHolder.instance().get_op_proto(while_op.type)
            new_X = []
            for var_name in while_op.input("X"):
                if var_name in sub_block_op_inputs:
                    new_X.append(var_name)
            assert new_X
            new_X.sort()
            while_op.desc.set_input(proto.inputs[0].name, new_X)

            new_Out = []
            for var_name in while_op.output("Out"):
                for output_name in sub_block_op_outputs[::-1]:
                    if output_name.find(var_name) != -1 and (
                        len(var_name) == len(output_name)
                        or "@RESHARD" in output_name
                    ):
                        if output_name not in new_Out:
                            new_Out.append(output_name)
            assert new_Out
            while_op.desc.set_output(proto.outputs[0].name, new_Out)

    def is_overlapped(self, shape_x, shape_y):
        """Judge whether two partitions intersect on the specified dimension."""
        overlapped = False
        if (shape_y[0] <= shape_x[0] < shape_y[1]) or (
            shape_x[0] <= shape_y[0] < shape_x[1]
        ):
            overlapped = True
        if shape_x == [0, 0] and shape_y == [0, 0]:
            overlapped = True
        return overlapped
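        # Illustrative: [0, 4] and [2, 6] overlap, while [0, 4] and [4, 8] do not.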

    def is_unshard(self, dims_mapping):
        for dim in dims_mapping:
            if dim != -1:
                return False
        return True

    def is_special_op(self, op):
        global _g_special_ops, _g_gradient_clip_ops
        if op.type in _g_special_ops:
            return True
        if is_gradient_clip_op(op) and op.type in _g_gradient_clip_ops:
            return True
        return False

    def is_condition_replicative(self, op):
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]

        if op.type == "while":
            input_cond = op.input("Condition")
        elif op.type == "conditional_block":
            input_cond = op.input("Cond")

        # the dims mapping of condition tensor should be replicative
        for var_name in input_cond:
            var = get_var_with_recursion(
                var_name, sub_block, self.auto_parallel_main_prog
            )
            dist_tensor = self.dist_context.get_dist_tensor_for_program(var)
            tensor_dist_attr = dist_tensor.dist_attr
            var_dims_mapping = tensor_dist_attr.dims_mapping
            for dim in var_dims_mapping:
                if dim != -1:
                    return False

        return True

    def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
        """Judge whether the tensor needs to be resharded."""
        is_reshard = False
        tensor_dist_attr = dist_tensor.dist_attr
        tensor_dims_mapping = tensor_dist_attr.dims_mapping
        tensor_process_mesh = tensor_dist_attr.process_mesh

        # dist_attr is [process_mesh, dims_mapping] and process_mesh is not a union
        op_process_mesh = dist_attr[0]

        if op_input:
            op_input_dims_mapping = dist_attr[1]
            if all(
                map(
                    lambda x: x,
                    [
                        tensor_dims_mapping,
                        tensor_process_mesh,
                        op_input_dims_mapping,
                        op_process_mesh,
                    ],
                )
            ):
                # judge whether need reshard by dims_mapping
                if tensor_dims_mapping != op_input_dims_mapping:
                    if (
                        tensor_process_mesh
                        not in self.dist_context.process_meshes
                    ):
                        # assert whether -1 when union.
                        for item in tensor_dims_mapping:
                            if item != -1:
                                raise ValueError(
                                    "The dim must be -1 when tensor process mesh is a union."
                                )
                        # tensor process_mesh: [0, 1, 2, 3], dims_mapping: [-1, -1]
                        # op process_mesh: [4, 5], dims_mapping: [0, -1]
                        # reshard is not supported such as above
                        if not is_reshard:
                            return is_reshard
                        else:
                            raise ValueError(
                                "it is not supported that tensor process mesh is a union and needs reshard."
                            )
                    is_reshard = True

                # judge whether need reshard by process_mesh
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True
                # not reshard data in send/recv scene
                if (
                    tensor_process_mesh != op_process_mesh
                    and len(tensor_process_mesh.processes)
                    == len(op_process_mesh.processes)
                    and dist_tensor.serial_tensor.is_data
                ):
                    is_reshard = False
        else:
            op_output_dims_mapping = dist_attr[1]
            if all(
                map(
                    lambda x: x,
                    [
                        tensor_dims_mapping,
                        tensor_process_mesh,
                        op_output_dims_mapping,
                        op_process_mesh,
                    ],
                )
            ):
                if tensor_dims_mapping != op_output_dims_mapping:
                    raise ValueError(
                        "It is not supported that tensor dims mapping is different from op output dims mapping."
                    )
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True

        return is_reshard
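        # Illustrative example: a tensor with dims_mapping [-1, -1] consumed by an
        # op input expecting dims_mapping [0, -1] on the same process mesh needs
        # reshard; identical dist attributes need none.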

    def get_op_process_meshes(self, op):
        """Get sub process meshes of the given op if op process mesh is a union."""
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        op_process_mesh = dist_op.dist_attr.process_mesh

        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.processes) & (
                set(op_process_mesh.processes)
            ) and len(process_mesh.processes) < len(op_process_mesh.processes):
                process_meshes.append(process_mesh)

        # an empty process_meshes means the op process mesh is not a union
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        return process_meshes

    def find_op_desc_seq(self, dist_tensor, dist_attr, serial=False):
        """
        Find the op description sequence needed to reshard the source tensor to match the op requirement.

        Args:
            dist_tensor (DistributedTensor): A distributed tensor.
            dist_attr (list): A list contains process_mesh and dims_mapping such as [process_mesh, dims_mapping].
            serial (bool): If serial is true, the dist tensor and dist op come from serial program. Otherwise, they come from auto program.

        Returns:
            Dict, the required op description sequence for each process. The key of the dict is
            a process and the value is a list of op descriptions.
        """
        tensor_dist_attr = dist_tensor.dist_attr
        source_tensor = dist_tensor.serial_tensor
        tensor_name = source_tensor.name
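        # NOTE (illustrative): the values of the returned dict are lists of the
        # descriptor objects defined above (AllGatherOpDesc, SendOpDesc,
        # RecvOpDesc, SliceOpDesc, ConcatOpDesc), keyed by process id; e.g. moving
        # a tensor across disjoint process groups typically yields SendOpDesc
        # entries for the source ranks and RecvOpDesc/ConcatOpDesc/SliceOpDesc
        # entries for the target ranks.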

        source_dims_mapping = tensor_dist_attr.dims_mapping
        source_process_mesh = tensor_dist_attr.process_mesh
        source_process_group = source_process_mesh.processes
        source_process_shape = source_process_mesh.topology

        target_process_mesh = dist_attr[0]
        target_dims_mapping = dist_attr[1]
        target_process_group = target_process_mesh.processes
        target_process_shape = target_process_mesh.topology

        op_role = dist_attr[2]

        if source_tensor.shape[0] < 0:
            assert source_tensor.shape[0] == -1
            new_shape = list(source_tensor.shape)
            new_shape[0] = self.batch_size
            source_tensor.desc.set_shape(new_shape)

        complete_shape = (
            Resharder.compute_complete_shape(
                source_tensor.shape, source_process_shape, source_dims_mapping
            )
            if not serial
            else source_tensor.shape
        )
        op_desc_seq = {}

        # TODO: handle the case where the target process group shares some processes with the source process group
        if set(target_process_group).intersection(
            set(source_process_group)
        ) and set(target_process_group).difference(set(source_process_group)):
            pass

        elif target_process_group != source_process_group:
            partition_process_mapping_list = []
            for source_process in source_process_group:
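                # record, for every distinct source partition, which processes hold it and whether each has already served as a sender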
                # get partition index of source process
                source_partition_index = Resharder.compute_partition_index(
                    source_process,
                    complete_shape,
                    source_dims_mapping,
                    source_process_shape,
                    source_process_group,
                )
                if not partition_process_mapping_list:
                    # each item in partition_process_mapping_list is [source_partition_index, processes holding it, whether each process has been used as a sender]
                    partition_process_mapping_list.append(
                        [source_partition_index, [source_process], [False]]
                    )
                else:
                    partition_list = list(
                        [item[0] for item in partition_process_mapping_list]
                    )
                    process_list = list(
                        [item[1] for item in partition_process_mapping_list]
                    )
                    has_used = list(
                        [item[2] for item in partition_process_mapping_list]
                    )

                    if partition_list.count(source_partition_index) == 1:
                        index = partition_list.index(source_partition_index)
                        process_list[index].append(source_process)
                        has_used[index].append(False)
                    else:
                        partition_process_mapping_list.append(
                            [source_partition_index, [source_process], [False]]
                        )

            for target_process in target_process_group:
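                # for each target process, find the overlapping source partitions and emit the send/recv, concat and slice descriptions it needs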
                # has_sent records the source partition indices that have already been sent to target_process
                has_sent = []
                target_partition_index = Resharder.compute_partition_index(
                    target_process,
                    complete_shape,
                    target_dims_mapping,
                    target_process_shape,
                    target_process_group,
                )
                partition_index_list = []
                all_partition_index_list = []
                for source_process in source_process_group:
                    source_partition_index = Resharder.compute_partition_index(
                        source_process,
                        complete_shape,
                        source_dims_mapping,
                        source_process_shape,
                        source_process_group,
                    )
                    to_send_process = None
                    if (
                        all(
                            _
                            for _ in list(
                                map(
                                    self.is_overlapped,
                                    source_partition_index,
                                    target_partition_index,
                                )
                            )
                        )
                        and source_partition_index not in has_sent
                    ):
                        idx = list(
                            [item[0] for item in partition_process_mapping_list]
                        ).index(source_partition_index)
                        has_used = list(
                            [item[2] for item in partition_process_mapping_list]
                        )[idx]
                        process_list = list(
                            [item[1] for item in partition_process_mapping_list]
                        )[idx]
                        i = 0
                        while i < len(has_used):
                            if not has_used[i]:
                                to_send_process = process_list[i]
                                has_used[i] = True
                                break
                            i += 1

                        if i == len(has_used):
                            has_used = list(map(lambda x: False, has_used))
                            to_send_process = process_list[0]
                            has_used[0] = True
                        assert (
                            to_send_process is not None
                        ), "Failed to find the send process."

                        if to_send_process not in op_desc_seq.keys():
                            op_desc_seq[to_send_process] = []
                        if target_process not in op_desc_seq.keys():
                            op_desc_seq[target_process] = []
                        all_partition_index_list.append(source_partition_index)

                        # append send and recv op desc
                        is_bool = dist_tensor.serial_tensor.dtype == paddle.bool
                        send_op_desc = SendOpDesc(
                            source_partition_index,
                            to_send_process,
                            target_process,
                            is_bool=is_bool,
                        )
                        recv_op_desc = RecvOpDesc(
                            source_partition_index,
                            to_send_process,
                            target_process,
                            is_bool=is_bool,
                        )
                        op_desc_seq[to_send_process].append(send_op_desc)
                        op_desc_seq[target_process].append(recv_op_desc)
                        has_sent.append(source_partition_index)
                        Resharder.concat_partitions(
                            partition_index_list, source_partition_index
                        )
                        if int(op_role) == int(OpRole.Forward):
                            self.dist_context.up_down_streams.add_pair_stream(
                                to_send_process, target_process
                            )

                # append concat op desc
                op_desc_seq[target_process].append(
                    ConcatOpDesc(all_partition_index_list)
                )

                # append slice op desc
                slice_starts = []
                slice_ends = []
                slices_axes = []
                concatenated_partition_index = partition_index_list[0]
                to_slice_tensor_shape = []

                for idx, item in enumerate(concatenated_partition_index):
                    slice_starts.append(
                        target_partition_index[idx][0] - item[0]
                    )
                    slice_ends.append(target_partition_index[idx][1] - item[0])
                    slices_axes.append(idx)
                    to_slice_tensor_shape.append(item[1] - item[0])

                op_desc_seq[target_process].append(
                    SliceOpDesc(
                        slice_starts,
                        slice_ends,
                        slices_axes,
                        shape=to_slice_tensor_shape,
                    )
                )

        # in the same process group, allgather and slice ops will be used
        else:
            # NOTE: Only the evenly partitioned case is supported.
            partition_index_list = []
            all_partition_index_list = []
            process_index = []
            for source_process in source_process_group:
                source_partition_index = Resharder.compute_partition_index(
                    source_process,
                    complete_shape,
                    source_dims_mapping,
                    source_process_shape,
                    source_process_group,
                )
                if source_partition_index not in partition_index_list:
                    partition_index_list.append(source_partition_index)
                    process_index.append(
                        [
                            [
                                source_process,
                            ],
                            source_partition_index,
                        ]
                    )
                else:
                    process_index[
                        partition_index_list.index(source_partition_index)
                    ][0].append(source_process)

            for i in range(len(process_index[0][0])):
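                # the i-th process of every distinct partition forms one allgather communication group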
                group = []
                for j in range(len(process_index)):
                    group.append(process_index[j][0][i])
                    if i == 0:
                        all_partition_index_list.append(process_index[j][1])
                for process in group:
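                    # shrink the group to the minimal set of processes whose partitions overlap what this process needs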
                    min_comm_group = copy.deepcopy(group)
                    all_partition_index_list_copied = copy.deepcopy(
                        all_partition_index_list
                    )
                    target_partition_index = Resharder.compute_partition_index(
                        process,
                        complete_shape,
                        target_dims_mapping,
                        target_process_shape,
                        target_process_group,
                    )
                    for _process in group:
                        source_partition_index = (
                            Resharder.compute_partition_index(
                                _process,
                                complete_shape,
                                source_dims_mapping,
                                source_process_shape,
                                source_process_group,
                            )
                        )
                        if not all(
                            _
                            for _ in list(
                                map(
                                    self.is_overlapped,
                                    source_partition_index,
                                    target_partition_index,
                                )
                            )
                        ):
                            min_comm_group.remove(_process)
                            all_partition_index_list_copied.remove(
                                source_partition_index
                            )

                    concatenated_partition_index_list = []
                    for partition_index in all_partition_index_list_copied:
                        Resharder.concat_partitions(
                            concatenated_partition_index_list, partition_index
                        )

                    concatenated_partition_index = (
                        concatenated_partition_index_list[0]
                    )

                    slice_starts = []
                    slice_ends = []
                    slices_axes = []
                    to_slice_tensor_shape = []

                    for idx, item in enumerate(concatenated_partition_index):
                        slice_starts.append(
                            target_partition_index[idx][0] - item[0]
                        )
                        slice_ends.append(
                            target_partition_index[idx][1] - item[0]
                        )
                        slices_axes.append(idx)
                        to_slice_tensor_shape.append(item[1] - item[0])

                    slice_op_desc = SliceOpDesc(
                        starts=slice_starts,
                        ends=slice_ends,
                        axes=slices_axes,
                        shape=to_slice_tensor_shape,
                    )
                    allgather_shape = (
                        None
                        if not serial
                        else dist_tensor.local_sizes(rank=process)
                    )
                    op_desc_seq[process] = (
                        [
                            AllGatherOpDesc(
                                group=min_comm_group,
                                shape=allgather_shape,
                                is_bool=(source_tensor.dtype == paddle.bool),
                            ),
                            ConcatOpDesc(
                                partition_index_list=all_partition_index_list_copied
                            ),
                            slice_op_desc,
                        ]
                        if len(min_comm_group) > 1
                        else [slice_op_desc]
                    )

        return op_desc_seq

    def parse_op_desc(
        self, block, op_desc_seq, var_name, reshard_op, dist_attr
    ):
        """Parse op desc sequence and insert op in the block"""
        tensor_list = []
        partition_tensor_list = []
        if self.rank_id not in op_desc_seq.keys():
            return
        op_desc_list = op_desc_seq[self.rank_id]

        idx = None
        for index, op in list(enumerate(block.ops)):
            if op.desc.id == reshard_op.desc.id:
                idx = index
                break
        assert (
            idx is not None
        ), "The op for reshard cannot be found in the rank {} program.".format(
            self.rank_id
        )

        matched_op = block.ops[idx]
        source_tensor = get_var_with_recursion(
            var_name, block, self.auto_parallel_main_prog
        )
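        # walk the op description sequence for this rank and insert the corresponding ops right before the matched op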
        for op_desc in op_desc_list:
            if isinstance(op_desc, AllGatherOpDesc):  # noqa: F401
                if var_name not in self.has_allgather.keys():
                    self.has_allgather[var_name] = []
                if not self.has_allgather[
                    var_name
                ] or op_desc.group not in list(
                    map(lambda x: x[0], self.has_allgather[var_name])
                ):
                    if op_desc.is_bool:
                        # for bool data allgather, cast to int64 -> allgather -> cast bool
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx,
                            source_tensor,
                            reshard_op.attr('op_role'),
                            paddle.int64,
                        )
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block,
                            idx + 1,
                            out_cast,
                            op_desc.group,
                            reshard_op.attr('op_role'),
                        )
                        idx += idx_offset
                        tensor_name_list = []
                        for var in tensor_list:
                            out_cast = Inserter.insert_cast_op(
                                block,
                                idx,
                                var,
                                reshard_op.attr('op_role'),
                                paddle.bool,
                            )
                            tensor_name_list.append(out_cast.name)
                            idx += 1
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list]
                        )
                    else:
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block,
                            idx,
                            source_tensor,
                            op_desc.group,
                            reshard_op.attr('op_role'),
                        )
                        idx += idx_offset
                        tensor_name_list = [var.name for var in tensor_list]
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list]
                        )
                else:
                    for item in self.has_allgather[var_name]:
                        if op_desc.group == item[0]:
                            tensor_list = [
                                get_var_with_recursion(
                                    var_name,
                                    block,
                                    self.auto_parallel_main_prog,
                                )
                                for var_name in item[1]
                            ]
                            break
                assert (
                    tensor_list
                ), "The result of parsing allgather op should not be None."

            elif isinstance(op_desc, SendOpDesc):
                if var_name not in self.has_sent.keys():
                    self.has_sent[var_name] = []
                if op_desc.dst not in self.has_sent[var_name]:
                    if op_desc.is_bool:
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx,
                            source_tensor,
                            reshard_op.attr('op_role'),
                            paddle.int64,
                        )
                        Inserter.insert_send_op(
                            block,
                            idx + 1,
                            out_cast,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        idx += 2
                    else:
                        Inserter.insert_send_op(
                            block,
                            idx,
                            source_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        idx += 1
                    self.has_sent[var_name].append(op_desc.dst)

            elif isinstance(op_desc, RecvOpDesc):
                if var_name not in self.has_recv.keys():
                    self.has_recv[var_name] = {}
                if op_desc.src not in self.has_recv[var_name].keys():
                    partition_index = op_desc.partition_index
                    shape = []
                    for index in partition_index:
                        shape.append(index[1] - index[0])
                    if op_desc.is_bool:
                        # for bool data, recv int64 -> cast to bool
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=paddle.int64,
                            type=source_tensor.type,
                        )
                        Inserter.insert_recv_op(
                            block,
                            idx,
                            recv_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx + 1,
                            recv_tensor,
                            reshard_op.attr('op_role'),
                            paddle.bool,
                        )
                        tensor_list.append(out_cast)
                        idx += 2
                        self.has_recv[var_name][op_desc.src] = out_cast
                    else:
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=source_tensor.dtype,
                            type=source_tensor.type,
                        )
                        Inserter.insert_recv_op(
                            block,
                            idx,
                            recv_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )

                        # for lod tensor, the lod needs to be reset after receiving
                        if recv_tensor.lod_level != 0:
                            set_lod = False
                            # use data lod to reset tensor lod
                            for (
                                tmp_block
                            ) in self.auto_parallel_main_prog.blocks:
                                for tmp_var_name in tmp_block.vars:
                                    tmp_var = tmp_block.vars[tmp_var_name]
                                    if (
                                        tmp_var.is_data
                                        and tmp_var.lod_level
                                        == recv_tensor.lod_level
                                    ):
                                        reset_lod_out = (
                                            Inserter.insert_reset_lod_op(
                                                block,
                                                idx + 1,
                                                recv_tensor,
                                                tmp_var,
                                                reshard_op.attr('op_role'),
                                            )
                                        )
                                        tensor_list.append(reset_lod_out)
                                        idx += 2
                                        self.has_recv[var_name][
                                            op_desc.src
                                        ] = reset_lod_out
                                        set_lod = True
                                        break
                                if set_lod:
                                    break
                            assert set_lod is True
                        else:
                            tensor_list.append(recv_tensor)
                            idx += 1
                            self.has_recv[var_name][op_desc.src] = recv_tensor
                else:
                    tensor_list.append(self.has_recv[var_name][op_desc.src])

            elif isinstance(op_desc, ConcatOpDesc):
                partition_index_list = op_desc.partition_index_list
                idx_list = [idx]
                for index, tensor in enumerate(tensor_list):
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list,
                        tensor,
                        partition_index_list[index],
                        block,
                        idx_list,
                        reshard_op.attr('op_role'),
                    )
                idx = idx_list[0]

            elif isinstance(op_desc, SliceOpDesc):
                assert (
                    len(partition_tensor_list) == 1 or not partition_tensor_list
                )
                to_slice_tensor = (
                    partition_tensor_list[0][0]
                    if len(partition_tensor_list) == 1
                    else source_tensor
                )
                new_name = unique_name.generate(var_name + "@RESHARD")
                target_tensor = Inserter.insert_slice_op(
                    block,
                    idx,
                    to_slice_tensor,
                    starts=op_desc.starts,
                    ends=op_desc.ends,
                    axes=op_desc.axes,
                    new_var_name=new_name,
                    op_role=reshard_op.attr('op_role'),
                )

                process_mesh = dist_attr[0]
                dims_mapping = dist_attr[1]

                tensor_attr = TensorDistributedAttribute()
                tensor_attr.dims_mapping = dims_mapping
                tensor_attr.process_mesh = process_mesh
                self.dist_context.set_tensor_dist_attr_for_program(
                    target_tensor, tensor_attr
                )

                if matched_op.type == "while":
                    # var_reshard_mapping records, for each while op input var, the reshard results it should be renamed to
                    if (
                        "var_reshard_mapping"
                        not in Resharder.while_block_info[
                            op.attr("sub_block").id
                        ].keys()
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id][
                            "var_reshard_mapping"
                        ] = {}
                    if (
                        var_name
                        not in Resharder.while_block_info[
                            op.attr("sub_block").id
                        ]["var_reshard_mapping"].keys()
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id][
                            "var_reshard_mapping"
                        ][var_name] = []
                    Resharder.while_block_info[op.attr("sub_block").id][
                        "var_reshard_mapping"
                    ][var_name].append([dist_attr, target_tensor.name])

                # rename op input name according to new name
                for op in block.ops:
                    # just for while op
                    while_op_X_append = []
                    for name in op.input_arg_names:
                        op_dist_attr = (
                            self.dist_context.get_op_dist_attr_for_program(op)
                        )
                        if name == var_name and op_dist_attr is not None:
                            if op.desc.id() == matched_op.desc.id():
                                if matched_op.type == "while":
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = (
                                        op_dist_attr.get_input_dist_attr(
                                            old_name
                                        )
                                    )
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr
                                    )
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping
                                    )
                                    if (
                                        old_name
                                        in op_dist_attr._inputs_dist_attrs
                                    ):
                                        op_dist_attr.del_input_dist_attr(
                                            old_name
                                        )
                                    while_op_X_append.append(new_name)
                                    continue
                                else:
                                    op.desc._rename_input(
                                        name, target_tensor.name
                                    )
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = (
                                        op_dist_attr.get_input_dist_attr(
                                            old_name
                                        )
                                    )
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr
                                    )
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping
                                    )
                                    op_dist_attr.del_input_dist_attr(old_name)
                                    continue

                            op_process_mesh = op_dist_attr.process_mesh
                            op_input_dims_mapping = (
                                op_dist_attr.get_input_dims_mapping(var_name)
                            )
                            # NOTE: For an op whose process mesh is a union, its input will not be renamed by other ops' reshard results for now, which means it may need more reshard operations.
                            if (
                                op_process_mesh == process_mesh
                                and op_input_dims_mapping == dims_mapping
                            ):
                                op.desc._rename_input(name, target_tensor.name)
                                old_name = name
                                new_name = target_tensor.name
                                assert old_name != new_name
                                op_input_dist_attr = (
                                    op_dist_attr.get_input_dist_attr(old_name)
                                )
                                op_dist_attr.set_input_dist_attr(
                                    new_name, op_input_dist_attr
                                )
                                op_dist_attr.set_input_dims_mapping(
                                    new_name, dims_mapping
                                )
                                op_dist_attr.del_input_dist_attr(old_name)

                    # for while op, the input X should reset
                    if while_op_X_append:
                        proto = OpProtoHolder.instance().get_op_proto(op.type)
                        op.desc.set_input(
                            proto.inputs[0].name,
                            op.input("X") + while_op_X_append,
                        )

    def _get_subblock_input_attrs(self, op, var_name):
        # NOTE: Multiple while loops are not supported
        assert op.type in _g_subblock_ops
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]
        ops = sub_block.ops
        input_attrs = []

        for op in ops:
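            # collect the distinct (process_mesh, dims_mapping, op_role) attrs of sub-block ops that consume var_name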
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if not dist_op:
                continue
            dist_attr = dist_op.dist_attr
            for name in op.input_arg_names:
                if name == var_name:
                    process_mesh = dist_attr.process_mesh
                    input_dims_mapping = dist_attr.get_input_dims_mapping(
                        var_name
                    )
                    has_exist = False
                    for input_attr in input_attrs:
                        if (
                            process_mesh == input_attr[0]
                            and input_dims_mapping == input_attr[1]
                        ):
                            has_exist = True
                            break
                    if not has_exist:
                        input_attrs.append(
                            [
                                process_mesh,
                                input_dims_mapping,
                                op.attr('op_role'),
                            ]
                        )
        return input_attrs

    def _get_subblock_output_attrs(self, op, var_name):
        assert op.type in _g_subblock_ops
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]
        ops = sub_block.ops
        output_attrs = []

        for op in ops:
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if not dist_op:
                continue
            dist_attr = dist_op.dist_attr
            for name in op.output_arg_names:
                if name == var_name:
                    process_mesh = dist_attr.process_mesh
                    output_dims_mapping = dist_attr.get_output_dims_mapping(
                        var_name
                    )
                    has_exist = False
                    for output_attr in output_attrs:
                        if (
                            process_mesh == output_attr[0]
                            and output_dims_mapping == output_attr[1]
                        ):
                            has_exist = True
                            break
                    if not has_exist:
                        output_attrs.append(
                            [
                                process_mesh,
                                output_dims_mapping,
                                op.attr('op_role'),
                            ]
                        )
        return output_attrs

    def _get_common_op_input_attrs(self, op, var_name):
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        dist_attr = dist_op.dist_attr
        op_process_mesh = dist_attr.process_mesh
        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.processes) & (
                set(op_process_mesh.processes)
            ) and len(process_mesh.processes) < len(op_process_mesh.processes):
                process_meshes.append(process_mesh)

        # an empty process_meshes means the op process mesh is not a union
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        input_dims_mapping = dist_attr.get_input_dims_mapping(var_name)
        input_attrs = []
        for process_mesh in process_meshes:
            input_attrs.append(
                [process_mesh, input_dims_mapping, op.attr('op_role')]
            )

        return input_attrs

    def get_op_input_attrs(self, op, var_name):
        op_input_attrs = []

        if op.type in _g_subblock_ops:
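            # for ops with a sub block, the attrs come from the sub block's uses of the var; fall back to its outputs if it is never used as an input there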
            op_input_attrs = self._get_subblock_input_attrs(op, var_name)
            if not op_input_attrs:
                op_input_attrs = self._get_subblock_output_attrs(op, var_name)
        else:
            op_input_attrs = self._get_common_op_input_attrs(op, var_name)

        assert op_input_attrs

        return op_input_attrs

    def _remove_global_process_mesh(self):
        """Remove global process mesh from dist_context.process_meshes"""
        processes = set()
        process_mesh_count = len(self.dist_context.process_meshes)
        if process_mesh_count > 1:
            global_process_mesh_idx = []
            has_sub_process_mesh = False
            for process_mesh in self.dist_context.process_meshes:
                for process in process_mesh.processes:
                    processes.add(process)
            for idx, process_mesh in enumerate(
                self.dist_context.process_meshes
            ):
                if len(set(process_mesh.processes)) == len(processes):
                    global_process_mesh_idx.append(idx)
                elif set(process_mesh.processes) < processes:
                    has_sub_process_mesh = True

            if has_sub_process_mesh:
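                # only drop the global mesh(es) when a strict sub mesh exists; otherwise keep them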
                for idx in reversed(global_process_mesh_idx):
                    self.dist_context.process_meshes.pop(idx)

    def _change_subblock_op_input_and_output(self, block_idx, block):
        if "var_reshard_mapping" in Resharder.while_block_info[block_idx]:
            var_reshard_mapping = Resharder.while_block_info[block_idx][
                "var_reshard_mapping"
            ]
            for op in block.ops:
                for var_name in op.input_arg_names:
                    if var_name in var_reshard_mapping:
                        # in the while sub block, the union process mesh has not been split before resharding the sub block
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        dist_attr = dist_op.dist_attr
                        target_name = None
                        for item in var_reshard_mapping[var_name]:
                            if (
                                dist_attr.process_mesh == item[0][0]
                                and dist_attr.get_input_dims_mapping(var_name)
                                == item[0][1]
                            ):
                                target_name = item[1]
                                break
                        if target_name is None:
                            continue
                        else:
                            op.desc._rename_input(var_name, target_name)
                            dist_op = self.dist_context.get_dist_op_for_program(
                                op
                            )
                            op_dist_attr = dist_op.dist_attr
                            old_name = var_name
                            new_name = target_name
                            assert old_name != new_name
                            op_input_dist_attr = (
                                op_dist_attr.get_input_dist_attr(old_name)
                            )
                            op_dist_attr.set_input_dist_attr(
                                new_name, op_input_dist_attr
                            )
                            op_dist_attr.del_input_dist_attr(old_name)

                # the outputs also need to be renamed when the output name is the same as the input name in an inplace op
                for var_name in op.output_arg_names:
                    # if the tensor has been resharded multiple times as an input, it is not supported now
                    if var_name in var_reshard_mapping:
                        if len(var_reshard_mapping[var_name]) > 1:
                            raise ValueError(
                                "It is not supported that the output is inplaced and the tensor has been resharded multiple times when used as input."
                            )
                        target_name = var_reshard_mapping[var_name][0][1]

                        op.desc._rename_output(var_name, target_name)
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        op_dist_attr = dist_op.dist_attr
                        old_name = var_name
                        new_name = target_name
                        assert old_name != new_name
                        op_output_dist_attr = op_dist_attr.get_output_dist_attr(
                            old_name
                        )
                        op_dist_attr.set_output_dist_attr(
                            new_name, op_output_dist_attr
                        )
                        op_dist_attr.del_output_dist_attr(old_name)

    def _reshard_input(self, block):
        idx = 0
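        # ops may be inserted during the traversal, so op counts are tracked to keep idx pointing at the next original op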
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]

            if self.is_special_op(op):
                idx += 1
                continue

            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None:
                op_input_dist_attrs = (
                    []
                )  # [(op_process_mesh, op_input_dims_mapping), (op_process_mesh, op_input_dims_mapping)]
                if op.type in _g_subblock_ops:
                    if not self.is_condition_replicative(op):
                        raise ValueError(
                            "Please check the condition due to the dims mapping is not replicative."
                        )
                    if (
                        op.attr("sub_block").id
                        not in Resharder.while_block_info
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id] = {}
                    Resharder.while_block_info[op.attr("sub_block").id][
                        "op_id"
                    ] = op.desc.id()

                if op.type == "while":
                    # the condition var process mesh is the same as the op's and its dims_mapping is replicative, so it does not need reshard
                    input_var_names = op.input("X")
                elif op.type == "conditional_block":
                    input_var_names = op.input("Input")
                else:
                    input_var_names = op.input_arg_names
                # sort to avoid a different while op X input order across ranks
                input_var_names.sort()

                idx_offset = 0
                for var_name in input_var_names:
                    # skip lod_tensor_blocking_queue_* vars
                    if "lod_tensor_blocking_queue" in var_name:
                        continue
                    var = get_var_with_recursion(
                        var_name, block, self.auto_parallel_main_prog
                    )
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var
                    )

                    # check whether all dims_mapping values of the union tensor are -1
                    is_union_process_mesh_tensor = False
                    if (
                        dist_tensor.dist_attr.process_mesh
                        not in self.dist_context.process_meshes
                        and self.dist_context.process_meshes
                    ):
                        is_union_process_mesh_tensor = True
                        assert dist_tensor.dist_attr.dims_mapping.count(
                            -1
                        ) == len(dist_tensor.dist_attr.dims_mapping)

                    op_input_attrs = self.get_op_input_attrs(op, var_name)
                    for input_attr in op_input_attrs:
                        input_process_mesh = None

                        # deal with union tensor
                        if is_union_process_mesh_tensor:
                            # if the op process mesh is a subset of the union tensor process mesh, no reshard is needed
                            if set(input_attr[0].processes) <= set(
                                dist_tensor.dist_attr.process_mesh.processes
                            ):
                                continue

                        if dist_tensor is not None and self.need_reshard(
                            dist_tensor, input_attr
                        ):
                            reshard_op_desc = self.find_op_desc_seq(
                                dist_tensor, input_attr
                            )
                            self.parse_op_desc(
                                block, reshard_op_desc, var_name, op, input_attr
                            )
                            cur_op_count = len(block.ops)
                            idx_offset = (
                                idx_offset + cur_op_count - pre_op_count
                            )
                            pre_op_count = cur_op_count
                idx = idx + idx_offset + 1
            else:
                idx += 1

    def _hadnle_recv(self, block, idx, var, op, send_rank, recv_rank):
        if self.rank_id == recv_rank:
            # if recv bool data, recv then cast
            if var.dtype == paddle.bool:
                recv_cast_out = block.create_var(
                    name=unique_name.generate(var.name + "@recv"),
                    shape=var.shape,
                    lod_level=var.lod_level,
                    dtype=paddle.int64,
                    type=var.type,
                )
                Inserter.insert_recv_op(
                    block,
                    idx + 1,
                    recv_cast_out,
                    send_rank,
                    recv_rank,
                    op.attr('op_role'),
                )
                reset_lod_out = None
                if var.lod_level != 0:
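                    # borrow the lod of a data var with the same lod_level to reset the lod of the received tensor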
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if (
                                tmp_var.is_data
                                and tmp_var.lod_level == var.lod_level
                            ):
                                reset_lod_out = block.create_var(
                                    name=unique_name.generate(
                                        var.name + "@RESETLOD"
                                    ),
                                    shape=recv_cast_out.shape,
                                    type=recv_cast_out.type,
                                    dtype=recv_cast_out.dtype,
                                    lod_level=recv_cast_out.lod_level,
                                )
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={'X': recv_cast_out, 'Y': tmp_var},
                                    outputs={'Out': reset_lod_out},
                                    attrs={'op_role': op.attr("op_role")},
                                )
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True

                # cast int64 to bool
                cast_op = block._insert_op(
                    idx + 2,
                    type='cast',
                    inputs={
                        'X': [recv_cast_out]
                        if reset_lod_out is None
                        else [reset_lod_out]
                    },
                    outputs={'Out': [var]},
                    attrs={
                        'in_dtype': recv_cast_out.dtype,
                        'out_dtype': var.dtype,
                        'op_role': op.attr('op_role'),
                    },
                )
                cast_op._set_attr('op_namescope', "/auto_parallel/reshard")
            else:
                if var.lod_level != 0:
                    recv_out = block.create_var(
                        name=unique_name.generate(var.name + "@recv"),
                        shape=var.shape,
                        lod_level=var.lod_level,
                        dtype=var.dtype,
                        type=var.type,
                    )
                    Inserter.insert_recv_op(
                        block,
                        idx + 1,
                        recv_out,
                        send_rank,
                        recv_rank,
                        op.attr('op_role'),
                    )
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if (
                                tmp_var.is_data
                                and tmp_var.lod_level == var.lod_level
                            ):
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={'X': recv_out, 'Y': tmp_var},
                                    outputs={'Out': var},
                                    attrs={'op_role': op.attr("op_role")},
                                )
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True
                else:
                    Inserter.insert_recv_op(
                        block,
                        idx + 1,
                        var,
                        send_rank,
                        recv_rank,
                        op.attr('op_role'),
                    )

    def _handle_send(self, block, idx, var, op, send_rank, recv_rank):
        if var.dtype == paddle.bool:
            cast_out = Inserter.insert_cast_op(
                block, idx + 1, var, op.attr('op_role'), paddle.int64
            )
            Inserter.insert_send_op(
                block,
                idx + 2,
                cast_out,
                send_rank,
                recv_rank,
                op.attr('op_role'),
            )
        else:
            Inserter.insert_send_op(
                block, idx + 1, var, send_rank, recv_rank, op.attr('op_role')
            )

    def _reshard_output(self, block):
        # insert send and recv op if output process mesh is different from tensor process mesh
        idx = 0
        # skip reader and ops whose process mesh is union
        skip_ops = [
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
            "write_to_array",
            "read_from_array",
        ]
        global _g_special_ops
        skip_ops += _g_special_ops
        skip_ops += _g_subblock_ops
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None and op.type not in skip_ops:
                idx_offset = 0
                for var_name in op.output_arg_names:
                    var = get_var_with_recursion(
                        var_name, block, self.auto_parallel_main_prog
                    )
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var
                    )
                    tensor_process_mesh = dist_tensor.dist_attr.process_mesh
                    output_attr = [
                        dist_op.dist_attr.process_mesh,
                        dist_op.dist_attr.get_output_dims_mapping(var_name),
                    ]
                    if dist_tensor is not None and self.need_reshard(
                        dist_tensor, output_attr, False
                    ):
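                        # processes that own the tensor but lie outside the op's process mesh must receive the output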
                        tensor_processes = set(
                            tensor_process_mesh.processes
                        ) - (
                            set(tensor_process_mesh.processes)
                            & set(output_attr[0].processes)
                        )
                        if tensor_processes:
                            if len(tensor_processes) != len(
                                output_attr[0].processes
                            ):
                                if dist_tensor.dist_attr.dims_mapping.count(
                                    -1
                                ) != len(
                                    dist_tensor.dist_attr.dims_mapping
                                ) or output_attr[
                                    1
                                ].count(
                                    -1
                                ) != len(
                                    output_attr[1]
                                ):
                                    raise ValueError(
                                        "The dims_mapping must be -1"
                                    )
                                else:
                                    for index, tensor_process in enumerate(
                                        tensor_processes
                                    ):
                                        recv_rank = tensor_process
                                        actual_index = index
                                        if index >= len(
                                            output_attr[0].processes
                                        ):
                                            actual_index = (
                                                index
                                                - len(output_attr[0].processes)
                                            ) % len(output_attr[0].processes)
                                        item = output_attr[0].processes[
                                            actual_index
                                        ]
                                        if recv_rank == item:
                                            continue
                                        if var.shape[0] == -1:
                                            new_shape = list(var.shape)
                                            new_shape[0] = self.batch_size
                                            var.desc.set_shape(new_shape)
                                        if self.rank_id == item:
                                            # if sending bool data, cast then send
                                            self._handle_send(
                                                block,
                                                idx,
                                                var,
                                                op,
                                                item,
                                                recv_rank,
                                            )
                                        if self.rank_id == recv_rank:
                                            # if receiving bool data, recv then cast
                                            self._hadnle_recv(
                                                block,
                                                idx,
                                                var,
                                                op,
                                                item,
                                                recv_rank,
                                            )
                            else:
                                for index, tensor_process in enumerate(
                                    tensor_processes
                                ):
                                    recv_rank = tensor_process
                                    item = output_attr[0].processes[index]
                                    if recv_rank == item:
                                        continue
                                    if var.shape[0] == -1:
                                        new_shape = list(var.shape)
                                        new_shape[0] = self.batch_size
                                        var.desc.set_shape(new_shape)
                                    if self.rank_id == item:
                                        # if sending bool data, cast then send
                                        self._handle_send(
                                            block, idx, var, op, item, recv_rank
                                        )
                                    if self.rank_id == recv_rank:
                                        # if receiving bool data, recv then cast
                                        self._hadnle_recv(
                                            block, idx, var, op, item, recv_rank
                                        )

                            cur_op_count = len(block.ops)
                            idx_offset = (
                                idx_offset + cur_op_count - pre_op_count
                            )
                            pre_op_count = cur_op_count

                idx = idx + idx_offset + 1
            else:
                idx += 1

    def reshard(self):
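        # The overall reshard pass: drop the global process mesh info first,
        # then reshard the inputs and outputs of every block, and finally prune
        # the vars and ops that the current rank does not need from the main
        # and startup programs.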
        self._remove_global_process_mesh()
        for block_idx, block in enumerate(self.auto_parallel_main_prog.blocks):
            # change the var_name before resharding sub block
            if block_idx in Resharder.while_block_info:
                self._change_subblock_op_input_and_output(block_idx, block)

            # reshard input
            self._reshard_input(block)

            # reshard output
            # NOTE: Only supports inserting send and recv ops when the output process mesh differs from the tensor process mesh
            self._reshard_output(block)

        # remove unneeded vars and ops from the main program
        Remover.remove_no_need_in_main(
            self.auto_parallel_main_prog,
            self.dist_context,
            self.rank_id,
            self.dist_params_grads,
        )

        # remove unneeded vars and ops from the startup program
        Remover.remove_no_need_in_startup(
            self.auto_parallel_main_prog, self.auto_parallel_startup_prog
        )

        # reset the shared state once the removal pass has finished
        Resharder.while_block_info = {}

    def get_cost(self, op, tensor, cluster):
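        # Estimate the reshard cost (communication and local computation) of
        # feeding `tensor` into `op` on the given cluster.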
        # NOTE: The program should be the serial_program, which has not been partitioned
        global _g_special_ops
        not_supported_op_type = _g_special_ops + ["while"]
        reshard_op_cost = None
        if op.type in not_supported_op_type:
            return reshard_op_cost
        else:
            tensor_name = tensor.name
            if tensor_name == "lod_tensor_blocking_queue_0":
                return reshard_op_cost
            else:
                dist_tensor = self.dist_context.get_dist_tensor_for_program(
                    tensor
                )
                # simplified processing: ignore union process mesh and output reshard
                dist_op = self.dist_context.get_dist_op_for_program(op)
                dims_mapping = dist_op.dist_attr.get_input_dims_mapping(
                    tensor.name
                )
                process_mesh = dist_op.dist_attr.process_mesh
                dist_attr = [
                    process_mesh,
                    dims_mapping,
                    dist_op.serial_op.attr('op_role'),
                ]
                if dist_tensor is not None and self.need_reshard(
                    dist_tensor, dist_attr
                ):
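                    # Count the reshard cost of a tensor only once for the same
                    # dims_mapping and process_mesh to avoid double counting.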
                    if tensor_name not in self._has_resharded:
                        self._has_resharded[tensor_name] = [dist_op]
                    else:
                        for item in self._has_resharded[tensor_name]:
                            item_dist_attr = item.dist_attr
                            item_dims_mapping = (
                                item_dist_attr.get_input_dims_mapping(
                                    tensor_name
                                )
                            )
                            item_process_mesh = item_dist_attr.process_mesh
                            if (
                                dims_mapping == item_dims_mapping
                                and item_process_mesh == process_mesh
                            ):
                                return reshard_op_cost
                        self._has_resharded[tensor_name].append(dist_op)

                    reshard_op_desc = self.find_op_desc_seq(
                        dist_tensor, dist_attr, serial=True
                    )
                    dtype = dist_tensor.serial_tensor.dtype
                    reshard_op_cost = self.parse_op_desc_for_cost(
                        reshard_op_desc, dtype, cluster
                    )

        return reshard_op_cost

    def _concat_partitions_for_cost(
        self,
        partition_tensor_list,
        partition_index,
        dtype,
        rank_id,
        local_rank_comp_cost,
        cluster,
    ):
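        # Merge `partition_index` into `partition_tensor_list`, appending a
        # ConcatOpCost to `local_rank_comp_cost[rank_id]` every time two
        # partitions can be concatenated along a common axis.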
        if not partition_tensor_list:
            partition_tensor_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                (
                    concat_axis,
                    first_order,
                    new_partition,
                ) = Resharder.compute_concat_info(
                    partition_tensor_list[i], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    concat_desc = {}
                    concat_desc["op"] = "concat"
                    concat_desc["attrs"] = {"axis": concat_axis}
                    if first_order == 0:
                        concat_desc["inputs"] = {
                            "X": [
                                (dtype, partition_tensor_list[i]),
                                (dtype, partition_index),
                            ]
                        }
                    else:
                        concat_desc["inputs"] = {
                            "X": [
                                (dtype, partition_index),
                                (dtype, partition_tensor_list[i]),
                            ]
                        }
                    partition_tensor_list.pop(i)
                    if rank_id not in local_rank_comp_cost:
                        local_rank_comp_cost[rank_id] = []
                    local_rank_comp_cost[rank_id].append(
                        ConcatOpCost(op_desc=concat_desc, cluster=cluster)
                    )
                    self._concat_partitions_for_cost(
                        partition_tensor_list,
                        new_partition,
                        dtype,
                        rank_id,
                        local_rank_comp_cost,
                        cluster,
                    )
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append(partition_index)

    def parse_op_desc_for_cost(self, reshard_op_desc, dtype, cluster):
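        # Translate the per-rank reshard op descriptions into cost objects:
        # communication costs grouped by overlapping rank groups, plus the
        # local computation costs of each rank.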
        def _get_idx(comm_ranks, group_ranks):
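            # Return the index of the first recorded rank set that overlaps
            # `group_ranks` (merging the new ranks into it) and whether the two
            # sets are identical; (None, False) means no overlap was found.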
            res, is_the_same = None, False
            idx = 0
            while idx < len(comm_ranks):
                if comm_ranks[idx] == set(group_ranks):
                    is_the_same = True

                for rank in group_ranks:
                    if rank in comm_ranks[idx]:
                        res = idx
                        comm_ranks[idx].add(rank)
                if res is None:
                    idx += 1
                else:
                    break
            return res, is_the_same

        comm_context = CommContext(cluster)
        # run communication ops before computation ops
        # TODO: Communication cost is not calculated when the var has been transferred by the same group in the past
        comm_costs = []
        comm_ranks = []
        local_rank_comp_cost = {}
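        # comm_costs groups communication op costs by overlapping rank groups,
        # while local_rank_comp_cost maps each rank to its computation op costs.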
        for key in reshard_op_desc:
            partition_tensor_list = []
            op_desc_list = reshard_op_desc[key]
            for op_desc in op_desc_list:
                if isinstance(op_desc, SendOpDesc):
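                    # Point-to-point transfer: the send cost is built between
                    # the current rank (key) and the destination rank.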
                    group_ranks = [key, op_desc.dst]
                    shape = op_desc.shape
                    send_desc = build_comm_desc(
                        "send_v2", group_ranks, dtype, shape
                    )
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append(
                            [
                                (
                                    group_ranks,
                                    SendOpCost(
                                        op_desc=send_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            ]
                        )
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (
                                    group_ranks,
                                    SendOpCost(
                                        op_desc=send_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            )
                elif isinstance(op_desc, AllGatherOpDesc):
                    # NOTE: fill_constant and other auxiliary ops are not counted because their cost is negligible
                    group_ranks = op_desc.group
                    shape = op_desc.shape
                    allgather_desc = build_comm_desc(
                        "c_allgather", group_ranks, dtype, shape
                    )
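                    # The allgather result is concatenated along axis 0, so the
                    # tensor later fed to split has its first dim multiplied by
                    # the group size.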
                    split_inputs_shape = []
                    for idx, dim in enumerate(shape):
                        if idx == 0:
                            split_inputs_shape.append(dim * len(group_ranks))
                        else:
                            split_inputs_shape.append(dim)
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850
                        comm_costs.append(
                            [
                                (
                                    group_ranks,
                                    AllgatherOpCost(
                                        op_desc=allgather_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            ]
                        )
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (
                                    group_ranks,
                                    AllgatherOpCost(
                                        op_desc=allgather_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            )
                    # calculate the split op cost
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
                    split_desc = {}
                    split_desc["op"] = "split"
                    split_desc["inputs"] = {
                        "inputs": [(dtype, split_inputs_shape)]
                    }
                    split_desc["attrs"] = {"num": len(group_ranks), "axis": 0}
                    local_rank_comp_cost[key].append(
                        SplitOpCost(op_desc=split_desc, cluster=cluster)
                    )
                elif isinstance(op_desc, ConcatOpDesc):
                    partition_index_list = op_desc._partition_index_list
                    for idx, partition_index in enumerate(partition_index_list):
                        self._concat_partitions_for_cost(
                            partition_tensor_list,
                            partition_index,
                            dtype,
                            key,
                            local_rank_comp_cost,
                            cluster,
                        )

                elif isinstance(op_desc, SliceOpDesc):
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
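                    # After concatenation at most one merged partition remains;
                    # its extents (or op_desc.shape when nothing was concatenated)
                    # give the input shape of the slice.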
                    assert (
                        len(partition_tensor_list) == 1
                        or not partition_tensor_list
                    )
                    to_slice_tensor_shape = []
                    if len(partition_tensor_list) == 1:
                        for item in partition_tensor_list[0]:
                            to_slice_tensor_shape.append(item[1] - item[0])
                    else:
                        to_slice_tensor_shape = op_desc.shape
                    slice_desc = {}
                    slice_desc["op"] = "slice"
                    infer_flags = list(1 for i in range(len(op_desc.axes)))
                    slice_desc["attrs"] = {
                        "axes": op_desc.axes,
                        "starts": op_desc.starts,
                        "ends": op_desc.ends,
                        "infer_flags": infer_flags,
                    }
                    slice_desc["inputs"] = {
                        "Input": [(dtype, to_slice_tensor_shape)]
                    }
                    local_rank_comp_cost[key].append(
                        SliceOpCost(op_desc=slice_desc, cluster=cluster)
                    )

        res = (comm_costs, local_rank_comp_cost)

        return res