# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

from collections import OrderedDict
from functools import reduce

import paddle
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.framework import LayerHelper, OpProtoHolder, Program, core
from paddle.utils import unique_name

from .cost import (
    AllgatherOpCost,
    CommContext,
    ConcatOpCost,
    SendOpCost,
    SliceOpCost,
    SplitOpCost,
    build_comm_desc,
)
from .dist_attribute import TensorDistAttr
from .dist_context import DistributedContext
from .process_group import new_process_group
from .utils import is_gradient_clip_op

# NOTE: Ops in _g_special_ops or _g_gradient_clip_ops will not be resharded.
_g_special_ops = ['check_finite_and_unscale', 'update_loss_scaling']
_g_gradient_clip_ops = [
    "sum",
    "sqrt",
    "fill_constant",
    "elementwise_max",
    "elementwise_div",
]
_g_subblock_ops = ["while", "conditional_block"]


def get_var_with_recursion(var_name, block, program):
    """Get var in the parent block if not found in the current block"""
    var = None
    if var_name in block.vars:
        var = block.vars[var_name]
    else:
        var = block._var_recursive(var_name)
    assert var is not None, f"{var_name} is not found"

    return var


class AllGatherOpDesc:
    """
    Describe the allgather op in the reshard phase.

    Args:
        group (list): Process group.
        shape (list): The tensor shape.
        is_bool (bool): Whether the data to all-gather is bool. Default: False.
    """

    def __init__(self, group, shape, is_bool=False):
        self._group = group
        self._desc = "all_gather"
        self._shape = shape
        self._is_bool = is_bool

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def group(self):
        return self._group

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, group: {self._group}, shape: {self._shape}, is_bool: {self._is_bool}."


class AllGatherConcatOpDesc:
    """
    Describe the c_concat op in the reshard phase.

    Args:
        group (list): Process group.
        shape (list): The tensor shape.
        is_bool (bool): Whether the data to concat is bool. Default: False.
    """

    def __init__(self, group, shape, is_bool=False):
        self._group = group
        self._desc = "c_concat"
        self._shape = shape
        self._is_bool = is_bool

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def group(self):
        return self._group

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, group: {self._group}, shape: {self._shape}, is_bool: {self._is_bool}."


class SendOpDesc:
    """
    Describe the send op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process that sends the tensor.
        dst (int): The destination process that receives the tensor.
        is_bool (bool): Whether the data to send is bool. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._dst = dst
        self._partition_index = partition_index
        self._desc = "send"
        self._shape = []
        self._is_bool = is_bool
        self._src = src

    @property
    def src(self):
        return self._src

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def dst(self):
        return self._dst

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class RecvOpDesc:
    """
    Describe the recv op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process that sends the tensor.
        dst (int): The destination process that receives the tensor.
        is_bool (bool): Whether the data to receive is bool. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._src = src
        self._partition_index = partition_index
        self._desc = "recv"
        self._shape = []
        self._is_bool = is_bool
        self._dst = dst

    @property
    def dst(self):
        return self._dst

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def src(self):
        return self._src

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class SliceOpDesc:
    """
    Describe the slice op in the reshard phase.

    Args:
        starts (list): It represents start indices of corresponding axis in ``axes``.
        ends (list):  It represents end indices of corresponding axis in ``axes``.
        axes (list):  Axes that `starts` and `ends` apply to.
        shape (list): The shape of the tensor to be sliced.
    """

    def __init__(self, starts, ends, axes, shape=None):
        self._starts = starts
        self._ends = ends
        self._axes = axes
        self._desc = "slice"
        self._shape = shape

    @property
    def starts(self):
        return self._starts

    @property
    def ends(self):
        return self._ends

    @property
    def axes(self):
        return self._axes

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        if self._shape is not None:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}, shape: {self._shape}."
        else:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}."


class ConcatOpDesc:
    """
    Describe the concat op in the reshard phase.

    Args:
        partition_index_list (list): The list containing all partition indices.
    """

    def __init__(self, partition_index_list):
        self._partition_index_list = partition_index_list
        self._desc = "concat"

    @property
    def partition_index_list(self):
        return self._partition_index_list

    @property
    def desc(self):
        return self._desc

    def __repr__(self):
        return f"op: {self._desc}, partition_index_list: {self._partition_index_list}."


class Inserter:
    """Insert op required in the reshard process."""

    @staticmethod
    def insert_cast_op(block, idx, tensor, op_role, tensor_type):
        # to avoid name conflict with framework
        new_var_name = paddle.utils.unique_name.generate_with_ignorable_key(
            ".".join(["cast@RESHARD", 'tmp'])
        )
        out = block.create_var(
            name=new_var_name,
            dtype=tensor_type,
            type=tensor.type,
            lod_level=tensor.lod_level,
        )
        cast_op = block._insert_op(
            idx,
            type='cast',
            inputs={'X': [tensor]},
            outputs={'Out': [out]},
            attrs={
                'in_dtype': tensor.dtype,
                'out_dtype': out.dtype,
                'op_role': op_role,
            },
        )
        cast_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_send_op(block, idx, tensor, src, dst, op_role):
        """Insert send op into block at the given index."""
        op_type = 'send_v2'
        # use pair comm group
        process_group = new_process_group([src, dst])
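        # Descriptive note: 'peer' below is the destination's index inside
        # this two-rank group, not its global rank id.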
        send_op = block._insert_op(
            idx,
            type=op_type,
            inputs={'X': [tensor]},
            attrs={
                'ring_id': process_group.id,
                'peer': process_group.ranks.index(dst),
                'use_calc_stream': True,
                'op_role': op_role,
                'dynamic_shape': True,
            },
        )
        send_op._set_attr('op_namescope', "/auto_parallel/reshard")

    @staticmethod
    def insert_recv_op(block, idx, tensor, src, dst, op_role):
        """Insert recv op into block at the given index."""
        op_type = 'recv_v2'
        # use pair group
        process_group = new_process_group([src, dst])
        recv_op = block._insert_op(
            idx,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [tensor]},
            attrs={
                'ring_id': process_group.id,
                'peer': process_group.ranks.index(src),
                'out_shape': tensor.shape,
                'dtype': tensor.dtype,
                'use_calc_stream': True,
                'op_role': op_role,
                'dynamic_shape': True,
            },
        )
        recv_op._set_attr('op_namescope', "/auto_parallel/reshard")

    @staticmethod
    def insert_reset_lod_op(block, idx, X, Y, op_role):
        """Insert reset_lod op into block at the given index."""

        new_var_name = paddle.utils.unique_name.generate_with_ignorable_key(
            ".".join(["reset_lod@RESHARD", 'tmp'])
        )
        reset_lod_out = block.create_var(
            name=new_var_name,
            shape=X.shape,
            type=X.type,
            dtype=X.dtype,
            lod_level=X.lod_level,
        )

        reset_op = block._insert_op(
            idx,
            type="lod_reset",
            inputs={'X': X, 'Y': Y},
            outputs={'Out': reset_lod_out},
            attrs={'op_role': op_role},
        )
        reset_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return reset_lod_out

    @staticmethod
    def insert_concat_op(block, idx, tensors, axis, op_role):
        """Insert concat op into block at the given block."""
        inputs = {'X': tensors}
        attrs = {}
        attrs['axis'] = axis
        attrs['op_role'] = op_role
        # to avoid name conflict with framework
        helper = LayerHelper('concat@RESHARD', **locals())
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.utils.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensors[0].dtype,
                shape=None,
                lod_level=tensors[0].lod_level,
                type=tensors[0].type,
                persistable=False,
                stop_gradient=False,
            )
        concat_op = block._insert_op(
            idx,
            type='concat',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
        )
        concat_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_slice_op(
        block, idx, tensor, starts, ends, axes, new_var_name, op_role
    ):
        """Insert slice op into block at the given index."""
        # This is a hack that inserts a split or assign op to get the sliced tensor:
        # 1. [128, 128] => [64, 128]: split
        # 2. [128, 128] => [128, 128]: assign
        # 3. [128, 128] => [64, 64]: slice, which will be replaced by multiple splits
        global_shape = tensor.shape
        slice_shape = [ends[i] - starts[i] for i in range(len(starts))]
        diff_dims = []
        for index, item in enumerate(slice_shape):
            if item != global_shape[index]:
                diff_dims.append(index)

        # use assign
        if len(diff_dims) == 0:
            out = block.create_var(
                name=new_var_name,
                dtype=tensor.dtype,
                type=tensor.type,
                shape=slice_shape,
                lod_level=tensor.lod_level,
            )
            inputs = {'X': [tensor]}
            outputs = {"Out": [out]}
            attrs = {"in_place": False, "op_role": op_role}
            assign_op = block._insert_op(
                idx, type="assign", inputs=inputs, outputs=outputs, attrs=attrs
            )
            assign_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use split once
        elif len(diff_dims) == 1:
            diff_dim = diff_dims[0]
            num_or_sections = global_shape[diff_dim] // slice_shape[diff_dim]
            axis = diff_dim
            cur_idx = starts[diff_dim] // slice_shape[diff_dim]
            input_shape = global_shape
            inputs = {'X': tensor}
            attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
            new_shape = []
            for index, item in enumerate(tensor.shape):
                if index != axis:
                    new_shape.append(item)
                else:
                    new_shape.append(item // num_or_sections)
            with paddle.static.program_guard(block.program):
                outs = [
                    block.create_var(
                        name=paddle.utils.unique_name.generate_with_ignorable_key(
                            ".".join(['split@RESHARD', 'tmp'])
                        ),
                        dtype=tensor.dtype,
                        shape=None,
                        type=tensor.type,
                        persistable=False,
                        lod_level=tensor.lod_level,
                        stop_gradient=False,
                    )
                    for i in range(num_or_sections)
                ]
                out = outs[cur_idx]
            split_op = block._insert_op(
                idx,
                type="split",
                inputs=inputs,
                outputs={'Out': outs},
                attrs=attrs,
            )
            split_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use slice
        else:
            inputs = {'Input': tensor}
            infer_flags = [1 for i in range(len(axes))]
            attrs = {
                "axes": axes,
                "starts": starts,
                "ends": ends,
                "infer_flags": infer_flags,
                'op_role': op_role,
            }
            out = block.create_var(
                name=new_var_name,
                dtype=tensor.dtype,
                type=tensor.type,
                lod_level=tensor.lod_level,
            )
            slice_op = block._insert_op(
                idx,
                type="slice",
                inputs=inputs,
                outputs={'Out': [out]},
                attrs=attrs,
            )
            slice_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

    @staticmethod
    def insert_split_op(block, idx, tensor, num_or_sections, op_role, axis=0):
        """Insert split op into block at the given index."""
        helper = LayerHelper('split@RESHARD', **locals())
        input_shape = tensor.shape
        inputs = {'X': tensor}
        attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
        new_shape = []
        for index, item in enumerate(tensor.shape):
            if index != axis:
                new_shape.append(item)
            else:
                new_shape.append(item // num_or_sections)
        with paddle.static.program_guard(block.program):
            outs = [
                block.create_var(
                    name=paddle.utils.unique_name.generate_with_ignorable_key(
                        ".".join([helper.name, 'tmp'])
                    ),
                    dtype=tensor.dtype,
                    shape=None,
                    lod_level=tensor.lod_level,
                    type=tensor.type,
                    persistable=False,
                    stop_gradient=False,
                )
                for i in range(num_or_sections)
            ]
        split_op = block._insert_op(
            idx, type="split", inputs=inputs, outputs={'Out': outs}, attrs=attrs
        )
        split_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return outs

    @staticmethod
    def insert_fill_constant_op(block, idx, op_role):
        """Insert fill constant op into block at the given index."""
        # to avoid name conflict with framework
        helper = LayerHelper('fill_constant@RESHARD', **locals())
        # use paddle.int64 as dtype
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.utils.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=paddle.int64,
                shape=None,
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False,
            )
        inputs = {}
        attrs = {'force_cpu': False}
        attrs['str_value'] = str(int("1"))
        attrs['value'] = int("1")
        attrs['dtype'] = out.dtype
        attrs['op_role'] = op_role
        paddle.utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=[0], op_type='fill_constant'
        )
        fillconstant_op = block._insert_op(
            idx,
            type='fill_constant',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
        )
        out.stop_gradient = True
        fillconstant_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_allgather_op(block, idx, tensor, ranks, op_role):
        """Insert allgather op into block at the given index."""
        tensor_list = []
        group = new_process_group(ranks)
        idx_offset = 0

        # instantiate the process group before inserting the allgather op: the dummy
        # fill_constant + c_allreduce_sum below forces the communicator to be created.
        if not group.is_instantiate():
            # insert fill_constant op
            fill_constant_out = Inserter.insert_fill_constant_op(
                block, idx, op_role
            )
            fill_constant_out.stop_gradient = True

            # insert c_allreduce_sum op
            allreduce_op = block._insert_op(
                idx + 1,
                type="c_allreduce_sum",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={
                    'ring_id': 0,
                    'use_calc_stream': True,
                    'op_role': op_role,
                },
            )
            allreduce_op._set_attr('op_namescope', "/auto_parallel/reshard")
            # insert c_sync_calc_stream op
            sync_calc_op = block._insert_op(
                idx + 2,
                type="c_sync_calc_stream",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={'op_role': op_role},
            )
            sync_calc_op._set_attr('op_namescope', "/auto_parallel/reshard")
            idx_offset = 3

        # insert c_allgather op
        op_type = 'c_allgather'
        # to avoid name conflict with framework
        helper = LayerHelper(op_type + "@RESHARD", **locals())
        with paddle.static.program_guard(block.program):
            allgather_out = block.create_var(
                name=paddle.utils.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensor.dtype,
                shape=None,
                lod_level=tensor.lod_level,
                type=tensor.type,
                persistable=False,
                stop_gradient=False,
            )
        allgather_op = block._insert_op(
            idx + idx_offset,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [allgather_out]},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'nranks': group.nranks,
                'op_role': op_role,
            },
        )
        allgather_op._set_attr('op_namescope', "/auto_parallel/reshard")
        idx_offset += 1

        # insert split op
        split_out = Inserter.insert_split_op(
            block, idx + idx_offset, allgather_out, group.nranks, op_role
        )
        idx_offset += 1
        tensor_list.extend(split_out)
        return tensor_list, idx_offset

    @staticmethod
    def insert_c_concat_op(block, idx, tensor, ranks, op_role):
        """Insert c_concat op into block at the given index."""
        group = new_process_group(ranks)
        idx_offset = 0

        # insert c_concat op
        op_type = 'c_concat'
        # to avoid name conflict with framework
        helper = LayerHelper(op_type + "@RESHARD", **locals())
        with paddle.static.program_guard(block.program):
            c_concat_out = block.create_var(
                name=paddle.utils.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])
                ),
                dtype=tensor.dtype,
                shape=None,
                lod_level=tensor.lod_level,
                type=tensor.type,
                persistable=False,
                stop_gradient=False,
            )
        cur_rank = paddle.distributed.get_rank()
        c_concat_op = block._insert_op(
            idx + idx_offset,
            type=op_type,
            inputs={'X': [tensor]},
            outputs={'Out': [c_concat_out]},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'use_model_parallel': True,
                'nranks': group.nranks,
                'op_role': op_role,
                'rank': group.ranks.index(cur_rank) if cur_rank in ranks else 0,
            },
        )
        c_concat_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return c_concat_out

    @staticmethod
    def concat_partitions_with_op(
        partition_tensor_list, tensor, partition_index, block, idx, op_role
    ):
        """Concat the tensors and insert concat op."""
        if not partition_tensor_list:
            partition_tensor_list.append((tensor, partition_index))
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                (
                    concat_axis,
                    first_order,
                    new_partition,
                ) = Resharder.compute_concat_info(
                    partition_tensor_list[i][1], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    _ = (
                        Inserter.insert_concat_op(
                            block,
                            idx[0],
                            [partition_tensor_list[i][0], tensor],
                            concat_axis,
                            op_role,
                        )
                        if first_order == 0
                        else Inserter.insert_concat_op(
                            block,
                            idx[0],
                            [tensor, partition_tensor_list[i][0]],
                            concat_axis,
                            op_role,
                        )
                    )
                    partition_tensor_list.pop(i)
                    idx[0] += 1
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list,
                        _,
                        new_partition,
                        block,
                        idx,
                        op_role,
                    )
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append((tensor, partition_index))


class Remover:
    """Remove var and op in the reshard process."""

    @staticmethod
    def remove_no_need_ops(auto_parallel_main_prog, dist_context, rank_id):
        """Remove no need ops in the main program"""
        not_remove_op_ref = [
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
        ]

        # NOTE: Nested sub-blocks are not supported yet.
        remove_block_order = []
        for block_idx in Resharder.while_block_info:
            remove_block_order.append(block_idx)

        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            if block_idx not in remove_block_order:
                remove_block_order.append(block_idx)

        # the sub block should be removed first
        for block_idx in remove_block_order:
            remove_op_idx = []
            block = auto_parallel_main_prog.blocks[block_idx]
            ops = block.ops
            vars = block.vars
            for idx, op in enumerate(ops):
                if op.type == "read":
                    dim_list = []
                    for var_name in op.output_arg_names:
                        dim_list.extend(
                            get_var_with_recursion(
                                var_name, block, auto_parallel_main_prog
                            ).shape
                        )
                    for i in range(idx, -1, -1):
                        if ops[i].type == "create_py_reader":
                            ops[i]._set_attr("shape_concat", dim_list)
                            break
                    continue

                # replace the input and output of the c_sync_comm_stream op in the pipeline scenario.
                if op.type == "c_sync_comm_stream":
                    need_save = []
                    for var_name in op.input_arg_names:
                        process_mesh = (
                            dist_context.get_tensor_dist_attr_for_program(
                                get_var_with_recursion(
                                    var_name, block, auto_parallel_main_prog
                                )
                            ).process_mesh
                        )
                        if rank_id in process_mesh.process_ids:
                            need_save.append(var_name)
                    if not need_save:
                        remove_op_idx.append(idx)
                        continue

                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, need_save)
                    op.desc.set_output(proto.outputs[0].name, need_save)
                    continue

                # judge whether the other ops should be removed.
                op_dist_attr = dist_context.get_op_dist_attr_for_program(op)
                if op_dist_attr is not None:
                    op_process_mesh = op_dist_attr.process_mesh
                    if (
                        rank_id not in op_process_mesh.process_ids
                        and op.type not in not_remove_op_ref
                    ):
                        remove_op_idx.append(idx)

            for idx in remove_op_idx[::-1]:
                block._remove_op(idx, sync=False)
            block._sync_with_cpp()

    @staticmethod
    def remove_no_need_vars(
        auto_parallel_main_prog, dist_params_grads, feed_var_names
    ):
        """Remove no need vars in the main program"""
        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            remove_vars = set()
            ops = block.ops
            vars = block.vars
            need_vars = set()
            for op in ops:
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
                for var_name in op.output_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
            for var in vars:
                if var not in need_vars:
                    remove_vars.add(var)

            # change dist_params_grads; the optimize ops are only in block 0.
            if block_idx == 0:
                param_grad_map = {}
                for op in ops:
                    if int(op.attr('op_role')) == int(OpRole.Optimize):
                        if (
                            "Param" in op.input_names
                            and "Grad" in op.input_names
                        ):
                            param_name = op.input("Param")[0]
                            grad_name = op.input("Grad")[0]
                            param_grad_map[param_name] = grad_name

                need_remove_idx = []
                for idx, item in enumerate(dist_params_grads):
                    if item[0].name not in param_grad_map.keys():
                        need_remove_idx.append(idx)

                for idx in need_remove_idx[::-1]:
                    dist_params_grads.pop(idx)

                idx = 0
                while idx < len(dist_params_grads):
                    param_name = dist_params_grads[idx][0].name
                    grad_name = dist_params_grads[idx][1].name
                    if grad_name != param_grad_map[param_name]:
                        dist_params_grads[idx] = (
                            vars[param_name],
                            vars[param_grad_map[param_name]],
                        )
                    idx += 1

            for var in remove_vars:
                if var in feed_var_names:
                    continue
                block._remove_var(var)

    @staticmethod
    def remove_no_need_in_main(
        auto_parallel_main_prog, dist_context, rank_id, dist_params_grads
    ):
        """Remove no need vars and ops in the main program."""
        Remover.remove_no_need_ops(
            auto_parallel_main_prog, dist_context, rank_id
        )
        Resharder.change_while_op_input_and_output(
            auto_parallel_main_prog, dist_context
        )
        # vars in 'feed_var_names' cannot be removed from auto_parallel_main_prog
        feed_var_names = []
        for var in sum(list(dist_context.serial_feed_vars.values()), []):
            feed_var_names.append(var.name)
        Remover.remove_no_need_vars(
            auto_parallel_main_prog, dist_params_grads, feed_var_names
        )

    @staticmethod
    def remove_no_need_in_startup(
        auto_parallel_main_prog, auto_parallel_startup_prog
    ):
        """Remove no need vars and ops in the startup program."""
        main_input_vars = set()
        main_ops = auto_parallel_main_prog.global_block().ops
        for op in main_ops:
            for var_name in op.input_arg_names:
                main_input_vars.add(var_name)

        startup_block = auto_parallel_startup_prog.global_block()
        startup_output_vars = set()
        startup_ops = startup_block.ops
        for op in startup_ops:
            # skip c_sync_comm_stream op
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                startup_output_vars.add(var_name)

        need_vars = set()
        for var_name in startup_output_vars:
            if var_name in main_input_vars:
                need_vars.add(var_name)

        startup_ops = startup_block.ops
        actual_need_vars = set()
        for idx, op in enumerate(startup_ops):
            is_need_op = False
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                if var_name in need_vars:
                    is_need_op = True
                    break
            if is_need_op:
                for var_name in op.output_arg_names:
                    actual_need_vars.add(var_name)
                for var_name in op.input_arg_names:
                    actual_need_vars.add(var_name)

        remove_vars = set()
        for var_name in startup_block.vars:
            if var_name not in actual_need_vars:
                remove_vars.add(var_name)
        for var in remove_vars:
            startup_block._remove_var(var)

        remove_op_idx = []
        vars = startup_block.vars
        for idx, op in enumerate(startup_block.ops):
            is_no_need_op = False
            if op.type == "c_sync_comm_stream":
                var_names = []
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        var_names.append(var_name)
                if not var_names:
                    remove_op_idx.append(idx)
                else:
                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, var_names)
                    op.desc.set_output(proto.outputs[0].name, var_names)
                continue

            for var_name in op.output_arg_names:
                if var_name not in vars:
                    is_no_need_op = True
                    break
            if is_no_need_op:
                remove_op_idx.append(idx)
        for idx in remove_op_idx[::-1]:
            startup_block._remove_op(idx, sync=False)
        startup_block._sync_with_cpp()


class Resharder:
    """
    Reshard tensor in the program according to its distributed attribute and corresponding op distributed attribute.

    Args:
        auto_parallel_main_prog (Program): An auto parallel main program.
        auto_parallel_startup_prog (Program): An auto parallel startup program.
        rank_id (int): The process id.
        dist_context (DistributedContext): The distributed context of this rank.
        dist_params_grads (list): The list contains the tuple of param and grad.
        batch_size (int): The batch size. Default: None.
    """

    while_block_info = {}

    def __init__(
        self,
        auto_parallel_main_prog,
        auto_parallel_startup_prog,
        rank_id,
        dist_context,
        dist_params_grads,
        batch_size=None,
    ):
        assert isinstance(auto_parallel_main_prog, Program), (
            "The type of auto_parallel_main_prog should be Program, "
            "but got {}.".format(type(auto_parallel_main_prog))
        )
        if auto_parallel_startup_prog is not None:
            assert isinstance(auto_parallel_startup_prog, Program), (
                "The type of auto_parallel_startup_prog should be Program or None, "
                "but got {}.".format(type(auto_parallel_startup_prog))
            )
        assert isinstance(
            rank_id, int
        ), "The type of rank_id should be int, " "but got {}.".format(
            type(rank_id)
        )
        assert isinstance(dist_context, DistributedContext), (
            "The type of dist_context should be DistributedContext, "
            "but got {}.".format(type(dist_context))
        )

        if batch_size is not None:
            assert isinstance(
                batch_size, int
            ), "The type of batch_size should be int, " "but got {}.".format(
                type(batch_size)
            )

        self._auto_parallel_main_prog = auto_parallel_main_prog
        self._auto_parallel_startup_prog = auto_parallel_startup_prog
        self._rank_id = rank_id
        self._dist_context = dist_context
        self._dist_params_grads = dist_params_grads
        self._batch_size = batch_size
        self._has_sent = {}
        self._has_recv = {}
        self._has_allgather = {}
        # to avoid resharding repeatedly
        self._has_resharded = {}

    @property
    def auto_parallel_main_prog(self):
        return self._auto_parallel_main_prog

    @property
    def auto_parallel_startup_prog(self):
        return self._auto_parallel_startup_prog

    @property
    def rank_id(self):
        return self._rank_id

    @property
    def dist_context(self):
        return self._dist_context

    @property
    def dist_params_grads(self):
        return self._dist_params_grads

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def has_sent(self):
        return self._has_sent

    @property
    def has_recv(self):
        return self._has_recv

    @property
    def has_allgather(self):
        return self._has_allgather

    @staticmethod
    def compute_partition_shape(complete_shape, dims_mapping, process_shape):
        """Compute the shape of partition."""
        partition_shape = []
        for idx, item in enumerate(complete_shape):
            if dims_mapping[idx] == -1:
                partition_shape.append(item)
            else:
                partition_shape.append(item // process_shape[dims_mapping[idx]])

        return partition_shape

    @staticmethod
    def compute_process_index(process, process_group, process_shape):
        """Compute the index of process_shape corresponding to the process."""
        relative_process = process_group.index(process)
        process_index = []
        product = reduce(lambda x, y: x * y, process_shape, 1)

        for i in range(len(process_shape)):
            idx = relative_process // (product // process_shape[i])
            product = product // process_shape[i]
            relative_process = (
                relative_process - relative_process // product * product
            )
            process_index.append(idx)

        return process_index

    @staticmethod
    def compute_partition_index(
        process, complete_shape, dims_mapping, process_shape, process_group
    ):
        """Compute the partition index in complete tensor."""
        partition_shape = Resharder.compute_partition_shape(
            complete_shape, dims_mapping, process_shape
        )
        process_index = Resharder.compute_process_index(
            process, process_group, process_shape
        )
        partition_index = []

        for i in range(len(complete_shape)):
            if dims_mapping[i] == -1:
                partition_index.append([0, partition_shape[i]])
            else:
                partition_index.append(
                    [
                        process_index[dims_mapping[i]] * partition_shape[i],
                        (process_index[dims_mapping[i]] + 1)
                        * partition_shape[i],
                    ]
                )

        return partition_index

    @staticmethod
    def compute_concat_info(partition_index_x, partition_index_y):
        """Judge whether two partition can be concatenated and compute concatenated partition index."""
        differ_count = 0
        concat_axis = -1
        first_order = 0
        new_partition = []

        for idx, item in enumerate(partition_index_x):
            if item != partition_index_y[idx]:
                differ_count += 1
                if (
                    item[1] == partition_index_y[idx][0]
                    and item[0] < partition_index_y[idx][1]
                ):
                    concat_axis = idx
                    new_partition.append([item[0], partition_index_y[idx][1]])
                elif (
                    item[0] == partition_index_y[idx][1]
                    and item[1] > partition_index_y[idx][0]
                ):
                    first_order = 1
                    concat_axis = idx
                    new_partition.append([partition_index_y[idx][0], item[1]])
            else:
                new_partition.append(item)

        if differ_count == 1:
            return concat_axis, first_order, new_partition
        else:
            return -1, first_order, new_partition

    @staticmethod
    def compute_complete_shape(slice_shape, process_shape, dims_mapping):
        """compute the complete shape of the slice tensor  with its process mesh and dims mapping"""
        complete_shape = []
        for idx, item in enumerate(slice_shape):
            if dims_mapping[idx] == -1:
                complete_shape.append(item)
            else:
                complete_shape.append(item * process_shape[dims_mapping[idx]])
        return complete_shape

    @staticmethod
    def concat_partitions(partition_index_list, partition_index):
        """Concat the given partitions without inserting concat op."""
        if not partition_index_list:
            partition_index_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_index_list):
                concat_axis, _, new_partition = Resharder.compute_concat_info(
                    partition_index_list[i], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    partition_index_list.pop(i)
                    Resharder.concat_partitions(
                        partition_index_list, new_partition
                    )
                    break
                i += 1
            if not has_concat:
                partition_index_list.append(partition_index)

    @staticmethod
    def change_while_op_input_and_output(auto_parallel_main_prog, dist_context):
        """Change while op input and output after the corresponding sub block ops removed"""
        for sub_block_idx in Resharder.while_block_info:
            sub_block = auto_parallel_main_prog.blocks[sub_block_idx]
            parent_while_op_id = Resharder.while_block_info[sub_block_idx][
                "op_id"
            ]
            parent_block = auto_parallel_main_prog.blocks[sub_block.parent_idx]

            sub_block_op_inputs = set()
            sub_block_op_outputs = []
            for op in sub_block.ops:
                # skip the input and output of operators inserted in the reshard phase
                dist_op = dist_context.get_dist_op_for_program(op)
                if (
                    dist_op
                    or (op.type == "slice" and not dist_op)
                    or (op.type == "split" and not dist_op)
                    or (op.type == "assign" and not dist_op)
                ):
                    for var_name in op.output_arg_names:
                        if var_name not in sub_block_op_outputs:
                            sub_block_op_outputs.append(var_name)
                    for var_name in op.input_arg_names:
                        sub_block_op_inputs.add(var_name)

            # find the while op
            while_op = None
            for op in parent_block.ops:
                if op.desc.id() == parent_while_op_id and op.type == "while":
                    while_op = op
                    break

            if while_op is None:
                continue

            # find the actual input and output of while op
            proto = OpProtoHolder.instance().get_op_proto(while_op.type)
            new_X = []
            for var_name in while_op.input("X"):
                if var_name in sub_block_op_inputs:
                    new_X.append(var_name)
            assert new_X
            new_X.sort()
            while_op.desc.set_input(proto.inputs[0].name, new_X)

            new_Out = []
            for var_name in while_op.output("Out"):
                for output_name in sub_block_op_outputs[::-1]:
                    if output_name.find(var_name) != -1 and (
                        len(var_name) == len(output_name)
                        or "@RESHARD" in output_name
                    ):
                        if output_name not in new_Out:
                            new_Out.append(output_name)
            assert new_Out
            while_op.desc.set_output(proto.outputs[0].name, new_Out)

    def is_overlapped(self, shape_x, shape_y):
        """Judge whether two partitions intersect on the specified dimension."""
        overlapped = False
        if (shape_y[0] <= shape_x[0] < shape_y[1]) or (
            shape_x[0] <= shape_y[0] < shape_x[1]
        ):
            overlapped = True
        return overlapped

    def is_unshard(self, dims_mapping):
        for dim in dims_mapping:
            if dim != -1:
                return False
        return True

    def is_special_op(self, op):
        global _g_special_ops, _g_gradient_clip_ops
        if op.type in _g_special_ops:
            return True
        if is_gradient_clip_op(op) and op.type in _g_gradient_clip_ops:
            return True
        return False

    def is_condition_replicative(self, op):
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]

        if op.type == "while":
            input_cond = op.input("Condition")
        elif op.type == "conditional_block":
            input_cond = op.input("Cond")

        # the dims mapping of the condition tensor should be fully replicated
        for var_name in input_cond:
            var = get_var_with_recursion(
                var_name, sub_block, self.auto_parallel_main_prog
            )
            dist_tensor = self.dist_context.get_dist_tensor_for_program(var)
            tensor_dist_attr = dist_tensor.dist_attr
            var_dims_mapping = tensor_dist_attr.dims_mapping
            for dim in var_dims_mapping:
                if dim != -1:
                    return False

        return True

    def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
        """Judge the tensor whether needs to be resharded."""
        is_reshard = False
        tensor_dist_attr = dist_tensor.dist_attr
        tensor_dims_mapping = tensor_dist_attr.dims_mapping
        tensor_process_mesh = tensor_dist_attr.process_mesh

        # dist_attr is [process_mesh, dims_mapping] and process_mesh is not a union
        op_process_mesh = dist_attr[0]

        if op_input:
            op_input_dims_mapping = dist_attr[1]
            if all(
                x
                for x in [
                    tensor_dims_mapping,
                    tensor_process_mesh,
                    op_input_dims_mapping,
                    op_process_mesh,
                ]
            ):
                # judge whether reshard is needed by comparing dims_mapping
                if tensor_dims_mapping != op_input_dims_mapping:
                    if (
                        tensor_process_mesh
                        not in self.dist_context.process_meshes
                    ):
                        # every dim must be -1 when the tensor process mesh is a union.
                        for item in tensor_dims_mapping:
                            if item != -1:
                                raise ValueError(
                                    "The dim must be -1 when tensor process mesh is a union."
                                )
                        # tensor process_mesh: [0, 1, 2, 3], dims_mapping: [-1, -1]
                        # op process_mesh: [4, 5], dims_mapping: [0, -1]
                        # reshard is not supported such as above
                        if not is_reshard:
                            return is_reshard
                        else:
                            raise ValueError(
                                "it is not supported that tensor process mesh is a union and needs reshard."
                            )
                    is_reshard = True

                # judge whether reshard is needed by comparing process_mesh
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True
        else:
            op_output_dims_mapping = dist_attr[1]
            if all(
                x
                for x in [
                    tensor_dims_mapping,
                    tensor_process_mesh,
                    op_output_dims_mapping,
                    op_process_mesh,
                ]
            ):
                if tensor_dims_mapping != op_output_dims_mapping:
                    raise ValueError(
                        "It is not supported that tensor dims mapping is different from op output dims mapping."
                    )
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True

        return is_reshard

    def get_op_process_meshes(self, op):
        """Get sub process meshes of the given op if op process mesh is a union."""
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        op_process_mesh = dist_op.dist_attr.process_mesh

        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.process_ids) & (
                set(op_process_mesh.process_ids)
            ) and len(process_mesh.process_ids) < len(
                op_process_mesh.process_ids
            ):
                process_meshes.append(process_mesh)

        # an empty process_meshes means the op process mesh is not a union
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        return process_meshes

    def find_op_desc_seq(self, dist_tensor, dist_attr, serial=False):
        """
        Find the op description sequence that reshards the source tensor to match the op requirement.

        Args:
            dist_tensor (DistributedTensor): A distributed tensor.
1426 1427
            dist_attr (list): A list contains process_mesh and dims_mapping such as [process_mesh, dims_mapping].
            serial (bool): If serial is true, the dist tensor and dist op come from serial program. Otherwise, they come from auto program.
1428 1429 1430 1431 1432 1433 1434 1435

        Returns:
            Dict, the dict represents the required op description sequence corresponding to process, The key of dict is
            process and value is a list containing op description.
        """
        tensor_dist_attr = dist_tensor.dist_attr
        source_tensor = dist_tensor.serial_tensor
        tensor_name = source_tensor.name

        source_dims_mapping = tensor_dist_attr.dims_mapping
        source_process_mesh = tensor_dist_attr.process_mesh
        source_process_group = source_process_mesh.process_ids
        source_process_shape = source_process_mesh.shape

        target_process_mesh = dist_attr[0]
        target_dims_mapping = dist_attr[1]
        target_process_group = target_process_mesh.process_ids
        target_process_shape = target_process_mesh.shape

        op_role = dist_attr[2]

        if source_tensor.shape[0] < 0:
            assert source_tensor.shape[0] == -1
            new_shape = list(source_tensor.shape)
            new_shape[0] = self.batch_size
            source_tensor.desc.set_shape(new_shape)

        complete_shape = (
            Resharder.compute_complete_shape(
                source_tensor.shape, source_process_shape, source_dims_mapping
            )
            if not serial
            else source_tensor.shape
        )
        op_desc_seq = OrderedDict()

        # TODO: handle the case where the target process group partially
        # overlaps the source process group
        if set(target_process_group).intersection(
            set(source_process_group)
        ) and set(target_process_group).difference(set(source_process_group)):
            pass

        elif target_process_group != source_process_group:
            partition_process_mapping_list = []
            for source_process in source_process_group:
                # get the partition index of the source process
                source_partition_index = Resharder.compute_partition_index(
                    source_process,
                    complete_shape,
                    source_dims_mapping,
                    source_process_shape,
                    source_process_group,
                )
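                # A hypothetical example of the partition index format: with
                # complete_shape [8, 4], dims_mapping [0, -1] and two source
                # processes, process 0 gets [[0, 4], [0, 4]] and process 1
                # gets [[4, 8], [0, 4]], i.e. a [start, end) pair per dim.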
                if not partition_process_mapping_list:
                    # each item records a partition index, the processes that
                    # own it, and whether each process has been used as a sender
                    partition_process_mapping_list.append(
                        [source_partition_index, [source_process], [False]]
                    )
                else:
                    partition_list = [
                        item[0] for item in partition_process_mapping_list
                    ]
                    process_list = [
                        item[1] for item in partition_process_mapping_list
                    ]
                    has_used = [
                        item[2] for item in partition_process_mapping_list
                    ]

                    if partition_list.count(source_partition_index) == 1:
                        index = partition_list.index(source_partition_index)
                        process_list[index].append(source_process)
                        has_used[index].append(False)
                    else:
                        partition_process_mapping_list.append(
                            [source_partition_index, [source_process], [False]]
                        )

            for target_process in target_process_group:
                # has_sent records the source partition indexes already sent to target_process
                has_sent = []
                target_partition_index = Resharder.compute_partition_index(
                    target_process,
                    complete_shape,
                    target_dims_mapping,
                    target_process_shape,
                    target_process_group,
                )
                partition_index_list = []
                all_partition_index_list = []
                for source_process in source_process_group:
                    source_partition_index = Resharder.compute_partition_index(
                        source_process,
                        complete_shape,
                        source_dims_mapping,
                        source_process_shape,
                        source_process_group,
                    )
                    to_send_process = None
                    if (
                        all(
                            _
                            for _ in list(
                                map(
                                    self.is_overlapped,
                                    source_partition_index,
                                    target_partition_index,
                                )
                            )
                        )
                        and source_partition_index not in has_sent
                    ):
                        idx = [
                            item[0] for item in partition_process_mapping_list
                        ].index(source_partition_index)
                        has_used = [
                            item[2] for item in partition_process_mapping_list
                        ][idx]
                        process_list = [
                            item[1] for item in partition_process_mapping_list
                        ][idx]
                        i = 0
                        while i < len(has_used):
                            if not has_used[i]:
                                to_send_process = process_list[i]
                                has_used[i] = True
                                break
                            i += 1

                        if i == len(has_used):
                            # all owners have been used; reset the flags in
                            # place and start reusing them from the beginning
                            for j in range(len(has_used)):
                                has_used[j] = False
                            to_send_process = process_list[0]
                            has_used[0] = True
                        assert (
                            to_send_process is not None
                        ), "Failed to find the send process."

                        if to_send_process not in op_desc_seq.keys():
                            op_desc_seq[to_send_process] = []
                        if target_process not in op_desc_seq.keys():
                            op_desc_seq[target_process] = []
                        all_partition_index_list.append(source_partition_index)

                        # append send and recv op desc
                        is_bool = dist_tensor.serial_tensor.dtype == paddle.bool
                        send_op_desc = SendOpDesc(
                            source_partition_index,
                            to_send_process,
                            target_process,
                            is_bool=is_bool,
                        )
                        recv_op_desc = RecvOpDesc(
                            source_partition_index,
                            to_send_process,
                            target_process,
                            is_bool=is_bool,
                        )
                        op_desc_seq[to_send_process].append(send_op_desc)
                        op_desc_seq[target_process].append(recv_op_desc)
                        has_sent.append(source_partition_index)
                        Resharder.concat_partitions(
                            partition_index_list, source_partition_index
                        )
                        if int(op_role) == int(OpRole.Forward):
                            self.dist_context.up_down_streams.add_pair_stream(
                                to_send_process, target_process
                            )

                # append concat op desc
                op_desc_seq[target_process].append(
                    ConcatOpDesc(all_partition_index_list)
                )

                # append slice op desc
                slice_starts = []
                slice_ends = []
                slices_axes = []
                concatenated_partition_index = partition_index_list[0]
                to_slice_tensor_shape = []

                for idx, item in enumerate(concatenated_partition_index):
                    slice_starts.append(
                        target_partition_index[idx][0] - item[0]
                    )
                    slice_ends.append(target_partition_index[idx][1] - item[0])
                    slices_axes.append(idx)
                    to_slice_tensor_shape.append(item[1] - item[0])

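                # The slice bounds are the target partition expressed in the
                # coordinates of the concatenated tensor: e.g. (hypothetically)
                # if the concatenated part covers rows [4, 8) and the target
                # needs rows [6, 8), the slice is starts=[2], ends=[4].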
                op_desc_seq[target_process].append(
                    SliceOpDesc(
                        slice_starts,
                        slice_ends,
                        slices_axes,
                        shape=to_slice_tensor_shape,
                    )
                )

        # processes in the same process group use allgather and slice ops
        else:
            # NOTE: Only the evenly-partitioned scene is supported.
            partition_index_list = []
            all_partition_index_list = []
            process_index = []
            for source_process in source_process_group:
                source_partition_index = Resharder.compute_partition_index(
                    source_process,
                    complete_shape,
                    source_dims_mapping,
                    source_process_shape,
                    source_process_group,
                )
                if source_partition_index not in partition_index_list:
                    partition_index_list.append(source_partition_index)
                    process_index.append(
                        [
                            [
                                source_process,
                            ],
                            source_partition_index,
                        ]
                    )
                else:
                    process_index[
                        partition_index_list.index(source_partition_index)
                    ][0].append(source_process)

            for i in range(len(process_index[0][0])):
                group = []
                for j in range(len(process_index)):
                    group.append(process_index[j][0][i])
                    if i == 0:
                        all_partition_index_list.append(process_index[j][1])
                for process in group:
                    # append slice op desc
                    slice_starts = []
                    slice_ends = []
                    slices_axes = []
                    target_partition_index = Resharder.compute_partition_index(
                        process,
                        complete_shape,
                        target_dims_mapping,
                        target_process_shape,
                        target_process_group,
                    )
                    for idx, item in enumerate(target_partition_index):
                        slice_starts.append(item[0])
                        slice_ends.append(item[1])
                        slices_axes.append(idx)

                    to_slice_tensor_shape = dist_tensor.global_sizes()
                    slice_op_desc = SliceOpDesc(
                        starts=slice_starts,
                        ends=slice_ends,
                        axes=slices_axes,
                        shape=to_slice_tensor_shape,
                    )
                    allgather_shape = (
                        None
                        if not serial
                        else dist_tensor.local_sizes(rank=process)
                    )
                    # when the target is fully replicated and the source is
                    # split only along its last dim, a single c_concat op suffices
                    if (
                        target_dims_mapping.count(-1)
                        == len(target_dims_mapping)
                        and source_dims_mapping[:-1].count(-1)
                        == len(source_dims_mapping[:-1])
                        and source_dims_mapping[-1] != -1
                    ):
                        op_desc_seq[process] = [
                            AllGatherConcatOpDesc(
                                group=group, shape=allgather_shape
                            )
                        ]
                    else:
                        op_desc_seq[process] = (
                            [
                                AllGatherOpDesc(
                                    group=group,
                                    shape=allgather_shape,
                                    is_bool=(
                                        source_tensor.dtype == paddle.bool
                                    ),
                                ),
                                ConcatOpDesc(
                                    partition_index_list=all_partition_index_list
                                ),
                                slice_op_desc,
                            ]
                            if len(group) > 1
                            else [slice_op_desc]
                        )

        return op_desc_seq

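    # A usage sketch with names from this file: for every op input that needs
    # resharding, the caller is expected to run roughly
    #     op_desc_seq = self.find_op_desc_seq(dist_tensor, input_attr)
    #     self.parse_op_desc(block, op_desc_seq, var_name, op, input_attr)
    # as _reshard_input below does; parse_op_desc then only materializes the
    # op descs belonging to self.rank_id.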
    def parse_op_desc(
        self, block, op_desc_seq, var_name, reshard_op, dist_attr
    ):
        """Parse the op desc sequence and insert ops into the block."""

        # Parse all communicator groups for all ranks.
        # Ensure every rank has a global view of the communicator groups of the
        # entire cluster, so that when communicators are initialized for
        # pipeline parallel, every rank can perform a correct global synchronization.
        for rank_id in op_desc_seq:
            op_desc_list = op_desc_seq[rank_id]
            for op_desc in op_desc_list:
                if isinstance(op_desc, AllGatherOpDesc):
                    new_process_group(op_desc.group)
                elif isinstance(op_desc, AllGatherConcatOpDesc):
                    new_process_group(op_desc.group)
                elif isinstance(op_desc, SendOpDesc):
                    new_process_group([op_desc.src, op_desc.dst])
                elif isinstance(op_desc, RecvOpDesc):
                    new_process_group([op_desc.src, op_desc.dst])

        tensor_list = []
        partition_tensor_list = []
        if self.rank_id not in op_desc_seq.keys():
            return
        op_desc_list = op_desc_seq[self.rank_id]

        idx = None
        for index, op in list(enumerate(block.ops)):
            if op.desc.id == reshard_op.desc.id:
                idx = index
                break
        assert (
            idx is not None
        ), "The op for reshard cannot be found in the rank {} program.".format(
            self.rank_id
        )

        matched_op = block.ops[idx]
        source_tensor = get_var_with_recursion(
            var_name, block, self.auto_parallel_main_prog
        )
        for op_desc in op_desc_list:
            if isinstance(op_desc, AllGatherOpDesc):
                if var_name not in self.has_allgather.keys():
                    self.has_allgather[var_name] = []
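                # has_allgather caches, per var name, pairs of
                # [process_group, resulting tensor names] so that a later op
                # desc with the same group can reuse the allgather result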
                if not self.has_allgather[var_name] or op_desc.group not in [
                    x[0] for x in self.has_allgather[var_name]
                ]:
                    if op_desc.is_bool:
                        # for bool data: cast to int64 -> allgather -> cast back to bool
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx,
                            source_tensor,
                            reshard_op.attr('op_role'),
                            paddle.int64,
                        )
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block,
                            idx + 1,
                            out_cast,
                            op_desc.group,
                            reshard_op.attr('op_role'),
                        )
                        idx += idx_offset
                        tensor_name_list = []
                        for var in tensor_list:
                            out_cast = Inserter.insert_cast_op(
                                block,
                                idx,
                                var,
                                reshard_op.attr('op_role'),
                                paddle.bool,
                            )
                            tensor_name_list.append(out_cast.name)
                            idx += 1
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list]
                        )
                    else:
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block,
                            idx,
                            source_tensor,
                            op_desc.group,
                            reshard_op.attr('op_role'),
                        )
                        idx += idx_offset
                        tensor_name_list = [var.name for var in tensor_list]
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list]
                        )
                else:
                    for item in self.has_allgather[var_name]:
                        if op_desc.group == item[0]:
                            tensor_list = [
                                get_var_with_recursion(
                                    var_name,
                                    block,
                                    self.auto_parallel_main_prog,
                                )
                                for var_name in item[1]
                            ]
                            break
                assert (
                    tensor_list
                ), "The result of parsing allgather op should not be None."

            elif isinstance(op_desc, SendOpDesc):
                if var_name not in self.has_sent.keys():
                    self.has_sent[var_name] = []
                if op_desc.dst not in self.has_sent[var_name]:
                    if op_desc.is_bool:
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx,
                            source_tensor,
                            reshard_op.attr('op_role'),
                            paddle.int64,
                        )
                        Inserter.insert_send_op(
                            block,
                            idx + 1,
                            out_cast,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        idx += 2
                    else:
                        Inserter.insert_send_op(
                            block,
                            idx,
                            source_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        idx += 1
                    self.has_sent[var_name].append(op_desc.dst)

            elif isinstance(op_desc, RecvOpDesc):
                if var_name not in self.has_recv.keys():
                    self.has_recv[var_name] = {}
                if op_desc.src not in self.has_recv[var_name].keys():
                    partition_index = op_desc.partition_index
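                    # the shape to receive is the extent of the sender's
                    # partition index along each dimension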
                    shape = []
                    for index in partition_index:
                        shape.append(index[1] - index[0])
                    if op_desc.is_bool:
                        # for bool data: recv as int64 -> cast back to bool
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=paddle.int64,
                            type=source_tensor.type,
                        )
                        Inserter.insert_recv_op(
                            block,
                            idx,
                            recv_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )
                        out_cast = Inserter.insert_cast_op(
                            block,
                            idx + 1,
                            recv_tensor,
                            reshard_op.attr('op_role'),
                            paddle.bool,
                        )
                        tensor_list.append(out_cast)
                        idx += 2
                        self.has_recv[var_name][op_desc.src] = out_cast
                    else:
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=source_tensor.dtype,
                            type=source_tensor.type,
                        )
                        Inserter.insert_recv_op(
                            block,
                            idx,
                            recv_tensor,
                            op_desc.src,
                            op_desc.dst,
                            reshard_op.attr('op_role'),
                        )

                        # for a LoD tensor, the LoD needs to be reset after receiving
                        if recv_tensor.lod_level != 0:
                            set_lod = False
                            # use the LoD of a data var to reset the tensor's LoD
                            for (
                                tmp_block
                            ) in self.auto_parallel_main_prog.blocks:
                                for tmp_var_name in tmp_block.vars:
                                    tmp_var = tmp_block.vars[tmp_var_name]
                                    if (
                                        tmp_var.is_data
                                        and tmp_var.lod_level
                                        == recv_tensor.lod_level
                                    ):
                                        reset_lod_out = (
                                            Inserter.insert_reset_lod_op(
                                                block,
                                                idx + 1,
                                                recv_tensor,
                                                tmp_var,
                                                reshard_op.attr('op_role'),
                                            )
                                        )
                                        tensor_list.append(reset_lod_out)
                                        idx += 2
                                        self.has_recv[var_name][
                                            op_desc.src
                                        ] = reset_lod_out
                                        set_lod = True
                                        break
                                if set_lod:
                                    break
                            assert set_lod is True
                        else:
                            tensor_list.append(recv_tensor)
                            idx += 1
                            self.has_recv[var_name][op_desc.src] = recv_tensor
                else:
                    tensor_list.append(self.has_recv[var_name][op_desc.src])

            elif isinstance(op_desc, ConcatOpDesc):
                partition_index_list = op_desc.partition_index_list
                idx_list = [idx]
                for index, tensor in enumerate(tensor_list):
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list,
                        tensor,
                        partition_index_list[index],
                        block,
                        idx_list,
                        reshard_op.attr('op_role'),
                    )
                idx = idx_list[0]

            elif isinstance(op_desc, (SliceOpDesc, AllGatherConcatOpDesc)):
                target_tensor = None
                if isinstance(op_desc, SliceOpDesc):
                    assert (
                        len(partition_tensor_list) == 1
                        or not partition_tensor_list
                    )
                    to_slice_tensor = (
                        partition_tensor_list[0][0]
                        if len(partition_tensor_list) == 1
                        else source_tensor
                    )
                    new_name = unique_name.generate(var_name + "@RESHARD")
                    target_tensor = Inserter.insert_slice_op(
                        block,
                        idx,
                        to_slice_tensor,
                        starts=op_desc.starts,
                        ends=op_desc.ends,
                        axes=op_desc.axes,
                        new_var_name=new_name,
                        op_role=reshard_op.attr('op_role'),
                    )
                else:
                    target_tensor = Inserter.insert_c_concat_op(
                        block,
                        idx,
                        source_tensor,
                        op_desc.group,
                        reshard_op.attr('op_role'),
                    )

                assert target_tensor is not None
                process_mesh = dist_attr[0]
                dims_mapping = dist_attr[1]

                tensor_attr = TensorDistAttr()
                tensor_attr.dims_mapping = dims_mapping
                tensor_attr.process_mesh = process_mesh
                self.dist_context.set_tensor_dist_attr_for_program(
                    target_tensor, tensor_attr
                )

                if matched_op.type == "while":
                    # var_reshard_mapping records, per while sub-block, how an
                    # input var is renamed to its resharded tensor
                    sub_block_id = matched_op.attr("sub_block").id
                    if (
                        "var_reshard_mapping"
                        not in Resharder.while_block_info[sub_block_id].keys()
                    ):
                        Resharder.while_block_info[sub_block_id][
                            "var_reshard_mapping"
                        ] = {}
                    if (
                        var_name
                        not in Resharder.while_block_info[sub_block_id][
                            "var_reshard_mapping"
                        ].keys()
                    ):
                        Resharder.while_block_info[sub_block_id][
                            "var_reshard_mapping"
                        ][var_name] = []
                    Resharder.while_block_info[sub_block_id][
                        "var_reshard_mapping"
                    ][var_name].append([dist_attr, target_tensor.name])

                # rename op input names according to the new name
                for op in block.ops:
                    # just for the while op
                    while_op_X_append = []
                    for name in op.input_arg_names:
                        op_dist_attr = (
                            self.dist_context.get_op_dist_attr_for_program(op)
                        )
                        if name == var_name and op_dist_attr is not None:
                            if op.desc.id() == matched_op.desc.id():
                                if matched_op.type == "while":
                                    op.desc._rename_input(
                                        name, target_tensor.name
                                    )
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = (
                                        op_dist_attr.get_input_dist_attr(
                                            old_name
                                        )
                                    )
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr
                                    )
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping
                                    )
                                    while_op_X_append.append(new_name)
                                    continue
                                else:
                                    op.desc._rename_input(
                                        name, target_tensor.name
                                    )
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = (
                                        op_dist_attr.get_input_dist_attr(
                                            old_name
                                        )
                                    )
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr
                                    )
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping
                                    )
                                    continue

                            op_process_mesh = op_dist_attr.process_mesh
                            op_input_dims_mapping = (
                                op_dist_attr.get_input_dims_mapping(var_name)
                            )
                            # NOTE: For an op whose process mesh is a union, its
                            # input will not be renamed by other ops' reshard
                            # results for now, which means it will incur more
                            # reshard operations.
                            if (
                                op_process_mesh == process_mesh
                                and op_input_dims_mapping == dims_mapping
                            ):
                                op.desc._rename_input(name, target_tensor.name)
                                old_name = name
                                new_name = target_tensor.name
                                assert old_name != new_name
                                op_input_dist_attr = (
                                    op_dist_attr.get_input_dist_attr(old_name)
                                )
                                op_dist_attr.set_input_dist_attr(
                                    new_name, op_input_dist_attr
                                )
                                op_dist_attr.set_input_dims_mapping(
                                    new_name, dims_mapping
                                )

                    # for the while op, the X input should be reset
                    if while_op_X_append:
                        proto = OpProtoHolder.instance().get_op_proto(op.type)
                        op.desc.set_input(
                            proto.inputs[0].name,
                            op.input("X") + while_op_X_append,
                        )

    def _get_subblock_input_attrs(self, op, var_name):
        # NOTE: Multiple while loops are not supported.
        assert op.type in _g_subblock_ops
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]
        ops = sub_block.ops
        input_attrs = []
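        # each element of input_attrs is [process_mesh, dims_mapping, op_role]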

        for op in ops:
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if not dist_op:
                continue
            dist_attr = dist_op.dist_attr
            for name in op.input_arg_names:
                if name == var_name:
                    process_mesh = dist_attr.process_mesh
                    input_dims_mapping = dist_attr.get_input_dims_mapping(
                        var_name
                    )
                    has_exist = False
                    for input_attr in input_attrs:
                        if (
                            process_mesh == input_attr[0]
                            and input_dims_mapping == input_attr[1]
                        ):
                            has_exist = True
                            break
                    if not has_exist:
                        input_attrs.append(
                            [
                                process_mesh,
                                input_dims_mapping,
                                op.attr('op_role'),
                            ]
                        )
        return input_attrs

    def _get_subblock_output_attrs(self, op, var_name):
        # NOTE: Multiple while loops are not supported.
        assert op.type in _g_subblock_ops
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]
        ops = sub_block.ops
        output_attrs = []

        for op in ops:
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if not dist_op:
                continue
            dist_attr = dist_op.dist_attr
            for name in op.output_arg_names:
                if name == var_name:
                    process_mesh = dist_attr.process_mesh
                    output_dims_mapping = dist_attr.get_output_dims_mapping(
                        var_name
                    )
                    has_exist = False
                    for output_attr in output_attrs:
                        if (
                            process_mesh == output_attr[0]
                            and output_dims_mapping == output_attr[1]
                        ):
                            has_exist = True
                            break
                    if not has_exist:
                        output_attrs.append(
                            [
                                process_mesh,
                                output_dims_mapping,
                                op.attr('op_role'),
                            ]
                        )
        return output_attrs

    def _get_common_op_input_attrs(self, op, var_name):
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        dist_attr = dist_op.dist_attr
        op_process_mesh = dist_attr.process_mesh
        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.process_ids) & set(
                op_process_mesh.process_ids
            ) and len(process_mesh.process_ids) < len(
                op_process_mesh.process_ids
            ):
                process_meshes.append(process_mesh)

        # the op process mesh is not a union if process_meshes is empty
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        input_dims_mapping = dist_attr.get_input_dims_mapping(var_name)
        input_attrs = []
        for process_mesh in process_meshes:
            input_attrs.append(
                [process_mesh, input_dims_mapping, op.attr('op_role')]
            )

        return input_attrs

    def get_op_input_attrs(self, op, var_name):
        op_input_attrs = []

        if op.type in _g_subblock_ops:
            op_input_attrs = self._get_subblock_input_attrs(op, var_name)
            if not op_input_attrs:
                # NOTE: [hack method]
                # Adapt to the quantization pass, in which persistable vars,
                # including inputs and outputs, all live in the global block.
                # Therefore, the while op's inputs will contain all the
                # persistable vars, which may be inputs or outputs of the
                # quantization ops in the sub-block.
                op_input_attrs = self._get_subblock_output_attrs(op, var_name)
        else:
            op_input_attrs = self._get_common_op_input_attrs(op, var_name)

        assert (
            op_input_attrs
        ), "The input '{}' of op '{}' has no distributed attributes in subblock".format(
            var_name, op.type
        )

        return op_input_attrs

    def _remove_global_process_mesh(self):
        """Remove global process mesh from dist_context.process_meshes"""
        process_ids = set()
        process_mesh_count = len(self.dist_context.process_meshes)
        if process_mesh_count > 1:
            global_process_mesh_idx = []
            has_sub_process_mesh = False
            for process_mesh in self.dist_context.process_meshes:
                for process_id in process_mesh.process_ids:
                    process_ids.add(process_id)
            for idx, process_mesh in enumerate(
                self.dist_context.process_meshes
            ):
                if len(set(process_mesh.process_ids)) == len(process_ids):
                    global_process_mesh_idx.append(idx)
                elif set(process_mesh.process_ids) < process_ids:
                    has_sub_process_mesh = True

            if has_sub_process_mesh:
                for idx in reversed(global_process_mesh_idx):
                    self.dist_context.process_meshes.pop(idx)

    def _change_subblock_op_input_and_output(self, block_idx, block):
        if "var_reshard_mapping" in Resharder.while_block_info[block_idx]:
            var_reshard_mapping = Resharder.while_block_info[block_idx][
                "var_reshard_mapping"
            ]
            for op in block.ops:
                for var_name in op.input_arg_names:
                    if var_name in var_reshard_mapping:
                        # in the while sub-block, the union process mesh has not
                        # been split before resharding the sub-block
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        dist_attr = dist_op.dist_attr
                        target_name = None
                        for item in var_reshard_mapping[var_name]:
                            if (
                                dist_attr.process_mesh == item[0][0]
                                and dist_attr.get_input_dims_mapping(var_name)
                                == item[0][1]
                            ):
                                target_name = item[1]
                                break
                        if target_name is None:
                            continue
                        else:
                            op.desc._rename_input(var_name, target_name)
                            dist_op = self.dist_context.get_dist_op_for_program(
                                op
                            )
                            op_dist_attr = dist_op.dist_attr
                            old_name = var_name
                            new_name = target_name
                            assert old_name != new_name
                            op_input_dist_attr = (
                                op_dist_attr.get_input_dist_attr(old_name)
                            )
                            op_dist_attr.set_input_dist_attr(
                                new_name, op_input_dist_attr
                            )

                # outputs also need renaming when an inplace op's output name
                # equals its input name
                for var_name in op.output_arg_names:
                    # a tensor that has been resharded multiple times as an
                    # input is not supported here
                    if var_name in var_reshard_mapping:
                        if len(var_reshard_mapping[var_name]) > 1:
                            raise ValueError(
                                "It is not supported that an output is inplaced and the tensor has been resharded multiple times when used as input."
                            )
                        target_name = var_reshard_mapping[var_name][0][1]

                        op.desc._rename_output(var_name, target_name)
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        op_dist_attr = dist_op.dist_attr
                        old_name = var_name
                        new_name = target_name
                        assert old_name != new_name
                        op_output_dist_attr = op_dist_attr.get_output_dist_attr(
                            old_name
                        )
                        op_dist_attr.set_output_dist_attr(
                            new_name, op_output_dist_attr
                        )

    def _reshard_input(self, block):
        idx = 0
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]

            if self.is_special_op(op):
                idx += 1
                continue

            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None:
                op_input_dist_attrs = (
                    []
                )  # [(op_process_mesh, op_input_dims_mapping), (op_process_mesh, op_input_dims_mapping)]
                if op.type in _g_subblock_ops:
                    if not self.is_condition_replicative(op):
                        raise ValueError(
                            "Please check the condition due to the dims mapping is not replicative."
                        )
                    if (
                        op.attr("sub_block").id
                        not in Resharder.while_block_info
                    ):
                        Resharder.while_block_info[op.attr("sub_block").id] = {}
                    Resharder.while_block_info[op.attr("sub_block").id][
                        "op_id"
                    ] = op.desc.id()

                if op.type == "while":
                    # the condition var's process mesh matches the op's and its dims_mapping is replicative, so it does not need reshard
                    input_var_names = op.input("X")
                elif op.type == "conditional_block":
                    input_var_names = op.input("Input")
                else:
                    input_var_names = op.input_arg_names
                # sort to keep the while op's X input order deterministic
                input_var_names.sort()

                idx_offset = 0
                for var_name in input_var_names:
                    # skip vars whose name contains lod_tensor_blocking_queue
                    if "lod_tensor_blocking_queue" in var_name:
                        continue
                    var = get_var_with_recursion(
                        var_name, block, self.auto_parallel_main_prog
                    )
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var
                    )

                    # check that a union tensor's dims_mapping is all -1
                    is_union_process_mesh_tensor = False
                    if (
                        dist_tensor.dist_attr.process_mesh
                        not in self.dist_context.process_meshes
                        and self.dist_context.process_meshes
                    ):
                        is_union_process_mesh_tensor = True
                        assert dist_tensor.dist_attr.dims_mapping.count(
                            -1
                        ) == len(dist_tensor.dist_attr.dims_mapping)

                    op_input_attrs = self.get_op_input_attrs(op, var_name)
                    for input_attr in op_input_attrs:
                        input_process_mesh = None

                        # deal with union tensor
                        if is_union_process_mesh_tensor:
                            # if the op process mesh is a subset of the union tensor process mesh, no reshard is needed
                            if set(input_attr[0].process_ids) <= set(
                                dist_tensor.dist_attr.process_mesh.process_ids
                            ):
                                continue

                        if dist_tensor is not None and self.need_reshard(
                            dist_tensor, input_attr
                        ):
                            reshard_op_desc = self.find_op_desc_seq(
                                dist_tensor, input_attr
                            )
                            self.parse_op_desc(
                                block, reshard_op_desc, var_name, op, input_attr
                            )
                            cur_op_count = len(block.ops)
                            idx_offset = (
                                idx_offset + cur_op_count - pre_op_count
                            )
                            pre_op_count = cur_op_count
                idx = idx + idx_offset + 1
            else:
                idx += 1

    def _handle_recv(self, block, idx, var, op, send_rank, recv_rank):
        if self.rank_id == recv_rank:
            # when receiving bool data, recv as int64 then cast back to bool
            if var.dtype == paddle.bool:
                recv_cast_out = block.create_var(
                    name=unique_name.generate(var.name + "@recv"),
                    shape=var.shape,
                    lod_level=var.lod_level,
                    dtype=paddle.int64,
                    type=var.type,
                )
                Inserter.insert_recv_op(
                    block,
                    idx + 1,
                    recv_cast_out,
                    send_rank,
                    recv_rank,
                    op.attr('op_role'),
                )
                reset_lod_out = None
                if var.lod_level != 0:
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if (
                                tmp_var.is_data
                                and tmp_var.lod_level == var.lod_level
                            ):
                                reset_lod_out = block.create_var(
                                    name=unique_name.generate(
                                        var.name + "@RESETLOD"
                                    ),
                                    shape=recv_cast_out.shape,
                                    type=recv_cast_out.type,
                                    dtype=recv_cast_out.dtype,
                                    lod_level=recv_cast_out.lod_level,
                                )
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={'X': recv_cast_out, 'Y': tmp_var},
                                    outputs={'Out': reset_lod_out},
                                    attrs={'op_role': op.attr("op_role")},
                                )
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True

                # cast int64 back to bool
                cast_op = block._insert_op(
                    idx + 2,
                    type='cast',
                    inputs={
                        'X': [recv_cast_out]
                        if reset_lod_out is None
                        else [reset_lod_out]
                    },
                    outputs={'Out': [var]},
                    attrs={
                        'in_dtype': recv_cast_out.dtype,
                        'out_dtype': var.dtype,
                        'op_role': op.attr('op_role'),
                    },
                )
                cast_op._set_attr('op_namescope', "/auto_parallel/reshard")
            else:
                if var.lod_level != 0:
                    recv_out = block.create_var(
                        name=unique_name.generate(var.name + "@recv"),
                        shape=var.shape,
                        lod_level=var.lod_level,
                        dtype=var.dtype,
                        type=var.type,
                    )
                    Inserter.insert_recv_op(
                        block,
                        idx + 1,
                        recv_out,
                        send_rank,
                        recv_rank,
                        op.attr('op_role'),
                    )
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if (
                                tmp_var.is_data
                                and tmp_var.lod_level == var.lod_level
                            ):
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={'X': recv_out, 'Y': tmp_var},
                                    outputs={'Out': var},
                                    attrs={'op_role': op.attr("op_role")},
                                )
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True
                else:
                    Inserter.insert_recv_op(
                        block,
                        idx + 1,
                        var,
                        send_rank,
                        recv_rank,
                        op.attr('op_role'),
                    )

    def _handle_send(self, block, idx, var, op, send_rank, recv_rank):
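        # bool tensors are not sent directly: cast to int64 first, mirroring
        # the recv path which casts the received int64 back to bool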
        if var.dtype == paddle.bool:
2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552
            cast_out = Inserter.insert_cast_op(
                block, idx + 1, var, op.attr('op_role'), paddle.int64
            )
            Inserter.insert_send_op(
                block,
                idx + 2,
                cast_out,
                send_rank,
                recv_rank,
                op.attr('op_role'),
            )
2553
        else:
2554 2555 2556
            Inserter.insert_send_op(
                block, idx + 1, var, send_rank, recv_rank, op.attr('op_role')
            )

    def _reshard_output(self, block):
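        """Insert send/recv ops for each op output whose process mesh differs
        from the process mesh of the tensor it produces."""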
        # insert send and recv op if output process mesh is different from tensor process mesh
        idx = 0
        # skip reader ops and ops whose process mesh is a union
        skip_ops = [
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
            "write_to_array",
            "read_from_array",
            "nop",
            "depend",
        ]
        global _g_special_ops
        skip_ops += _g_special_ops
        skip_ops += _g_subblock_ops
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None and op.type not in skip_ops:
                idx_offset = 0
                for var_name in op.output_arg_names:
                    var = get_var_with_recursion(
                        var_name, block, self.auto_parallel_main_prog
                    )
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var
                    )
                    tensor_process_mesh = dist_tensor.dist_attr.process_mesh
                    output_attr = [
                        dist_op.dist_attr.process_mesh,
                        dist_op.dist_attr.get_output_dims_mapping(var_name),
                    ]
                    if dist_tensor is not None and self.need_reshard(
                        dist_tensor, output_attr, False
                    ):
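                        # ranks that hold the tensor but are absent from the
                        # op's output process mesh must receive the output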
                        tensor_processes = set(
                            tensor_process_mesh.process_ids
                        ) - (
                            set(tensor_process_mesh.process_ids)
                            & set(output_attr[0].process_ids)
                        )
                        if tensor_processes:
                            if len(tensor_processes) != len(
                                output_attr[0].process_ids
                            ):
                                if dist_tensor.dist_attr.dims_mapping.count(
                                    -1
                                ) != len(
                                    dist_tensor.dist_attr.dims_mapping
                                ) or output_attr[
                                    1
                                ].count(
                                    -1
                                ) != len(
                                    output_attr[1]
                                ):
                                    raise ValueError(
                                        "The dims_mapping must be all -1."
                                    )
                                else:
                                    for index, tensor_process in enumerate(
                                        tensor_processes
                                    ):
                                        recv_rank = tensor_process
                                        actual_index = index
                                        if index >= len(
                                            output_attr[0].process_ids
                                        ):
                                            actual_index = (
                                                index
                                                - len(
                                                    output_attr[0].process_ids
                                                )
                                            ) % len(output_attr[0].process_ids)
                                        item = output_attr[0].process_ids[
                                            actual_index
                                        ]
                                        if recv_rank == item:
                                            continue
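                                        # resolve the dynamic batch dim so both
                                        # ranks agree on the buffer shape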
                                        if var.shape[0] == -1:
                                            new_shape = list(var.shape)
                                            new_shape[0] = self.batch_size
                                            var.desc.set_shape(new_shape)
                                        if self.rank_id == item:
                                            # if send bool data, cast then send
                                            self._handle_send(
                                                block,
                                                idx,
                                                var,
                                                op,
                                                item,
                                                recv_rank,
                                            )
                                        elif self.rank_id == recv_rank:
                                            # if recv bool data, recv then cast
                                            self._hadnle_recv(
                                                block,
                                                idx,
                                                var,
                                                op,
                                                item,
                                                recv_rank,
                                            )
                                        else:
                                            # Ensure every rank has a global view of the communicator
                                            # groups for the entire cluster, so that when communicators
                                            # are initialized for pipeline parallel, every rank can
                                            # conduct a correct global synchronization.
                                            new_process_group([item, recv_rank])
                            else:
                                for index, tensor_process in enumerate(
                                    tensor_processes
                                ):
                                    recv_rank = tensor_process
                                    item = output_attr[0].process_ids[index]
                                    if recv_rank == item:
                                        continue
                                    if var.shape[0] == -1:
                                        new_shape = list(var.shape)
                                        new_shape[0] = self.batch_size
                                        var.desc.set_shape(new_shape)
                                    if self.rank_id == item:
                                        # if send bool data, cast then send
                                        self._handle_send(
                                            block, idx, var, op, item, recv_rank
                                        )
                                    elif self.rank_id == recv_rank:
                                        # if recv bool data, recv then cast
                                        self._hadnle_recv(
                                            block, idx, var, op, item, recv_rank
                                        )
                                    else:
                                        # Ensure every rank has a global view of the communicator
                                        # groups for the entire cluster, so that when communicators
                                        # are initialized for pipeline parallel, every rank can
                                        # conduct a correct global synchronization.
                                        new_process_group([item, recv_rank])

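                            # send/recv insertion grew the block; record the
                            # offset so the outer scan skips the inserted ops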
                            cur_op_count = len(block.ops)
                            idx_offset = (
                                idx_offset + cur_op_count - pre_op_count
                            )
                            pre_op_count = cur_op_count

                idx = idx + idx_offset + 1
            else:
                idx += 1

    def reshard(self):
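        """Reshard every block of the main program, then remove the vars and
        ops that are unnecessary for the current rank."""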
        self._remove_global_process_mesh()
        for block_idx, block in enumerate(self.auto_parallel_main_prog.blocks):
            # change the var_name before resharding sub block
            if block_idx in Resharder.while_block_info:
                self._change_subblock_op_input_and_output(block_idx, block)

            # reshard input
            self._reshard_input(block)

            # reshard output
            # NOTE: Only supports inserting send and recv ops when the output process mesh differs from the tensor process mesh
            self._reshard_output(block)

        # remove unneeded vars and ops in the main program
        Remover.remove_no_need_in_main(
            self.auto_parallel_main_prog,
            self.dist_context,
            self.rank_id,
            self.dist_params_grads,
        )

        # remove unneeded vars and ops in the startup program
        Remover.remove_no_need_in_startup(
            self.auto_parallel_main_prog, self.auto_parallel_startup_prog
        )

        # reset the shared while-block info once removal has finished
        Resharder.while_block_info = {}

    def get_cost(self, op, tensor, cluster):
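        """Return the cost of resharding ``tensor`` as an input of ``op`` on
        ``cluster``, or None if no reshard is needed or the op is unsupported."""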
        # NOTE: The program should be the serial_program, which has not been partitioned
        global _g_special_ops
        not_supported_op_type = _g_special_ops + ["while"]
        reshard_op_cost = None
        if op.type in not_supported_op_type:
            return reshard_op_cost
        else:
            tensor_name = tensor.name
            if tensor_name == "lod_tensor_blocking_queue_0":
                return reshard_op_cost
            else:
                dist_tensor = self.dist_context.get_dist_tensor_for_program(
                    tensor
                )
                # simplified processing: ignore union process mesh and output reshard
                dist_op = self.dist_context.get_dist_op_for_program(op)
                if not dist_tensor or not dist_op:
                    return reshard_op_cost
                dims_mapping = dist_op.dist_attr.get_input_dims_mapping(
                    tensor.name
                )
                process_mesh = dist_op.dist_attr.process_mesh
                dist_attr = [
                    process_mesh,
                    dims_mapping,
                    dist_op.serial_op.attr('op_role'),
                ]
                if dist_tensor is not None and self.need_reshard(
                    dist_tensor, dist_attr
                ):
                    if tensor_name not in self._has_resharded:
                        self._has_resharded[tensor_name] = [dist_op]
                    else:
                        for item in self._has_resharded[tensor_name]:
                            item_dist_attr = item.dist_attr
                            item_dims_mapping = (
                                item_dist_attr.get_input_dims_mapping(
                                    tensor_name
                                )
                            )
                            item_process_mesh = item_dist_attr.process_mesh
                            if (
                                dims_mapping == item_dims_mapping
                                and item_process_mesh == process_mesh
                            ):
                                return reshard_op_cost
                        self._has_resharded[tensor_name].append(dist_op)

                    reshard_op_desc = self.find_op_desc_seq(
                        dist_tensor, dist_attr, serial=True
                    )
                    dtype = dist_tensor.serial_tensor.dtype
                    reshard_op_cost = self.parse_op_desc_for_cost(
                        reshard_op_desc, dtype, cluster
                    )

        return reshard_op_cost

    def _concat_partitions_for_cost(
        self,
        partition_tensor_list,
        partition_index,
        dtype,
        rank_id,
        local_rank_comp_cost,
        cluster,
    ):
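        """Merge ``partition_index`` into ``partition_tensor_list``, appending a
        ConcatOpCost to ``local_rank_comp_cost[rank_id]`` for every concat the
        merge requires."""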
        if not partition_tensor_list:
            partition_tensor_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                (
                    concat_axis,
                    first_order,
                    new_partition,
                ) = Resharder.compute_concat_info(
                    partition_tensor_list[i], partition_index
                )
                if concat_axis != -1:
                    has_concat = True
                    concat_desc = {}
                    concat_desc["op"] = "concat"
                    concat_desc["attrs"] = {"axis": concat_axis}
                    if first_order == 0:
                        concat_desc["inputs"] = {
                            "X": [
                                (dtype, partition_tensor_list[i]),
                                (dtype, partition_index),
                            ]
                        }
                    else:
                        concat_desc["inputs"] = {
                            "X": [
                                (dtype, partition_index),
                                (dtype, partition_tensor_list[i]),
                            ]
                        }
                    partition_tensor_list.pop(i)
                    if rank_id not in local_rank_comp_cost:
                        local_rank_comp_cost[rank_id] = []
                    local_rank_comp_cost[rank_id].append(
                        ConcatOpCost(op_desc=concat_desc, cluster=cluster)
                    )
                    self._concat_partitions_for_cost(
                        partition_tensor_list,
                        new_partition,
                        dtype,
                        rank_id,
                        local_rank_comp_cost,
                        cluster,
                    )
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append(partition_index)

    def parse_op_desc_for_cost(self, reshard_op_desc, dtype, cluster):
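        """Translate a reshard op desc sequence into communication costs and
        per-rank computation costs."""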
        def _get_idx(comm_ranks, group_ranks):
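            # Return the index of the first comm_ranks entry that shares a
            # rank with group_ranks (None if all entries are disjoint) and
            # whether that entry is exactly the same set of ranks.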
            res, is_the_same = None, False
            idx = 0
            while idx < len(comm_ranks):
                if comm_ranks[idx] == set(group_ranks):
                    is_the_same = True

                for rank in group_ranks:
                    if rank in comm_ranks[idx]:
                        res = idx
                        comm_ranks[idx].add(rank)
                if res is None:
                    idx += 1
                else:
                    break
            return res, is_the_same

        comm_context = CommContext(cluster)
        # run communication ops before computation ops
        # TODO: Communication cost is not calculated when the var has been transferred by the same group in the past
        comm_costs = []
        comm_ranks = []
        local_rank_comp_cost = {}
        for key in reshard_op_desc:
            partition_tensor_list = []
            op_desc_list = reshard_op_desc[key]
            for op_desc in op_desc_list:
                if isinstance(op_desc, SendOpDesc):
                    group_ranks = [key, op_desc.dst]
                    shape = op_desc.shape
                    send_desc = build_comm_desc(
                        "send_v2", group_ranks, dtype, shape
                    )
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append(
                            [
                                (
                                    group_ranks,
                                    SendOpCost(
                                        op_desc=send_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            ]
                        )
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (
                                    group_ranks,
                                    SendOpCost(
                                        op_desc=send_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            )
                elif isinstance(op_desc, AllGatherOpDesc):
                    # NOTE: fill_constant and other auxiliary ops are not counted because their cost is negligible
                    group_ranks = op_desc.group
                    shape = op_desc.shape
                    allgather_desc = build_comm_desc(
                        "c_allgather", group_ranks, dtype, shape
                    )
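                    # allgather multiplies dim 0 by the group size; the result
                    # is split back along axis 0, so a split cost is added too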
                    split_inputs_shape = []
                    for idx, dim in enumerate(shape):
                        if idx == 0:
                            split_inputs_shape.append(dim * len(group_ranks))
                        else:
                            split_inputs_shape.append(dim)
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append(
                            [
                                (
                                    group_ranks,
                                    AllgatherOpCost(
                                        op_desc=allgather_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            ]
                        )
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (
                                    group_ranks,
                                    AllgatherOpCost(
                                        op_desc=allgather_desc,
                                        comm_context=comm_context,
                                    ),
                                )
                            )
                    # calc the split op cost
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
                    split_desc = {}
                    split_desc["op"] = "split"
                    split_desc["inputs"] = {
                        "inputs": [(dtype, split_inputs_shape)]
                    }
                    split_desc["attrs"] = {"num": len(group_ranks), "axis": 0}
                    local_rank_comp_cost[key].append(
                        SplitOpCost(op_desc=split_desc, cluster=cluster)
                    )
                elif isinstance(op_desc, ConcatOpDesc):
                    partition_index_list = op_desc._partition_index_list
                    for partition_index in partition_index_list:
                        self._concat_partitions_for_cost(
                            partition_tensor_list,
                            partition_index,
                            dtype,
                            key,
                            local_rank_comp_cost,
                            cluster,
                        )

                elif isinstance(op_desc, SliceOpDesc):
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
                    assert (
                        len(partition_tensor_list) == 1
                        or not partition_tensor_list
                    )
                    to_slice_tensor_shape = []
                    if len(partition_tensor_list) == 1:
                        for item in partition_tensor_list[0]:
                            to_slice_tensor_shape.append(item[1] - item[0])
                    else:
                        to_slice_tensor_shape = op_desc.shape
                    slice_desc = {}
                    slice_desc["op"] = "slice"
                    infer_flags = [1] * len(op_desc.axes)
                    slice_desc["attrs"] = {
                        "axes": op_desc.axes,
                        "starts": op_desc.starts,
                        "ends": op_desc.ends,
                        "infer_flags": infer_flags,
                    }
                    slice_desc["inputs"] = {
                        "Input": [(dtype, to_slice_tensor_shape)]
                    }
                    local_rank_comp_cost[key].append(
                        SliceOpCost(op_desc=slice_desc, cluster=cluster)
                    )

        res = (comm_costs, local_rank_comp_cost)

        return res