# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import copy
from functools import reduce

import paddle
import paddle.fluid.core as core
from paddle.utils import unique_name
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import Program, OpProtoHolder
from paddle.distributed.fleet.meta_optimizers.common import OpRole
import paddle.fluid.layers.utils as utils
from ..collective import _get_global_env
from .dist_context import DistributedContext
from .dist_attribute import OperatorDistributedAttribute, TensorDistributedAttribute
from .process_group import new_process_group, ProcessGroup, _g_process_group_map
from .cost import build_comm_desc, CommContext
from .cost import AllgatherOpCost, SendOpCost
from .cost import SliceOpCost, SplitOpCost, ConcatOpCost
from .cluster import Cluster
from .utils import print_program_with_dist_attr, is_gradient_clip_op

# NOTE: If op in _g_special_ops or _g_gradient_clip_ops, it will not be resharded.
_g_special_ops = ['check_finite_and_unscale', 'update_loss_scaling']
_g_gradient_clip_ops = [
    "sum", "sqrt", "fill_constant", "elementwise_max", "elementwise_div"
]
_g_subblock_ops = ["while", "conditional_block"]


def get_var_with_recursion(var_name, block, program):
    """Get var in the parent block if not found in the current block"""
    var = None
    if var_name in block.vars:
        var = block.vars[var_name]
    else:
        var = block._var_recursive(var_name)
        # parent_block = program.blocks[block.parent_idx]
        # if var_name in parent_block.vars:
        #     var = parent_block.vars[var_name]
    assert var is not None, "{} is not found".format(var_name)

    return var


class AllGatherOpDesc:
    """
    Describe the allgather op in the reshard phase.

    Args:
        group (list): Process group.
        shape (list): The tensor shape.
        is_bool (bool): Whether to allgather bool data. Default: False.
    """

    def __init__(self, group, shape, is_bool=False):
        self._group = group
        self._desc = "all_gather"
        self._shape = shape
        self._is_bool = is_bool

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def group(self):
        return self._group

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, group: {self._group}, shape: {self._shape}, is_bool: {self._is_bool}."


class SendOpDesc:
    """
    Describe the send op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process to send.
        dst (int): The destination process to receive.
        is_bool (bool): Whether to send bool data. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._dst = dst
        self._partition_index = partition_index
        self._desc = "send"
        self._shape = []
        self._is_bool = is_bool
        self._src = src

    @property
    def src(self):
        return self._src

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def dst(self):
        return self._dst

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class RecvOpDesc:
    """
    Describe the recv op in the reshard phase.

    Args:
        partition_index (list): The index of partition in complete tensor.
        src (int): The source process to send.
        dst (int): The destination process to receive.
        is_bool (bool): Whether to receive bool data. Default: False.
    """

    def __init__(self, partition_index, src, dst, is_bool=False):
        self._src = src
        self._partition_index = partition_index
        self._desc = "recv"
        self._shape = []
        self._is_bool = is_bool
        self._dst = dst

    @property
    def dst(self):
        return self._dst

    @property
    def is_bool(self):
        return self._is_bool

    @property
    def partition_index(self):
        return self._partition_index

    @property
    def src(self):
        return self._src

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        if not self._shape:
            for item in self.partition_index:
                self._shape.append(item[1] - item[0])
        return self._shape

    def __repr__(self):
        return f"op: {self._desc}, partition_index: {self._partition_index}, dst: {self._dst}, shape: {self._shape}, is_bool: {self._is_bool}."


class SliceOpDesc:
    """
    Describe the slice op in the reshard phase.

    Args:
        starts (list): It represents start indices of corresponding axis in ``axes``.
        ends (list):  It represents end indices of corresponding axis in ``axes``.
        axes (list):  Axes that `starts` and `ends` apply to.
        shape (list): The shape of the tensor to be sliced.
    """

    def __init__(self, starts, ends, axes, shape=None):
        self._starts = starts
        self._ends = ends
        self._axes = axes
        self._desc = "slice"
        self._shape = shape

    @property
    def starts(self):
        return self._starts

    @property
    def ends(self):
        return self._ends

    @property
    def axes(self):
        return self._axes

    @property
    def desc(self):
        return self._desc

    @property
    def shape(self):
        return self._shape

    def __repr__(self):
        if self._shape is not None:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}, shape: {self._shape}."
        else:
            return f"op: {self._desc}, starts: {self._starts}, ends: {self._ends}, axes: {self._axes}."


class ConcatOpDesc:
    """
    Describe the concat op in the reshard phase.

    Args:
        partition_index_list (list): The list contains all partition index.
    """

    def __init__(self, partition_index_list):
        self._partition_index_list = partition_index_list
        self._desc = "concat"

    @property
    def partition_index_list(self):
        return self._partition_index_list

    @property
    def desc(self):
        return self._desc

    def __repr__(self):
        return f"op: {self._desc}, partition_index_list: {self._partition_index_list}."


class Inserter:
    """Insert op required in the reshard process."""

    @staticmethod
    def insert_cast_op(block, idx, tensor, op_role, tensor_type):
        # to avoid name conflict with framework
        new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key(
            ".".join(["cast@RESHARD", 'tmp']))
        out = block.create_var(name=new_var_name,
                               dtype=tensor_type,
                               type=tensor.type,
                               lod_level=tensor.lod_level)
        cast_op = block._insert_op(idx,
                                   type='cast',
                                   inputs={'X': [tensor]},
                                   outputs={'Out': [out]},
                                   attrs={
                                       'in_dtype': tensor.dtype,
                                       'out_dtype': out.dtype,
                                       'op_role': op_role
                                   })
        cast_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_send_op(block, idx, tensor, src, dst, op_role):
        """Insert send op into block at the given index."""
        op_type = 'send_v2'
        # use pair comm group
        process_group = new_process_group([src, dst])
        send_op = block._insert_op(idx,
                                   type=op_type,
                                   inputs={'X': [tensor]},
                                   attrs={
                                       'ring_id': process_group.id,
                                       'peer': process_group.ranks.index(dst),
                                       'use_calc_stream': True,
                                       'op_role': op_role,
                                       'dynamic_shape': True
                                   })
        send_op._set_attr('op_namescope', "/auto_parallel/reshard")

    @staticmethod
    def insert_recv_op(block, idx, tensor, src, dst, op_role):
        """Insert recv op into block at the given index."""
        op_type = 'recv_v2'
        # use pair group
        process_group = new_process_group([src, dst])
        recv_op = block._insert_op(idx,
                                   type=op_type,
                                   inputs={'X': [tensor]},
                                   outputs={'Out': [tensor]},
                                   attrs={
                                       'ring_id': process_group.id,
                                       'peer': process_group.ranks.index(src),
                                       'out_shape': tensor.shape,
                                       'dtype': tensor.dtype,
                                       'use_calc_stream': True,
                                       'op_role': op_role,
                                       'dynamic_shape': True
                                   })
        recv_op._set_attr('op_namescope', "/auto_parallel/reshard")

    @staticmethod
    def insert_reset_lod_op(block, idx, X, Y, op_role):
        """Insert reset_lod op into block at the given index."""

        new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key(
            ".".join(["reset_lod@RESHARD", 'tmp']))
        reset_lod_out = block.create_var(name=new_var_name,
                                         shape=X.shape,
                                         type=X.type,
                                         dtype=X.dtype,
                                         lod_level=X.lod_level)

        reset_op = block._insert_op(idx,
                                    type="lod_reset",
                                    inputs={
                                        'X': X,
                                        'Y': Y
                                    },
                                    outputs={'Out': reset_lod_out},
                                    attrs={'op_role': op_role})
        reset_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return reset_lod_out

    @staticmethod
    def insert_concat_op(block, idx, tensors, axis, op_role):
        """Insert concat op into block at the given block."""
        inputs = {'X': tensors}
        attrs = {}
        attrs['axis'] = axis
        attrs['op_role'] = op_role
        # to avoid name conflict with framework
        helper = LayerHelper('concat@RESHARD', **locals())
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])),
                dtype=tensors[0].dtype,
                shape=None,
                lod_level=tensors[0].lod_level,
                type=tensors[0].type,
                persistable=False,
                stop_gradient=False)
        concat_op = block._insert_op(idx,
                                     type='concat',
                                     inputs=inputs,
                                     outputs={'Out': [out]},
                                     attrs=attrs)
        concat_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_slice_op(block, idx, tensor, starts, ends, axes, new_var_name,
                        op_role):
        """Insert slice op into block at the given block."""
        # This is a hack to insert split op to get slice tensor
        # 1. [128, 128] => [64, 128]: split
        # 2. [128, 128] => [128, 128]: assign
        # 3. [128, 128] => [64, 64]: slice, it will be replaced by multiple splits
        global_shape = tensor.shape
        slice_shape = [ends[i] - starts[i] for i in range(len(starts))]
        diff_dims = []
        for index, item in enumerate(slice_shape):
            if item != global_shape[index]:
                diff_dims.append(index)

        # use assign
        if len(diff_dims) == 0:
            out = block.create_var(name=new_var_name,
                                   dtype=tensor.dtype,
                                   type=tensor.type,
                                   shape=slice_shape,
                                   lod_level=tensor.lod_level)
            inputs = {'X': [tensor]}
            outputs = {"Out": [out]}
            attrs = {"in_place": False}
            slice_op = block._insert_op(idx,
                                        type="assign",
                                        inputs=inputs,
                                        outputs=outputs,
                                        attrs=attrs)
            slice_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use split once
        elif len(diff_dims) == 1:
            diff_dim = diff_dims[0]
            num_or_sections = global_shape[diff_dim] // slice_shape[diff_dim]
            axis = diff_dim
            cur_idx = starts[diff_dim] // slice_shape[diff_dim]
            input_shape = global_shape
            inputs = {'X': tensor}
            attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
            new_shape = []
            for index, item in enumerate(tensor.shape):
                if index != axis:
                    new_shape.append(item)
                else:
                    new_shape.append(item // num_or_sections)
            with paddle.static.program_guard(block.program):
                outs = [
                    block.create_var(name=paddle.fluid.unique_name.
                                     generate_with_ignorable_key(".".join(
                                         ['split@RESHARD', 'tmp'])),
                                     dtype=tensor.dtype,
                                     shape=None,
                                     type=tensor.type,
                                     persistable=False,
                                     lod_level=tensor.lod_level,
                                     stop_gradient=False)
                    for i in range(num_or_sections)
                ]
                out = outs[cur_idx]
            split_op = block._insert_op(idx,
                                        type="split",
                                        inputs=inputs,
                                        outputs={'Out': outs},
                                        attrs=attrs)
            split_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

        # use slice
        else:
            inputs = {'Input': tensor}
            infer_flags = list(1 for i in range(len(axes)))
            attrs = {
                "axes": axes,
                "starts": starts,
                "ends": ends,
                "infer_flags": infer_flags,
                'op_role': op_role
            }
            out = block.create_var(name=new_var_name,
                                   dtype=tensor.dtype,
                                   type=tensor.type,
                                   lod_level=tensor.lod_level)
            slice_op = block._insert_op(idx,
                                        type="slice",
                                        inputs=inputs,
                                        outputs={'Out': [out]},
                                        attrs=attrs)
            slice_op._set_attr('op_namescope', "/auto_parallel/reshard")
            return out

    @staticmethod
    def insert_split_op(block, idx, tensor, num_or_sections, op_role, axis=0):
        """Insert split op into block at the given index."""
        helper = LayerHelper('split@RESHARD', **locals())
        input_shape = tensor.shape
        inputs = {'X': tensor}
        attrs = {'num': num_or_sections, 'axis': axis, 'op_role': op_role}
        new_shape = []
        for index, item in enumerate(tensor.shape):
            if index != axis:
                new_shape.append(item)
            else:
                new_shape.append(item // num_or_sections)
        with paddle.static.program_guard(block.program):
            outs = [
                block.create_var(
                    name=paddle.fluid.unique_name.generate_with_ignorable_key(
                        ".".join([helper.name, 'tmp'])),
                    dtype=tensor.dtype,
                    shape=None,
                    lod_level=tensor.lod_level,
                    type=tensor.type,
                    persistable=False,
                    stop_gradient=False) for i in range(num_or_sections)
            ]
        split_op = block._insert_op(idx,
                                    type="split",
                                    inputs=inputs,
                                    outputs={'Out': outs},
                                    attrs=attrs)
        split_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return outs

    @staticmethod
    def insert_fill_constant_op(block, idx, op_role):
        """Insert fill constant op into block at the given index."""
        # to avoid name conflict with framework
        helper = LayerHelper('fill_constant@RESHARD', **locals())
        # use paddle.int64 as dtype
        with paddle.static.program_guard(block.program):
            out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])),
                dtype=paddle.int64,
                shape=None,
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False)
        inputs = {}
        attrs = {'force_cpu': False}
        attrs['str_value'] = str(int("1"))
        attrs['value'] = int("1")
        attrs['dtype'] = out.dtype
        attrs['op_role'] = op_role
        utils.get_shape_tensor_inputs(inputs=inputs,
                                      attrs=attrs,
                                      shape=[0],
                                      op_type='fill_constant')
        fillconstant_op = block._insert_op(idx,
                                           type='fill_constant',
                                           inputs=inputs,
                                           outputs={'Out': [out]},
                                           attrs=attrs)
        out.stop_gradient = True
        fillconstant_op._set_attr('op_namescope', "/auto_parallel/reshard")
        return out

    @staticmethod
    def insert_allgather_op(block, idx, tensor, ranks, op_role):
        """Insert allgather op into block at the given index."""
        tensor_list = []
        group = new_process_group(ranks)
        idx_offset = 0

        # instantiate the process group before inserting the allgather op.
        if not group.is_instantiate():
            # insert fill_constant op
            fill_constant_out = Inserter.insert_fill_constant_op(
                block, idx, op_role)
            fill_constant_out.stop_gradient = True

            # insert c_allreduce_sum op
            allreduce_op = block._insert_op(
                idx + 1,
                type="c_allreduce_sum",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={
                    'ring_id': 0,
                    'use_calc_stream': True,
                    'op_role': op_role
                })
            allreduce_op._set_attr('op_namescope', "/auto_parallel/reshard")
            # insert c_sync_calc_stream op
            sync_calc_op = block._insert_op(
                idx + 2,
                type="c_sync_calc_stream",
                inputs={'X': [fill_constant_out]},
                outputs={'Out': [fill_constant_out]},
                attrs={'op_role': op_role})
            sync_calc_op._set_attr('op_namescope', "/auto_parallel/reshard")
            idx_offset = 3

        # insert c_allgather op
        op_type = 'c_allgather'
        # to avoid name conflict with framework
        helper = LayerHelper(op_type + "@RESHARD", **locals())
        with paddle.static.program_guard(block.program):
            allgather_out = block.create_var(
                name=paddle.fluid.unique_name.generate_with_ignorable_key(
                    ".".join([helper.name, 'tmp'])),
                dtype=tensor.dtype,
                shape=None,
                lod_level=tensor.lod_level,
                type=tensor.type,
                persistable=False,
                stop_gradient=False)
        allgather_op = block._insert_op(idx + idx_offset,
                                        type=op_type,
                                        inputs={'X': [tensor]},
                                        outputs={'Out': [allgather_out]},
                                        attrs={
                                            'ring_id': group.id,
                                            'use_calc_stream': True,
                                            'nranks': group.nranks,
                                            'op_role': op_role
                                        })
        allgather_op._set_attr('op_namescope', "/auto_parallel/reshard")
        idx_offset += 1

        # insert split op
        split_out = Inserter.insert_split_op(block, idx + idx_offset,
                                             allgather_out, group.nranks,
                                             op_role)
        idx_offset += 1
        tensor_list.extend(split_out)
        return tensor_list, idx_offset

    @staticmethod
    def concat_partitions_with_op(partition_tensor_list, tensor,
                                  partition_index, block, idx, op_role):
        """Concat the tensors and insert concat op."""
        if not partition_tensor_list:
            partition_tensor_list.append((tensor, partition_index))
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                concat_axis, first_order, new_partition = Resharder.compute_concat_info(
                    partition_tensor_list[i][1], partition_index)
                if concat_axis != -1:
                    has_concat = True
                    _ = Inserter.insert_concat_op(block, idx[0], [partition_tensor_list[i][0], tensor], concat_axis, op_role) \
                        if first_order == 0 else \
                        Inserter.insert_concat_op(block, idx[0], [tensor, partition_tensor_list[i][0]], concat_axis, op_role)
                    partition_tensor_list.pop(i)
                    idx[0] += 1
                    Inserter.concat_partitions_with_op(partition_tensor_list, _,
                                                       new_partition, block,
                                                       idx, op_role)
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append((tensor, partition_index))


class Remover:
    """Remove var and op in the reshard process."""

    @staticmethod
    def remove_no_need_ops(auto_parallel_main_prog, dist_context, rank_id):
        """Remove no need ops in the main program"""
        not_remove_op_ref = [
            "create_py_reader", "create_double_buffer_reader", "read"
        ]

        # NOTE: Nested sub blocks are not supported now.
        remove_block_order = []
        for block_idx in Resharder.while_block_info:
            remove_block_order.append(block_idx)

        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            if block_idx not in remove_block_order:
                remove_block_order.append(block_idx)

        # the sub block should be removed first
        for block_idx in remove_block_order:
            remove_op_idx = []
            block = auto_parallel_main_prog.blocks[block_idx]
            ops = block.ops
            vars = block.vars
            for idx, op in enumerate(ops):
                if op.type == "read":
                    dim_list = []
                    for var_name in op.output_arg_names:
                        dim_list.extend(
                            get_var_with_recursion(
                                var_name, block, auto_parallel_main_prog).shape)
                    for i in range(idx, -1, -1):
                        if ops[i].type == "create_py_reader":
                            ops[i]._set_attr("shape_concat", dim_list)
                            break
                    continue

                # replace the input and output of the c_sync_comm_stream op in the pipeline scenario.
                if op.type == "c_sync_comm_stream":
                    need_save = []
                    for var_name in op.input_arg_names:
                        process_mesh = dist_context.get_tensor_dist_attr_for_program(
                            get_var_with_recursion(
                                var_name, block,
                                auto_parallel_main_prog)).process_mesh
                        if rank_id in process_mesh.processes:
                            need_save.append(var_name)
                    if not need_save:
                        remove_op_idx.append(idx)
                        continue

                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, need_save)
                    op.desc.set_output(proto.outputs[0].name, need_save)
                    continue

                # judge whether the other ops should be removed.
                op_dist_attr = dist_context.get_op_dist_attr_for_program(op)
                if op_dist_attr is not None:
                    op_process_mesh = op_dist_attr.process_mesh
                    if rank_id not in op_process_mesh.processes and op.type not in not_remove_op_ref:
                        remove_op_idx.append(idx)

            for idx in remove_op_idx[::-1]:
                block._remove_op(idx)

    @staticmethod
    def remove_no_need_vars(auto_parallel_main_prog, dist_params_grads,
                            feed_var_names):
        """Remove no need vars in the main program"""
        for block_idx, block in enumerate(auto_parallel_main_prog.blocks):
            remove_vars = set()
            ops = block.ops
            vars = block.vars
            need_vars = set()
            for op in ops:
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
                for var_name in op.output_arg_names:
                    if var_name in vars:
                        need_vars.add(var_name)
            for var in vars:
                if var not in need_vars:
                    remove_vars.add(var)

            # change dist_params_grads; the optimize ops are only in block 0.
            if block_idx == 0:
                param_grad_map = {}
                for op in ops:
                    if int(op.attr('op_role')) == int(OpRole.Optimize):
                        if "Param" in op.input_names and "Grad" in op.input_names:
                            param_name = op.input("Param")[0]
                            grad_name = op.input("Grad")[0]
                            param_grad_map[param_name] = grad_name

                need_remove_idx = []
                for idx, item in enumerate(dist_params_grads):
                    if item[0].name not in param_grad_map.keys():
                        need_remove_idx.append(idx)

                for idx in need_remove_idx[::-1]:
                    dist_params_grads.pop(idx)

                idx = 0
                while idx < len(dist_params_grads):
                    param_name = dist_params_grads[idx][0].name
                    grad_name = dist_params_grads[idx][1].name
                    if grad_name != param_grad_map[param_name]:
                        dist_params_grads[idx] = (
                            vars[param_name], vars[param_grad_map[param_name]])
                    idx += 1

            for var in remove_vars:
                if var in feed_var_names:
                    continue
                block._remove_var(var)

    @staticmethod
    def remove_no_need_in_main(auto_parallel_main_prog, dist_context, rank_id,
                               dist_params_grads):
        """Remove no need vars and ops in the main program."""
        Remover.remove_no_need_ops(auto_parallel_main_prog, dist_context,
                                   rank_id)
        Resharder.change_while_op_input_and_output(auto_parallel_main_prog,
                                                   dist_context)
        # vars in 'feed_var_names' cannot be removed from auto_parallel_main_prog
        feed_var_names = []
        for var in sum(list(dist_context.serial_feed_vars.values()), []):
            feed_var_names.append(var.name)
        Remover.remove_no_need_vars(auto_parallel_main_prog, dist_params_grads,
                                    feed_var_names)

    @staticmethod
    def remove_no_need_in_startup(auto_parallel_main_prog,
                                  auto_parallel_startup_prog):
        """Remove no need vars and ops in the startup program."""
        main_input_vars = set()
        main_ops = auto_parallel_main_prog.global_block().ops
        for op in main_ops:
            for var_name in op.input_arg_names:
                main_input_vars.add(var_name)

        startup_block = auto_parallel_startup_prog.global_block()
        startup_output_vars = set()
        startup_ops = startup_block.ops
        for op in startup_ops:
            # skip c_sync_comm_stream op
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                startup_output_vars.add(var_name)

        need_vars = set()
        for var_name in startup_output_vars:
            if var_name in main_input_vars:
                need_vars.add(var_name)

        startup_ops = startup_block.ops
        actual_need_vars = set()
        for idx, op in enumerate(startup_ops):
            is_need_op = False
            if op.type == "c_sync_comm_stream":
                continue
            for var_name in op.output_arg_names:
                if var_name in need_vars:
                    is_need_op = True
                    break
            if is_need_op:
                for var_name in op.output_arg_names:
                    actual_need_vars.add(var_name)
                for var_name in op.input_arg_names:
                    actual_need_vars.add(var_name)

        remove_vars = set()
        for var_name in startup_block.vars:
            if var_name not in actual_need_vars:
                remove_vars.add(var_name)
        for var in remove_vars:
            startup_block._remove_var(var)

        remove_op_idx = []
        vars = startup_block.vars
        for idx, op in enumerate(startup_block.ops):
            is_no_need_op = False
            if op.type == "c_sync_comm_stream":
                var_names = []
                for var_name in op.input_arg_names:
                    if var_name in vars:
                        var_names.append(var_name)
                if not var_names:
                    remove_op_idx.append(idx)
                else:
                    proto = OpProtoHolder.instance().get_op_proto(op.type)
                    op.desc.set_input(proto.inputs[0].name, var_names)
                    op.desc.set_output(proto.outputs[0].name, var_names)
                continue

            for var_name in op.output_arg_names:
                if var_name not in vars:
                    is_no_need_op = True
                    break
            if is_no_need_op:
                remove_op_idx.append(idx)
        for idx in remove_op_idx[::-1]:
            startup_block._remove_op(idx)


class Resharder:
    """
    Reshard tensor in the program according to its distributed attribute and corresponding op distributed attribute.

    Args:
        auto_parallel_main_prog (Program): An auto parallel main program.
        auto_parallel_startup_prog (Program): An auto parallel startup program.
        rank_id (int): The process id.
        dist_context (DistributedContext): The distributed context of this rank.
        dist_params_grads (list): The list contains the tuple of param and grad.
        batch_size (int): The batch size. Default: None.
    """
    while_block_info = {}

    def __init__(self,
                 auto_parallel_main_prog,
                 auto_parallel_startup_prog,
                 rank_id,
                 dist_context,
                 dist_params_grads,
                 batch_size=None):
        assert isinstance(auto_parallel_main_prog, Program), "The type of auto_parallel_main_prog should be Program, " \
                                            "but got {}.".format(type(auto_parallel_main_prog))
        if auto_parallel_startup_prog is not None:
            assert isinstance(auto_parallel_startup_prog, Program), "The type of auto_parallel_startup_prog should be Program or None, " \
                                                "but got {}.".format(type(auto_parallel_startup_prog))
        assert isinstance(rank_id, int), "The type of rank_id should be int, " \
                                            "but got {}.".format(type(rank_id))
        assert isinstance(dist_context, DistributedContext), "The type of dist_context should be DistributedContext, " \
                                            "but got {}.".format(type(dist_context))

        if batch_size is not None:
            assert isinstance(batch_size, int), "The type of batch_size should be int, " \
                                                "but got {}.".format(type(batch_size))

        self._auto_parallel_main_prog = auto_parallel_main_prog
        self._auto_parallel_startup_prog = auto_parallel_startup_prog
        self._rank_id = rank_id
        self._dist_context = dist_context
        self._dist_params_grads = dist_params_grads
        self._batch_size = batch_size
        self._has_sent = {}
        self._has_recv = {}
        self._has_allgather = {}
888 889
        # to avoid resharding repeatedly
        self._has_resharded = {}

    @property
    def auto_parallel_main_prog(self):
        return self._auto_parallel_main_prog

    @property
    def auto_parallel_startup_prog(self):
        return self._auto_parallel_startup_prog

    @property
    def rank_id(self):
        return self._rank_id

    @property
    def dist_context(self):
        return self._dist_context

    @property
    def dist_params_grads(self):
        return self._dist_params_grads

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def has_sent(self):
        return self._has_sent

    @property
    def has_recv(self):
        return self._has_recv

    @property
    def has_allgather(self):
        return self._has_allgather

    @staticmethod
    def compute_partition_shape(complete_shape, dims_mapping, process_shape):
        """Compute the shape of partition."""
        partition_shape = []
        for idx, item in enumerate(complete_shape):
            if dims_mapping[idx] == -1:
                partition_shape.append(item)
            else:
                partition_shape.append(item // process_shape[dims_mapping[idx]])

        return partition_shape

    @staticmethod
    def compute_process_index(process, process_group, process_shape):
        """Compute the index of process_shape corresponding to the process."""
        relative_process = process_group.index(process)
        process_index = []
        product = reduce(lambda x, y: x * y, process_shape)

        for i in range(len(process_shape)):
            idx = relative_process // (product // process_shape[i])
            product = product // process_shape[i]
            relative_process = relative_process - relative_process // product * product
            process_index.append(idx)

        return process_index

    @staticmethod
    def compute_partition_index(process, complete_shape, dims_mapping,
                                process_shape, process_group):
        """Compute the partition index in complete tensor."""
        partition_shape = Resharder.compute_partition_shape(
            complete_shape, dims_mapping, process_shape)
        process_index = Resharder.compute_process_index(process, process_group,
                                                        process_shape)
        partition_index = []

        for i in range(len(complete_shape)):
            if dims_mapping[i] == -1:
                partition_index.append([0, partition_shape[i]])
            else:
                partition_index.append([
                    process_index[dims_mapping[i]] * partition_shape[i],
                    (process_index[dims_mapping[i]] + 1) * partition_shape[i]
                ])

        return partition_index

    @staticmethod
    def compute_concat_info(partition_index_x, partition_index_y):
        """Judge whether two partition can be concatenated and compute concatenated partition index."""
        differ_count = 0
        concat_axis = -1
        first_order = 0
        new_partition = []

        for idx, item in enumerate(partition_index_x):
            if item != partition_index_y[idx]:
                differ_count += 1
                if item[1] == partition_index_y[idx][
                        0] and item[0] < partition_index_y[idx][1]:
                    concat_axis = idx
                    new_partition.append([item[0], partition_index_y[idx][1]])
                elif item[0] == partition_index_y[idx][
                        1] and item[1] > partition_index_y[idx][0]:
                    first_order = 1
                    concat_axis = idx
                    new_partition.append([partition_index_y[idx][0], item[1]])
            else:
                new_partition.append(item)

        if differ_count == 1:
            return concat_axis, first_order, new_partition
        else:
            return -1, first_order, new_partition

    @staticmethod
    def compute_complete_shape(slice_shape, process_shape, dims_mapping):
        """compute the complete shape of the slice tensor  with its process mesh and dims mapping"""
        complete_shape = []
        for idx, item in enumerate(slice_shape):
            if dims_mapping[idx] == -1:
                complete_shape.append(item)
            else:
                complete_shape.append(item * process_shape[dims_mapping[idx]])
        return complete_shape

    @staticmethod
    def concat_partitions(partition_index_list, partition_index):
        """Concat the given partitions without inserting concat op."""
        if not partition_index_list:
            partition_index_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_index_list):
                concat_axis, _, new_partition = Resharder.compute_concat_info(
                    partition_index_list[i], partition_index)
                if concat_axis != -1:
                    has_concat = True
                    partition_index_list.pop(i)
                    Resharder.concat_partitions(partition_index_list,
                                                new_partition)
                    break
                i += 1
            if not has_concat:
                partition_index_list.append(partition_index)

    @staticmethod
    def change_while_op_input_and_output(auto_parallel_main_prog, dist_context):
        """Change while op input and output after the corresponding sub block ops removed"""
        for sub_block_idx in Resharder.while_block_info:
            sub_block = auto_parallel_main_prog.blocks[sub_block_idx]
            parent_while_op_id = Resharder.while_block_info[sub_block_idx][
                "op_id"]
            parent_block = auto_parallel_main_prog.blocks[sub_block.parent_idx]

            sub_block_op_inputs = set()
            sub_block_op_outputs = []
            for op in sub_block.ops:
                # skip the input and output of operators inserted in the reshard phase
                dist_op = dist_context.get_dist_op_for_program(op)
                if dist_op or (op.type == "slice" and not dist_op) or (
                        op.type == "split"
                        and not dist_op) or (op.type == "assign"
                                             and not dist_op):
                    for var_name in op.output_arg_names:
                        if var_name not in sub_block_op_outputs:
                            sub_block_op_outputs.append(var_name)
                    for var_name in op.input_arg_names:
                        sub_block_op_inputs.add(var_name)

            # find the while op
            while_op = None
            for op in parent_block.ops:
                if op.desc.id() == parent_while_op_id and op.type == "while":
                    while_op = op
                    break

            if while_op is None:
                continue

            # find the actual input and output of while op
            proto = OpProtoHolder.instance().get_op_proto(while_op.type)
            new_X = []
            for var_name in while_op.input("X"):
                if var_name in sub_block_op_inputs:
                    new_X.append(var_name)
            assert new_X
            new_X.sort()
            while_op.desc.set_input(proto.inputs[0].name, new_X)

            new_Out = []
            for var_name in while_op.output("Out"):
                for output_name in sub_block_op_outputs[::-1]:
                    if output_name.find(var_name) != -1 and (
                            len(var_name) == len(output_name)
                            or "@RESHARD" in output_name):
                        if output_name not in new_Out:
                            new_Out.append(output_name)
            assert new_Out
            while_op.desc.set_output(proto.outputs[0].name, new_Out)

    def is_overlapped(self, shape_x, shape_y):
        """Judge whether two partitions intersect on the specified dimension."""
        overlapped = False
        if (shape_y[0] <= shape_x[0] < shape_y[1]) or (shape_x[0] <= shape_y[0]
                                                       < shape_x[1]):
            overlapped = True
        return overlapped

    def is_unshard(self, dims_mapping):
        for dim in dims_mapping:
            if dim != -1:
                return False
        return True

    def is_special_op(self, op):
        global _g_special_ops, _g_gradient_clip_ops
        if op.type in _g_special_ops:
            return True
        if is_gradient_clip_op(op) and op.type in _g_gradient_clip_ops:
            return True
        return False

    def is_condition_replicative(self, op):
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]

        if op.type == "while":
            input_cond = op.input("Condition")
        elif op.type == "conditional_block":
            input_cond = op.input("Cond")

        # the dims mapping of condition tensor should be replicative
        for var_name in input_cond:
            var = get_var_with_recursion(var_name, sub_block,
                                         self.auto_parallel_main_prog)
            dist_tensor = self.dist_context.get_dist_tensor_for_program(var)
            tensor_dist_attr = dist_tensor.dist_attr
            var_dims_mapping = tensor_dist_attr.dims_mapping
            for dim in var_dims_mapping:
                if dim != -1:
                    return False

        return True

    def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
        """Judge the tensor whether needs to be resharded."""
        is_reshard = False
        tensor_dist_attr = dist_tensor.dist_attr
        tensor_dims_mapping = tensor_dist_attr.dims_mapping
        tensor_process_mesh = tensor_dist_attr.process_mesh

        # dist_attr is [process_mesh, dims_mapping] and process_mesh is not a union
        op_process_mesh = dist_attr[0]

        if op_input:
            op_input_dims_mapping = dist_attr[1]
            if all(
                    map(lambda x: x, [
                        tensor_dims_mapping, tensor_process_mesh,
                        op_input_dims_mapping, op_process_mesh
                    ])):
                # judge whether need reshard by dims_mapping
                if tensor_dims_mapping != op_input_dims_mapping:
                    if tensor_process_mesh not in self.dist_context.process_meshes:
                        # assert whether -1 when union.
                        for item in tensor_dims_mapping:
                            if item != -1:
                                raise ValueError(
                                    "The dim must be -1 when tensor process mesh is a union."
                                )
                        # tensor process_mesh: [0, 1, 2, 3], dims_mapping: [-1, -1]
                        # op process_mesh: [4, 5], dims_mapping: [0, -1]
                        # reshard is not supported such as above
                        if not is_reshard:
                            return is_reshard
                        else:
                            raise ValueError(
                                "it is not supported that tensor process mesh is a union and needs reshard."
                            )
                    is_reshard = True

                # judge whether need reshard by process_mesh
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True
        else:
            op_output_dims_mapping = dist_attr[1]
            if all(
                    map(lambda x: x, [
                        tensor_dims_mapping, tensor_process_mesh,
                        op_output_dims_mapping, op_process_mesh
                    ])):
                if tensor_dims_mapping != op_output_dims_mapping:
                    raise ValueError(
                        "It is not supported that tensor dims mapping is different from op output dims mapping."
                    )
                if tensor_process_mesh != op_process_mesh:
                    is_reshard = True

        return is_reshard

    def get_op_process_meshes(self, op):
        """Get sub process meshes of the given op if op process mesh is a union."""
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        op_process_mesh = dist_op.dist_attr.process_mesh

        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.processes) & (set(
                    op_process_mesh.processes)) and len(
                        process_mesh.processes) < len(
                            op_process_mesh.processes):
                process_meshes.append(process_mesh)

        # the op process mesh is not a union when process_meshes is empty
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        return process_meshes

    def find_op_desc_seq(self, dist_tensor, dist_attr, serial=False):
        """
        Find the op description sequence to reshard the source tensor for matching the op requirement.

        Args:
            dist_tensor (DistributedTensor): A distributed tensor.
            dist_attr (list): A list contains process_mesh and dims_mapping such as [process_mesh, dims_mapping].
            serial (bool): If serial is true, the dist tensor and dist op come from serial program. Otherwise, they come from auto program.

        Returns:
            Dict, the required op description sequence for each process. The key of the dict is
            the process and the value is a list of op descriptions.
        """
        tensor_dist_attr = dist_tensor.dist_attr
        source_tensor = dist_tensor.serial_tensor
        tensor_name = source_tensor.name
1224

1225 1226 1227 1228 1229
        source_dims_mapping = tensor_dist_attr.dims_mapping
        source_process_mesh = tensor_dist_attr.process_mesh
        source_process_group = source_process_mesh.processes
        source_process_shape = source_process_mesh.topology

        target_process_mesh = dist_attr[0]
        target_dims_mapping = dist_attr[1]
        target_process_group = target_process_mesh.processes
        target_process_shape = target_process_mesh.topology

        if source_tensor.shape[0] < 0:
            assert source_tensor.shape[0] == -1
            new_shape = list(source_tensor.shape)
            new_shape[0] = self.batch_size
            source_tensor.desc.set_shape(new_shape)

        complete_shape = Resharder.compute_complete_shape(
            source_tensor.shape, source_process_shape,
            source_dims_mapping) if not serial else source_tensor.shape
        op_desc_seq = {}

        # TODO: if the target process group has the same processes as the source process group
        if set(target_process_group).intersection(set(
                source_process_group)) and set(target_process_group).difference(
                    set(source_process_group)):
            pass

        elif target_process_group != source_process_group:
            partition_process_mapping_list = []
            for source_process in source_process_group:
                # get partition index of source process
                source_partition_index = Resharder.compute_partition_index(source_process, complete_shape, source_dims_mapping, \
                                                                source_process_shape, source_process_group)
                if not partition_process_mapping_list:
                    # each item in partition_process_mapping_list is [partition_index, processes holding it, whether each process has been used]
                    partition_process_mapping_list.append(
                        [source_partition_index, [source_process], [False]])
                else:
                    partition_list = list(
                        [item[0] for item in partition_process_mapping_list])
                    process_list = list(
                        [item[1] for item in partition_process_mapping_list])
                    has_used = list(
                        [item[2] for item in partition_process_mapping_list])

                    if partition_list.count(source_partition_index) == 1:
                        index = partition_list.index(source_partition_index)
                        process_list[index].append(source_process)
                        has_used[index].append(False)
                    else:
                        partition_process_mapping_list.append(
                            [source_partition_index, [source_process], [False]])

            for target_process in target_process_group:
                # has_sent means the source_partition_index has been sent to target_process
                has_sent = []
                target_partition_index = Resharder.compute_partition_index(
                    target_process, complete_shape, target_dims_mapping,
                    target_process_shape, target_process_group)
                partition_index_list = []
                all_partition_index_list = []
                for source_process in source_process_group:
                    source_partition_index = Resharder.compute_partition_index(
                        source_process, complete_shape, source_dims_mapping,
                        source_process_shape, source_process_group)
                    to_send_process = None
                    if all(_ for _ in list(map(self.is_overlapped, source_partition_index, target_partition_index))) \
                            and source_partition_index not in has_sent:
                        idx = list([
                            item[0] for item in partition_process_mapping_list
                        ]).index(source_partition_index)
                        has_used = list([
                            item[2] for item in partition_process_mapping_list
                        ])[idx]
                        process_list = list([
                            item[1] for item in partition_process_mapping_list
                        ])[idx]
                        i = 0
                        while i < len(has_used):
                            if not has_used[i]:
                                to_send_process = process_list[i]
                                has_used[i] = True
                                break
                            i += 1

                        if i == len(has_used):
                            has_used = list(map(lambda x: False, has_used))
                            to_send_process = process_list[0]
                            has_used[0] = True
                        assert to_send_process is not None, "Failed to find the send process."

                        if to_send_process not in op_desc_seq.keys():
                            op_desc_seq[to_send_process] = []
                        if target_process not in op_desc_seq.keys():
                            op_desc_seq[target_process] = []
                        all_partition_index_list.append(source_partition_index)

                        # append send and recv op desc
                        is_bool = (
                            dist_tensor.serial_tensor.dtype == paddle.bool)
                        send_op_desc = SendOpDesc(source_partition_index,
                                                  to_send_process,
                                                  target_process,
                                                  is_bool=is_bool)
                        recv_op_desc = RecvOpDesc(source_partition_index,
                                                  to_send_process,
                                                  target_process,
                                                  is_bool=is_bool)
                        op_desc_seq[to_send_process].append(send_op_desc)
                        op_desc_seq[target_process].append(recv_op_desc)
                        has_sent.append(source_partition_index)
                        Resharder.concat_partitions(partition_index_list,
                                                    source_partition_index)

                # append concat op desc
                op_desc_seq[target_process].append(
                    ConcatOpDesc(all_partition_index_list))

                # append slice op desc
                slice_starts = []
                slice_ends = []
                slices_axes = []
                concatenated_partition_index = partition_index_list[0]
                to_slice_tensor_shape = []

                for idx, item in enumerate(concatenated_partition_index):
                    slice_starts.append(target_partition_index[idx][0] -
                                        item[0])
                    slice_ends.append(target_partition_index[idx][1] - item[0])
                    slices_axes.append(idx)
                    to_slice_tensor_shape.append(item[1] - item[0])

                op_desc_seq[target_process].append(
                    SliceOpDesc(slice_starts,
                                slice_ends,
                                slices_axes,
                                shape=to_slice_tensor_shape))

        # within the same process group, allgather and slice ops are used.
        else:
            # NOTE: Only the evenly partitioned case is supported.
            partition_index_list = []
            all_partition_index_list = []
            process_index = []
            for source_process in source_process_group:
                source_partition_index = Resharder.compute_partition_index(
                    source_process, complete_shape, source_dims_mapping,
                    source_process_shape, source_process_group)
                if source_partition_index not in partition_index_list:
                    partition_index_list.append(source_partition_index)
                    process_index.append([[
                        source_process,
                    ], source_partition_index])
                else:
                    process_index[partition_index_list.index(
                        source_partition_index)][0].append(source_process)

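            # Processes holding the same partition are grouped together in process_index;
            # taking the i-th process of every partition forms an allgather group whose
            # members together cover the complete tensor.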
            for i in range(len(process_index[0][0])):
                group = []
                for j in range(len(process_index)):
                    group.append(process_index[j][0][i])
                    if i == 0:
                        all_partition_index_list.append(process_index[j][1])
                for process in group:
                    # append slice op desc
                    slice_starts = []
                    slice_ends = []
                    slices_axes = []
                    target_partition_index = Resharder.compute_partition_index(
                        process, complete_shape, target_dims_mapping,
                        target_process_shape, target_process_group)
                    for idx, item in enumerate(target_partition_index):
                        slice_starts.append(item[0])
                        slice_ends.append(item[1])
                        slices_axes.append(idx)

                    to_slice_tensor_shape = dist_tensor.global_sizes()
                    slice_op_desc = SliceOpDesc(starts=slice_starts,
                                                ends=slice_ends,
                                                axes=slices_axes,
                                                shape=to_slice_tensor_shape)
                    allgather_shape = None if not serial else dist_tensor.local_sizes(
                        rank=process)
                    op_desc_seq[process] = [AllGatherOpDesc(group=group, shape=allgather_shape, is_bool=(source_tensor.dtype == paddle.bool)),
                                            ConcatOpDesc(partition_index_list=all_partition_index_list), slice_op_desc] \
                        if len(group) > 1 else [slice_op_desc]

        return op_desc_seq

    def parse_op_desc(self, block, op_desc_seq, var_name, reshard_op,
                      dist_attr):
        """Parse op desc sequence and insert op in the block"""
        tensor_list = []
        partition_tensor_list = []
        if self.rank_id not in op_desc_seq.keys():
            return
        op_desc_list = op_desc_seq[self.rank_id]

        idx = None
        for index, op in list(enumerate(block.ops)):
            if op.desc.id == reshard_op.desc.id:
                idx = index
                break
        assert idx is not None, "The op for reshard cannot be found in the rank {} program.".format(
            self.rank_id)

        matched_op = block.ops[idx]
        source_tensor = get_var_with_recursion(var_name, block,
                                               self.auto_parallel_main_prog)
        for op_desc in op_desc_list:
            if isinstance(op_desc, AllGatherOpDesc):  # noqa: F401
                if var_name not in self.has_allgather.keys():
                    self.has_allgather[var_name] = []
                if not self.has_allgather[var_name] or op_desc.group not in list(
                        map(lambda x: x[0], self.has_allgather[var_name])):
                    if op_desc.is_bool:
                        # for bool data allgather, cast to int64 -> allgather -> cast bool
                        out_cast = Inserter.insert_cast_op(
                            block, idx, source_tensor,
                            reshard_op.attr('op_role'), paddle.int64)
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block, idx + 1, out_cast, op_desc.group,
                            reshard_op.attr('op_role'))
                        idx += idx_offset
                        tensor_name_list = []
                        for var in tensor_list:
                            out_cast = Inserter.insert_cast_op(
                                block, idx, var, reshard_op.attr('op_role'),
                                paddle.bool)
                            tensor_name_list.append(out_cast.name)
                            idx += 1
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list])
                    else:
                        tensor_list, idx_offset = Inserter.insert_allgather_op(
                            block, idx, source_tensor, op_desc.group,
                            reshard_op.attr('op_role'))
                        idx += idx_offset
                        tensor_name_list = [var.name for var in tensor_list]
                        self.has_allgather[var_name].append(
                            [op_desc.group, tensor_name_list])
                else:
                    for item in self.has_allgather[var_name]:
                        if op_desc.group == item[0]:
                            tensor_list = [
                                get_var_with_recursion(
                                    var_name, block,
                                    self.auto_parallel_main_prog)
                                for var_name in item[1]
                            ]
                            break
                assert tensor_list, "The result of parsing allgather op should not be None."

            elif isinstance(op_desc, SendOpDesc):
                if var_name not in self.has_sent.keys():
                    self.has_sent[var_name] = []
                if op_desc.dst not in self.has_sent[var_name]:
                    if op_desc.is_bool:
                        out_cast = Inserter.insert_cast_op(
                            block, idx, source_tensor,
                            reshard_op.attr('op_role'), paddle.int64)
                        Inserter.insert_send_op(block, idx + 1, out_cast,
                                                op_desc.src, op_desc.dst,
                                                reshard_op.attr('op_role'))
                        idx += 2
                    else:
                        Inserter.insert_send_op(block, idx, source_tensor,
                                                op_desc.src, op_desc.dst,
                                                reshard_op.attr('op_role'))
                        idx += 1
                    self.has_sent[var_name].append(op_desc.dst)

            elif isinstance(op_desc, RecvOpDesc):
                if var_name not in self.has_recv.keys():
                    self.has_recv[var_name] = {}
                if op_desc.src not in self.has_recv[var_name].keys():
                    partition_index = op_desc.partition_index
                    shape = []
                    for index in partition_index:
                        shape.append(index[1] - index[0])
                    if op_desc.is_bool:
                        # for bool data, recv int64 -> cast to bool
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=paddle.int64,
                            type=source_tensor.type)
                        Inserter.insert_recv_op(block, idx, recv_tensor,
                                                op_desc.src, op_desc.dst,
                                                reshard_op.attr('op_role'))
                        out_cast = Inserter.insert_cast_op(
                            block, idx + 1, recv_tensor,
                            reshard_op.attr('op_role'), paddle.bool)
                        tensor_list.append(out_cast)
                        idx += 2
                        self.has_recv[var_name][op_desc.src] = out_cast
                    else:
                        recv_tensor = block.create_var(
                            name=unique_name.generate(var_name + "@recv"),
                            shape=shape,
                            lod_level=source_tensor.lod_level,
                            dtype=source_tensor.dtype,
                            type=source_tensor.type)
                        Inserter.insert_recv_op(block, idx, recv_tensor,
                                                op_desc.src, op_desc.dst,
                                                reshard_op.attr('op_role'))

                        # for a LoD tensor, the LoD needs to be reset after receiving
                        if recv_tensor.lod_level != 0:
                            set_lod = False
                            # use data lod to reset tensor lod
                            for tmp_block in self.auto_parallel_main_prog.blocks:
                                for tmp_var_name in tmp_block.vars:
                                    tmp_var = tmp_block.vars[tmp_var_name]
                                    if tmp_var.is_data and tmp_var.lod_level == recv_tensor.lod_level:
                                        reset_lod_out = Inserter.insert_reset_lod_op(
                                            block, idx + 1, recv_tensor,
                                            tmp_var, reshard_op.attr('op_role'))
                                        tensor_list.append(reset_lod_out)
                                        idx += 2
                                        self.has_recv[var_name][
                                            op_desc.src] = reset_lod_out
                                        set_lod = True
                                        break
                                if set_lod:
                                    break
                            assert set_lod is True
                        else:
                            tensor_list.append(recv_tensor)
                            idx += 1
                            self.has_recv[var_name][op_desc.src] = recv_tensor
                else:
                    tensor_list.append(self.has_recv[var_name][op_desc.src])

            elif isinstance(op_desc, ConcatOpDesc):
                partition_index_list = op_desc.partition_index_list
                idx_list = [idx]
                for index, tensor in enumerate(tensor_list):
                    Inserter.concat_partitions_with_op(
                        partition_tensor_list, tensor,
                        partition_index_list[index], block, idx_list,
                        reshard_op.attr('op_role'))
                idx = idx_list[0]

            elif isinstance(op_desc, SliceOpDesc):
                assert len(
                    partition_tensor_list) == 1 or not partition_tensor_list
                to_slice_tensor = partition_tensor_list[0][0] if len(
                    partition_tensor_list) == 1 else source_tensor
                new_name = unique_name.generate(var_name + "@RESHARD")
                target_tensor = Inserter.insert_slice_op(
                    block,
                    idx,
                    to_slice_tensor,
                    starts=op_desc.starts,
                    ends=op_desc.ends,
                    axes=op_desc.axes,
                    new_var_name=new_name,
                    op_role=reshard_op.attr('op_role'))

                process_mesh = dist_attr[0]
                dims_mapping = dist_attr[1]

                tensor_attr = TensorDistributedAttribute()
                tensor_attr.dims_mapping = dims_mapping
                tensor_attr.process_mesh = process_mesh
                self.dist_context.set_tensor_dist_attr_for_program(
                    target_tensor, tensor_attr)

                if matched_op.type == "while":
                    # var_reshard_mapping records, for each input var of the while sub-block, the resharded tensor name that should be used instead
                    if "var_reshard_mapping" not in Resharder.while_block_info[
                            op.attr("sub_block").id].keys():
                        Resharder.while_block_info[op.attr(
                            "sub_block").id]["var_reshard_mapping"] = {}
                    if var_name not in Resharder.while_block_info[op.attr(
                            "sub_block").id]["var_reshard_mapping"].keys():
                        Resharder.while_block_info[op.attr("sub_block").id][
                            "var_reshard_mapping"][var_name] = []
                    Resharder.while_block_info[op.attr("sub_block").id][
                        "var_reshard_mapping"][var_name].append(
                            [dist_attr, target_tensor.name])

                # rename op input name according to new name
                for op in block.ops:
                    # just for while op
                    while_op_X_append = []
                    for name in op.input_arg_names:
                        op_dist_attr = self.dist_context.get_op_dist_attr_for_program(
                            op)
                        if name == var_name and op_dist_attr is not None:
                            if op.desc.id() == matched_op.desc.id():
                                if matched_op.type == "while":
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = op_dist_attr.get_input_dist_attr(
                                        old_name)
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr)
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping)
                                    if old_name in op_dist_attr._inputs_dist_attrs:
                                        op_dist_attr.del_input_dist_attr(
                                            old_name)
                                    while_op_X_append.append(new_name)
                                    continue
                                else:
                                    op.desc._rename_input(
                                        name, target_tensor.name)
                                    old_name = name
                                    new_name = target_tensor.name
                                    assert old_name != new_name
                                    op_input_dist_attr = op_dist_attr.get_input_dist_attr(
                                        old_name)
                                    op_dist_attr.set_input_dist_attr(
                                        new_name, op_input_dist_attr)
                                    op_dist_attr.set_input_dims_mapping(
                                        new_name, dims_mapping)
                                    op_dist_attr.del_input_dist_attr(old_name)
                                    continue

                            op_process_mesh = op_dist_attr.process_mesh
                            op_input_dims_mapping = op_dist_attr.get_input_dims_mapping(
                                var_name)
                            # NOTE: For an op whose process mesh is a union, its input is not renamed by the reshard result of other ops for now, which means more reshard operations will be inserted.
                            if op_process_mesh == process_mesh and op_input_dims_mapping == dims_mapping:
                                op.desc._rename_input(name, target_tensor.name)
                                old_name = name
                                new_name = target_tensor.name
                                assert old_name != new_name
                                op_input_dist_attr = op_dist_attr.get_input_dist_attr(
                                    old_name)
                                op_dist_attr.set_input_dist_attr(
                                    new_name, op_input_dist_attr)
                                op_dist_attr.set_input_dims_mapping(
                                    new_name, dims_mapping)
                                op_dist_attr.del_input_dist_attr(old_name)

                    # for the while op, reset its input X with the appended names
                    if while_op_X_append:
                        proto = OpProtoHolder.instance().get_op_proto(op.type)
                        op.desc.set_input(proto.inputs[0].name,
                                          op.input("X") + while_op_X_append)

    def _get_subblock_input_attrs(self, op, var_name):
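        # Collect the distinct (process_mesh, dims_mapping) pairs that ops inside the
        # sub-block require for var_name, so the input is resharded once per requirement.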
        # NOTE: Multiple while loops are not supported
        assert op.type in _g_subblock_ops
        sub_block = self.auto_parallel_main_prog.blocks[op.attr("sub_block").id]
        ops = sub_block.ops
        input_attrs = []

        for op in ops:
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if not dist_op:
                continue
            dist_attr = dist_op.dist_attr
            for name in op.input_arg_names:
                if name == var_name:
                    process_mesh = dist_attr.process_mesh
                    input_dims_mapping = dist_attr.get_input_dims_mapping(
                        var_name)
                    has_exist = False
                    for input_attr in input_attrs:
                        if process_mesh == input_attr[
                                0] and input_dims_mapping == input_attr[1]:
                            has_exist = True
                            break
                    if not has_exist:
                        input_attrs.append([process_mesh, input_dims_mapping])
        return input_attrs

    def _get_common_op_input_attrs(self, op, var_name):
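        # For a common (non-subblock) op, pair its input dims_mapping with every process
        # mesh the op spans; a union process mesh is expanded into the sub-meshes it is
        # composed of.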
        process_meshes = []
        dist_op = self.dist_context.get_dist_op_for_program(op)
        dist_attr = dist_op.dist_attr
        op_process_mesh = dist_attr.process_mesh
        for process_mesh in self.dist_context.process_meshes:
            if set(process_mesh.processes) & (set(
                    op_process_mesh.processes)) and len(
                        process_mesh.processes) < len(
                            op_process_mesh.processes):
                process_meshes.append(process_mesh)

        # an empty process_meshes means the op's process mesh is not a union
        if not process_meshes:
            process_meshes.append(op_process_mesh)

        input_dims_mapping = dist_attr.get_input_dims_mapping(var_name)
        input_attrs = []
        for process_mesh in process_meshes:
            input_attrs.append([process_mesh, input_dims_mapping])

        return input_attrs

    def get_op_input_attrs(self, op, var_name):
        op_input_attrs = []

        if op.type in _g_subblock_ops:
            op_input_attrs = self._get_subblock_input_attrs(op, var_name)
        else:
            op_input_attrs = self._get_common_op_input_attrs(op, var_name)

        assert op_input_attrs

        return op_input_attrs

    def _remove_global_process_mesh(self):
        """Remove global process mesh from dist_context.process_meshes"""
        processes = set()
        process_mesh_count = len(self.dist_context.process_meshes)
        if process_mesh_count > 1:
            global_process_mesh_idx = None
            for process_mesh in self.dist_context.process_meshes:
                for process in process_mesh.processes:
                    processes.add(process)
            for idx, process_mesh in enumerate(
                    self.dist_context.process_meshes):
                if len(set(process_mesh.processes)) == len(processes):
                    global_process_mesh_idx = idx
                    break

            if global_process_mesh_idx is not None:
                is_removed = False
                global_mesh = self.dist_context.process_meshes[
                    global_process_mesh_idx]
                for i, mesh in enumerate(self.dist_context.process_meshes):
                    if i == global_process_mesh_idx:
                        continue
                    if set(mesh.processes) < set(global_mesh.processes):
                        is_removed = True

                if is_removed:
                    self.dist_context.process_meshes.pop(
                        global_process_mesh_idx)

    def _change_subblock_op_input_and_output(self, block_idx, block):
        if "var_reshard_mapping" in Resharder.while_block_info[block_idx]:
            var_reshard_mapping = Resharder.while_block_info[block_idx][
                "var_reshard_mapping"]
            for op in block.ops:
                for var_name in op.input_arg_names:
                    if var_name in var_reshard_mapping:
                        # in the while sub-block, the union process mesh has not been split before resharding the sub-block
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        dist_attr = dist_op.dist_attr
                        target_name = None
                        for item in var_reshard_mapping[var_name]:
                            if dist_attr.process_mesh == item[0][
                                    0] and dist_attr.get_input_dims_mapping(
                                        var_name) == item[0][1]:
                                target_name = item[1]
                                break
                        if target_name is None:
                            continue
                        else:
                            op.desc._rename_input(var_name, target_name)
                            dist_op = self.dist_context.get_dist_op_for_program(
                                op)
                            op_dist_attr = dist_op.dist_attr
                            old_name = var_name
                            new_name = target_name
                            assert old_name != new_name
                            op_input_dist_attr = op_dist_attr.get_input_dist_attr(
                                old_name)
                            op_dist_attr.set_input_dist_attr(
                                new_name, op_input_dist_attr)
                            op_dist_attr.del_input_dist_attr(old_name)

                # the outputs also need to be renamed when an inplace op's output name is the same as its input name
                for var_name in op.output_arg_names:
                    # a tensor that has been resharded multiple times as an input is not supported yet
                    if var_name in var_reshard_mapping:
                        if len(var_reshard_mapping[var_name]) > 1:
                            raise ValueError(
                                "This scenario is not supported: the output is inplaced while the tensor has been resharded multiple times as an input."
                            )
                        target_name = var_reshard_mapping[var_name][0][1]

                        op.desc._rename_output(var_name, target_name)
                        dist_op = self.dist_context.get_dist_op_for_program(op)
                        op_dist_attr = dist_op.dist_attr
                        old_name = var_name
                        new_name = target_name
                        assert old_name != new_name
                        op_output_dist_attr = op_dist_attr.get_output_dist_attr(
                            old_name)
                        op_dist_attr.set_output_dist_attr(
                            new_name, op_output_dist_attr)
                        op_dist_attr.del_output_dist_attr(old_name)

    def _reshard_input(self, block):
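        # Walk the block op by op; for every input tensor whose distributed attribute
        # differs from what the op requires, insert the reshard op sequence in place and
        # advance the index past the newly inserted ops.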
        idx = 0
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]

            if self.is_special_op(op):
                idx += 1
                continue

            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None:
                op_input_dist_attrs = [
                ]  # [(op_process_mesh, op_input_dims_mapping), (op_process_mesh, op_input_dims_mapping)]
                if op.type in _g_subblock_ops:
                    if not self.is_condition_replicative(op):
                        raise ValueError(
                            "Please check the condition due to the dims mapping is not replicative."
                        )
                    if op.attr(
                            "sub_block").id not in Resharder.while_block_info:
                        Resharder.while_block_info[op.attr("sub_block").id] = {}
                    Resharder.while_block_info[op.attr(
                        "sub_block").id]["op_id"] = op.desc.id()

                if op.type == "while":
                    # the condition var's process mesh is the same as the op's and its dims_mapping is replicative, so it does not need reshard
                    input_var_names = op.input("X")
                elif op.type == "conditional_block":
                    input_var_names = op.input("Input")
                else:
                    input_var_names = op.input_arg_names
                # sort to keep the order of the while op's input X consistent across ranks
                input_var_names.sort()

                idx_offset = 0
                for var_name in input_var_names:
                    # skip lod_tensor_blocking_queue_? name
                    if "lod_tensor_blocking_queue" in var_name:
                        continue
                    var = get_var_with_recursion(var_name, block,
                                                 self.auto_parallel_main_prog)
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var)

                    # check whether the union tensor's dims_mapping is all -1
                    is_union_process_mesh_tensor = False
                    if dist_tensor.dist_attr.process_mesh not in self.dist_context.process_meshes and self.dist_context.process_meshes:
                        is_union_process_mesh_tensor = True
                        assert dist_tensor.dist_attr.dims_mapping.count(
                            -1) == len(dist_tensor.dist_attr.dims_mapping)

                    op_input_attrs = self.get_op_input_attrs(op, var_name)
                    for input_attr in op_input_attrs:
                        input_process_mesh = None

                        # deal with union tensor
                        if is_union_process_mesh_tensor:
                            # if the op's process mesh is a subset of the union tensor's process mesh, no reshard is needed
                            if set(input_attr[0].processes) <= set(
                                    dist_tensor.dist_attr.process_mesh.processes
                            ):
                                continue

                        if dist_tensor is not None and self.need_reshard(
                                dist_tensor, input_attr):
                            reshard_op_desc = self.find_op_desc_seq(
                                dist_tensor, input_attr)
                            self.parse_op_desc(block, reshard_op_desc, var_name,
                                               op, input_attr)
                            cur_op_count = len(block.ops)
                            idx_offset = idx_offset + cur_op_count - pre_op_count
                            pre_op_count = cur_op_count
                idx = idx + idx_offset + 1
            else:
                idx += 1

    def _handle_recv(self, block, idx, var, op, send_rank, recv_rank):
        if self.rank_id == recv_rank:
            # if recv bool data, recv then cast
            if var.dtype == paddle.bool:
                recv_cast_out = block.create_var(
                    name=unique_name.generate(var.name + "@recv"),
                    shape=var.shape,
                    lod_level=var.lod_level,
                    dtype=paddle.int64,
                    type=var.type)
                Inserter.insert_recv_op(block, idx + 1,
                                        recv_cast_out, send_rank, recv_rank,
                                        op.attr('op_role'))
                reset_lod_out = None
                if var.lod_level != 0:
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if tmp_var.is_data and tmp_var.lod_level == var.lod_level:
                                reset_lod_out = block.create_var(
                                    name=unique_name.generate(var.name +
                                                              "@RESETLOD"),
                                    shape=recv_cast_out.shape,
                                    type=recv_cast_out.type,
                                    dtype=recv_cast_out.dtype,
                                    lod_level=recv_cast_out.lod_level)
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={
                                        'X': recv_cast_out,
                                        'Y': tmp_var
                                    },
                                    outputs={'Out': reset_lod_out},
                                    attrs={'op_role': op.attr("op_role")})
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True

                # cast int64 to bool
                block._insert_op(idx + 2,
                                 type='cast',
                                 inputs={
                                     'X': [recv_cast_out] if
                                     reset_lod_out is None else [reset_lod_out]
                                 },
                                 outputs={'Out': [var]},
                                 attrs={
                                     'in_dtype': recv_cast_out.dtype,
                                     'out_dtype': var.dtype,
                                     'op_role': op.attr('op_role')
                                 })
            else:
                if var.lod_level != 0:
                    recv_out = block.create_var(
                        name=unique_name.generate(var.name + "@recv"),
                        shape=var.shape,
                        lod_level=var.lod_level,
                        dtype=var.dtype,
                        type=var.type)
                    Inserter.insert_recv_op(block, idx + 1, recv_out, send_rank,
                                            recv_rank, op.attr('op_role'))
                    set_lod = False
                    for tmp_block in self.auto_parallel_main_prog.blocks:
                        for tmp_var_name in tmp_block.vars:
                            tmp_var = tmp_block.vars[tmp_var_name]
                            if tmp_var.is_data and tmp_var.lod_level == var.lod_level:
                                idx += 1
                                block._insert_op(
                                    idx,
                                    type="lod_reset",
                                    inputs={
                                        'X': recv_out,
                                        'Y': tmp_var
                                    },
                                    outputs={'Out': var},
                                    attrs={'op_role': op.attr("op_role")})
                                set_lod = True
                                break
                        if set_lod:
                            break
                    assert set_lod is True
                else:
                    Inserter.insert_recv_op(block, idx + 1, var, send_rank,
                                            recv_rank, op.attr('op_role'))

    def _handle_send(self, block, idx, var, op, send_rank, recv_rank):
        if var.dtype == paddle.bool:
            cast_out = Inserter.insert_cast_op(block, idx + 1, var,
                                               op.attr('op_role'), paddle.int64)
            Inserter.insert_send_op(block, idx + 2, cast_out, send_rank,
                                    recv_rank, op.attr('op_role'))
        else:
            Inserter.insert_send_op(block, idx + 1, var, send_rank, recv_rank,
                                    op.attr('op_role'))

    def _reshard_output(self, block):
        # insert send and recv op if output process mesh is different from tensor process mesh
        idx = 0
        # skip reader and ops whose process mesh is union
        skip_ops = [
            "create_py_reader", "create_double_buffer_reader", "read",
            "write_to_array", "read_from_array"
        ]
        global _g_special_ops
        skip_ops += _g_special_ops
        skip_ops += _g_subblock_ops
        while idx < len(block.ops):
            pre_op_count = len(block.ops)
            op = block.ops[idx]
            dist_op = self.dist_context.get_dist_op_for_program(op)
            if dist_op is not None and op.type not in skip_ops:
                idx_offset = 0
                for var_name in op.output_arg_names:
                    var = get_var_with_recursion(var_name, block,
                                                 self.auto_parallel_main_prog)
                    dist_tensor = self.dist_context.get_dist_tensor_for_program(
                        var)
                    tensor_process_mesh = dist_tensor.dist_attr.process_mesh
                    output_attr = [
                        dist_op.dist_attr.process_mesh,
                        dist_op.dist_attr.get_output_dims_mapping(var_name)
                    ]
                    if dist_tensor is not None and self.need_reshard(
                            dist_tensor, output_attr, False):
                        tensor_processes = set(
                            tensor_process_mesh.processes) - (
                                set(tensor_process_mesh.processes)
                                & set(output_attr[0].processes))
                        if tensor_processes:
                            if len(tensor_processes) != len(
                                    output_attr[0].processes):
                                if dist_tensor.dist_attr.dims_mapping.count(
                                        -1) != len(
                                            dist_tensor.dist_attr.dims_mapping
                                        ) or output_attr[1].count(-1) != len(
                                            output_attr[1]):
                                    raise ValueError(
                                        "The dims_mapping must be -1")
                                else:
                                    for index, tensor_process in enumerate(
                                            tensor_processes):
                                        recv_rank = tensor_process
                                        actual_index = index
                                        if index >= len(
                                                output_attr[0].processes):
                                            actual_index = (
                                                index -
                                                len(output_attr[0].processes)
                                            ) % len(output_attr[0].processes)
                                        item = output_attr[0].processes[
                                            actual_index]
                                        if recv_rank == item:
                                            continue
                                        if self.rank_id == item:
                                            # if send bool data, cast then send
                                            self._handle_send(
                                                block, idx, var, op, item,
                                                recv_rank)
                                        if self.rank_id == recv_rank:
                                            # if recv bool data, recv then cast
                                            self._handle_recv(
                                                block, idx, var, op, item,
                                                recv_rank)
                            else:
                                for index, tensor_process in enumerate(
                                        tensor_processes):
                                    recv_rank = tensor_process
                                    item = output_attr[0].processes[index]
                                    if recv_rank == item:
                                        continue
                                    if self.rank_id == item:
                                        # if send bool data, cast then send
                                        self._handle_send(
                                            block, idx, var, op, item,
                                            recv_rank)
                                    if self.rank_id == recv_rank:
                                        # if recv bool data, recv then cast
                                        self._handle_recv(
                                            block, idx, var, op, item,
                                            recv_rank)

                            cur_op_count = len(block.ops)
                            idx_offset = idx_offset + cur_op_count - pre_op_count
                            pre_op_count = cur_op_count

                idx = idx + idx_offset + 1
            else:
                idx += 1

    def reshard(self):
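        # Overall flow: drop the global (union) process mesh, reshard inputs and outputs
        # block by block, then prune the vars and ops this rank no longer needs.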
        self._remove_global_process_mesh()
        for block_idx, block in enumerate(self.auto_parallel_main_prog.blocks):
            # change the var_name before resharding sub block
            if block_idx in Resharder.while_block_info:
                self._change_subblock_op_input_and_output(block_idx, block)

            # reshard input
            self._reshard_input(block)

            # reshard output
            # NOTE: Only support that insert send and recv op if output process mesh is different from tensor process mesh
            self._reshard_output(block)

        # remove unneeded vars and ops from the main program
        Remover.remove_no_need_in_main(self.auto_parallel_main_prog,
                                       self.dist_context, self.rank_id,
                                       self.dist_params_grads)

        # remove unneeded vars and ops from the startup program
        Remover.remove_no_need_in_startup(self.auto_parallel_main_prog,
                                          self.auto_parallel_startup_prog)

        # reset the shared while-block info after the removal has finished
        Resharder.while_block_info = {}

    def get_cost(self, op, tensor, cluster):
        # NOTE: The program should be the serial_program, which has not been partitioned
        global _g_special_ops
        not_supported_op_type = _g_special_ops + ["while"]
        reshard_op_cost = None
        if op.type in not_supported_op_type:
            return reshard_op_cost
        else:
            tensor_name = tensor.name
            if tensor_name == "lod_tensor_blocking_queue_0":
                return reshard_op_cost
            else:
                dist_tensor = self.dist_context.get_dist_tensor_for_program(
                    tensor)
                # simplified processing: ignore union process mesh and output reshard
                dist_op = self.dist_context.get_dist_op_for_program(op)
                dims_mapping = dist_op.dist_attr.get_input_dims_mapping(
                    tensor.name)
                process_mesh = dist_op.dist_attr.process_mesh
                dist_attr = [process_mesh, dims_mapping]
                if dist_tensor is not None and self.need_reshard(
                        dist_tensor, dist_attr):
                    if tensor_name not in self._has_resharded:
                        self._has_resharded[tensor_name] = [dist_op]
                    else:
                        for item in self._has_resharded[tensor_name]:
                            item_dist_attr = item.dist_attr
                            item_dims_mapping = item_dist_attr.get_input_dims_mapping(
                                tensor_name)
                            item_process_mesh = item_dist_attr.process_mesh
                            if dims_mapping == item_dims_mapping and item_process_mesh == process_mesh:
                                return reshard_op_cost
                        self._has_resharded[tensor_name].append(dist_op)

                    reshard_op_desc = self.find_op_desc_seq(dist_tensor,
                                                            dist_attr,
                                                            serial=True)
                    dtype = dist_tensor.serial_tensor.dtype
                    reshard_op_cost = self.parse_op_desc_for_cost(
                        reshard_op_desc, dtype, cluster)

        return reshard_op_cost

    def _concat_partitions_for_cost(self, partition_tensor_list,
                                    partition_index, dtype, rank_id,
                                    local_rank_comp_cost, cluster):
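        # Greedily merge partition indexes that can be concatenated along one axis and
        # record a ConcatOpCost on the local rank for every merge that is performed.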
        if not partition_tensor_list:
            partition_tensor_list.append(partition_index)
        else:
            i = 0
            has_concat = False
            while i < len(partition_tensor_list):
                concat_axis, first_order, new_partition = Resharder.compute_concat_info(
                    partition_tensor_list[i], partition_index)
                if concat_axis != -1:
                    has_concat = True
                    concat_desc = {}
                    concat_desc["op"] = "concat"
                    concat_desc["attrs"] = {"axis": concat_axis}
                    if first_order == 0:
                        concat_desc["inputs"] = {
                            "X": [(dtype, partition_tensor_list[i]),
                                  (dtype, partition_index)]
                        }
                    else:
                        concat_desc["inputs"] = {
                            "X": [(dtype, partition_index),
                                  (dtype, partition_tensor_list[i])]
                        }
                    partition_tensor_list.pop(i)
                    if rank_id not in local_rank_comp_cost:
                        local_rank_comp_cost[rank_id] = []
                    local_rank_comp_cost[rank_id].append(
                        ConcatOpCost(op_desc=concat_desc, cluster=cluster))
                    self._concat_partitions_for_cost(partition_tensor_list,
                                                     new_partition, dtype,
                                                     rank_id,
                                                     local_rank_comp_cost,
                                                     cluster)
                    break
                i += 1
            if not has_concat:
                partition_tensor_list.append(partition_index)

    def parse_op_desc_for_cost(self, reshard_op_desc, dtype, cluster):
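        # Translate the per-rank reshard op descriptions into communication costs grouped
        # by the ranks involved, plus per-rank computation (split/concat/slice) costs.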

        def _get_idx(comm_ranks, group_ranks):
            res, is_the_same = None, False
            idx = 0
            while idx < len(comm_ranks):
                if comm_ranks[idx] == set(group_ranks):
                    is_the_same = True

                for rank in group_ranks:
                    if rank in comm_ranks[idx]:
                        res = idx
                        comm_ranks[idx].add(rank)
                if res is None:
                    idx += 1
                else:
                    break
            return res, is_the_same

        comm_context = CommContext(cluster)
        # run communication op before computation op
        # TODO: Communication cost is not calculated when the var has already been transferred by the same group
        comm_costs = []
        comm_ranks = []
        local_rank_comp_cost = {}
        for key in reshard_op_desc:
            partition_tensor_list = []
            op_desc_list = reshard_op_desc[key]
            for op_desc in op_desc_list:
                if isinstance(op_desc, SendOpDesc):
                    group_ranks = [key, op_desc.dst]
                    shape = op_desc.shape
                    send_desc = build_comm_desc("send_v2", group_ranks, dtype,
                                                shape)
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append([
                            (group_ranks,
                             SendOpCost(op_desc=send_desc,
                                        comm_context=comm_context))
                        ])
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (group_ranks,
                                 SendOpCost(op_desc=send_desc,
                                            comm_context=comm_context)))
                elif isinstance(op_desc, AllGatherOpDesc):
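                    # An allgather is followed by a split on each rank to
                    # recover its own slice, so both costs are recorded.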
                    # NOTE: fill_constant and other auxiliary ops are not
                    # counted here because their cost is negligible.
                    group_ranks = op_desc.group
                    shape = op_desc.shape
                    allgather_desc = build_comm_desc("c_allgather", group_ranks,
                                                     dtype, shape)
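                    # c_allgather concatenates along axis 0, so the tensor
                    # fed to the following split has its first dim enlarged
                    # by the number of ranks in the group.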
                    split_inputs_shape = []
                    for idx, dim in enumerate(shape):
                        if idx == 0:
                            split_inputs_shape.append(dim * len(group_ranks))
                        else:
                            split_inputs_shape.append(dim)
                    idx, is_the_same = _get_idx(comm_ranks, group_ranks)
                    if idx is None:
                        comm_costs.append([
                            (group_ranks,
                             AllgatherOpCost(op_desc=allgather_desc,
                                             comm_context=comm_context))
                        ])
                        comm_ranks.append(set(group_ranks))
                    else:
                        if not is_the_same:
                            comm_costs[idx].append(
                                (group_ranks,
                                 AllgatherOpCost(op_desc=allgather_desc,
                                                 comm_context=comm_context)))
                    # calc the split op cost
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
                    split_desc = {}
                    split_desc["op"] = "split"
                    split_desc["inputs"] = {
                        "inputs": [(dtype, split_inputs_shape)]
                    }
                    split_desc["attrs"] = {"num": len(group_ranks), "axis": 0}
                    local_rank_comp_cost[key].append(
                        SplitOpCost(op_desc=split_desc, cluster=cluster))
                elif isinstance(op_desc, ConcatOpDesc):
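                    # Merge the partitions received on this rank; concat
                    # costs are accumulated by _concat_partitions_for_cost.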
                    partition_index_list = op_desc._partition_index_list
                    for partition_index in partition_index_list:
                        self._concat_partitions_for_cost(
                            partition_tensor_list, partition_index, dtype, key,
                            local_rank_comp_cost, cluster)

                elif isinstance(op_desc, SliceOpDesc):
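                    # The slice input is the single concatenated partition
                    # if one exists, otherwise a tensor of op_desc.shape.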
                    if key not in local_rank_comp_cost:
                        local_rank_comp_cost[key] = []
                    assert len(partition_tensor_list) <= 1
                    to_slice_tensor_shape = []
                    if len(partition_tensor_list) == 1:
                        for item in partition_tensor_list[0]:
                            to_slice_tensor_shape.append(item[1] - item[0])
                    else:
                        to_slice_tensor_shape = op_desc.shape
                    slice_desc = {}
                    slice_desc["op"] = "slice"
                    infer_flags = [1] * len(op_desc.axes)
                    slice_desc["attrs"] = {
                        "axes": op_desc.axes,
                        "starts": op_desc.starts,
                        "ends": op_desc.ends,
                        "infer_flags": infer_flags
                    }
                    slice_desc["inputs"] = {
                        "Input": [(dtype, to_slice_tensor_shape)]
                    }
                    local_rank_comp_cost[key].append(
                        SliceOpCost(op_desc=slice_desc, cluster=cluster))

        res = (comm_costs, local_rank_comp_cost)

        return res