# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl, is_parameter_related
from ..utils import is_dim_shard
from ..utils import compute_compatible_and_update_dim_mapping
from ..utils import set_dist_op_desc_original_id
from .dist_default import DistributedDefaultImpl0
from ..cost import build_comp_desc_from_dist_op, build_comp_costs_from_descs
from ..cost import Reshape2OpCost
from ..cost import Reshape2GradOpCost
from ..cost import build_dp_costs
from paddle.distributed.fleet.meta_optimizers.common import OpRole


class DistributedReshape2(DistributedOperatorImplContainer):
    def __init__(self, op_type):
        super().__init__(op_type)


register_distributed_operator_impl_container(DistributedReshape2("reshape2"))


class DistributedReshapeImpl0(DistributedOperatorImpl):
    def __init__(self, name):
        super().__init__(name)
        self._forward_implemented = True
        self._backward_implemented = False

    def calc_cost(self, op_role, dist_op, ctx, cluster):
        cost = None
        if int(op_role) == int(OpRole.Backward):
            cost = self.calc_bwd_cost(dist_op, ctx, cluster)
        else:
            cost = self.calc_fwd_cost(dist_op, ctx, cluster)
        assert cost is not None
        return cost

    def calc_fwd_cost(self, dist_op, ctx, cluster):
        res = []
        op = dist_op.serial_op
        dist_attr = dist_op.dist_attr

        shape_list = op.desc.attr("shape")
        # get dist attribute info
        dim_mapping = dist_attr.get_output_dims_mapping(op.output("Out")[0])
        process_mesh_shape = dist_attr.process_mesh.topology

        # modify target shape
        for idx, axis in enumerate(dim_mapping):
            if axis >= 0:
                if len(shape_list) > idx:
                    shape_list[idx] = (
                        shape_list[idx] // process_mesh_shape[axis]
                    )
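        # Illustrative sketch (assumed values, not read from the op): with a
        # process mesh of topology [2, 4], dim_mapping [0, -1, 1], and serial
        # target shape [8, 16, 32], the loop above divides each sharded axis
        # by its mesh dimension:
        #     shape_list[0] = 8 // 2 = 4   (axis 0 sharded on mesh dim 0)
        #     shape_list[2] = 32 // 4 = 8  (axis 2 sharded on mesh dim 1)
        # leaving the per-rank target shape [4, 16, 8].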

        # calc comp op cost
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        processes = dist_attr.process_mesh.processes
        for key in desc_mapping:
            desc_mapping[key]["shape"] = shape_list

        cost_mapping = build_comp_costs_from_descs(
            Reshape2OpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        return res

    def calc_bwd_cost(self, dist_op, ctx, cluster):
        # calc comp op cost
        res = []
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        dist_attr = dist_op.dist_attr
        process_mesh = dist_attr.process_mesh
        processes = process_mesh.processes
        op_type = dist_op.serial_op.type

        cost_mapping = build_comp_costs_from_descs(
            Reshape2GradOpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        backward_op = dist_op.serial_op
        main_block = backward_op.block
        need_gradient_allreduce = False
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and is_parameter_related(
                    varname, main_block
                ):
                    # NOTE: use the dim_mapping of the backward op's own input
                    # var, not that of the corresponding varname in the
                    # forward op.
                    var_dim_mapping = dist_attr.get_input_dims_mapping(varname)

                    mesh_shape = process_mesh.topology
                    batch_size_axis = var_dim_mapping[0]
                    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                        parallel_axis = batch_size_axis
                        attrs = {"use_calc_stream": True}
                        var_names = [varname + "@GRAD"]
                        build_dp_costs(
                            res,
                            dist_op,
                            ctx,
                            var_names,
                            attrs,
                            parallel_axis,
                            cluster,
                        )
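                        # Illustrative note (assumed mesh, not read from the
                        # op): with process_mesh topology [2, 4] and
                        # var_dim_mapping [0, -1], batch_size_axis = 0 picks
                        # mesh dim 0 (size 2) as the data-parallel axis, and
                        # build_dp_costs adds the allreduce cost of
                        # varname + "@GRAD" across those 2 ranks.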

        return res

    def is_input_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if len(x_dims_mapping) != len(out_dims_mapping) - 1:
            return False

        return True

    def is_output_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if len(x_dims_mapping) != len(out_dims_mapping) - 1:
            return False

        if is_dim_shard(out_dims_mapping[-1]):
            return False

        return True

    def is_auto_compatible(self, dist_op):
        if (not self.is_input_compatible(dist_op)) or (
            not self.is_output_compatible(dist_op)
        ):
            return False

        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name
        )
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        for idx, dim_mapping in enumerate(out_dims_mapping[:-1]):
            if x_dims_mapping[idx] != dim_mapping:
                return False

        if x_shape_dims_mapping[0] != -1:
            return False

        if x_shape_dims_mapping[1:] != x_dims_mapping[:]:
            return False

        return True

    def update_dims_mapping(self, dist_op):
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name
        )

        for i in range(len(x_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [x_dims_mapping, out_dims_mapping], [i, i]
            )
            if dim_changed:
                changed = True

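        # reshape2's XShape output carries the input shape with a leading 0
        # placeholder, so its dims_mapping has one extra leading entry; the
        # i + 1 offset below keeps it aligned with x_dims_mapping.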
        for i in range(len(x_dims_mapping)):
            x_shape_dims_mapping[i + 1] = x_dims_mapping[i]

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        kwargs: inputname_mapping & outputname_mapping
        """

        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert (
            op_dist_attr is not None
        ), "op [{}] doesn't have dist attribute!".format(str(src_op))

        # check validity of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name
            )
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name
            )
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name
            )
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name
            )

        X_var = main_block._var_recursive(kwargs['X'][0])
        Out_var = main_block._var_recursive(kwargs['Out'][0])
        XShape_var = main_block._var_recursive(kwargs['XShape'][0])
        shape_list = src_op.desc.attr("shape")
        ShapeTensor_var_list = []
        for name in kwargs['ShapeTensor']:
            ShapeTensor_var_list.append(name)
        Shape_var_list = []
        for name in kwargs['Shape']:
            Shape_var_list.append(name)

        # get dist attribute info
        dim_mapping = op_dist_attr.get_output_dims_mapping(Out_var.name)
        process_mesh_shape = op_dist_attr.process_mesh.topology

        # modify target shape
        for idx, axis in enumerate(dim_mapping):
            if axis >= 0:
                if len(shape_list) > idx:
                    shape_list[idx] = (
                        shape_list[idx] // process_mesh_shape[axis]
                    )

        # create op
        new_op_desc = main_block.append_op(type='nop').desc
        new_op_desc.copy_from(src_op.desc)
        set_dist_op_desc_original_id(new_op_desc, src_op.desc, ctx)
        new_op_desc.set_input('ShapeTensor', ShapeTensor_var_list)
        new_op_desc.set_input('Shape', Shape_var_list)
        new_op_desc.set_input('X', [X_var.name])
        new_op_desc.set_output('XShape', [XShape_var.name])
        new_op_desc.set_output('Out', [Out_var.name])
        new_op_desc._set_attr('shape', shape_list)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        DistributedDefaultImpl0.backward(ctx, *args, **kwargs)


class DistributedReshapeImpl1(DistributedOperatorImpl):
    def __init__(self, name):
        super().__init__(name)
        self._forward_implemented = True
        self._backward_implemented = False

    def calc_cost(self, op_role, dist_op, ctx, cluster):
        cost = None
        if int(op_role) == int(OpRole.Backward):
            cost = self.calc_bwd_cost(dist_op, ctx, cluster)
        else:
            cost = self.calc_fwd_cost(dist_op, ctx, cluster)
        assert cost is not None
        return cost

    def calc_fwd_cost(self, dist_op, ctx, cluster):
        res = []
        op = dist_op.serial_op
        dist_attr = dist_op.dist_attr

        shape_list = op.desc.attr("shape")
        # get dist attribute info
        dim_mapping = dist_attr.get_output_dims_mapping(op.output("Out")[0])
        process_mesh_shape = dist_attr.process_mesh.topology

        # modify target shape
        for idx, axis in enumerate(dim_mapping):
            if axis >= 0:
                if len(shape_list) > idx:
                    shape_list[idx] = (
                        shape_list[idx] // process_mesh_shape[axis]
                    )

        # calc comp op cost
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        processes = dist_attr.process_mesh.processes
        for key in desc_mapping:
            desc_mapping[key]["shape"] = shape_list

        cost_mapping = build_comp_costs_from_descs(
            Reshape2OpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        return res

    def calc_bwd_cost(self, dist_op, ctx, cluster):
        # calc comp op cost
        res = []
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        dist_attr = dist_op.dist_attr
        process_mesh = dist_attr.process_mesh
        processes = process_mesh.processes
        op_type = dist_op.serial_op.type

        cost_mapping = build_comp_costs_from_descs(
            Reshape2GradOpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        backward_op = dist_op.serial_op
        main_block = backward_op.block
        need_gradient_allreduce = False
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and not is_parameter_related(
                    varname, main_block
                ):
                    # NOTE: use the dim_mapping of the backward op's own input
                    # var, not that of the corresponding varname in the
                    # forward op.
                    var_dim_mapping = dist_attr.get_input_dims_mapping(varname)

                    mesh_shape = process_mesh.topology
                    batch_size_axis = var_dim_mapping[0]
                    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                        parallel_axis = batch_size_axis
                        attrs = {"use_calc_stream": True}
                        var_names = [varname + "@GRAD"]
                        build_dp_costs(
                            res,
                            dist_op,
                            ctx,
                            var_names,
                            attrs,
                            parallel_axis,
                            cluster,
                        )

        return res

    def is_input_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if len(x_dims_mapping) != len(out_dims_mapping) + 1:
            return False

        if is_dim_shard(x_dims_mapping[-1]):
            return False

        return True

    def is_output_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if len(x_dims_mapping) != len(out_dims_mapping) + 1:
            return False

        return True

    def is_auto_compatible(self, dist_op):
        if (not self.is_input_compatible(dist_op)) or (
            not self.is_output_compatible(dist_op)
        ):
            return False

        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name
        )

        if is_dim_shard(x_dims_mapping[-1]):
            return False

        for idx, item in enumerate(x_dims_mapping[:-1]):
            if out_dims_mapping[idx] != item:
                return False

        if x_shape_dims_mapping[0] != -1:
            return False

        if x_shape_dims_mapping[1:] != x_dims_mapping[:]:
            return False

        return True

    def update_dims_mapping(self, dist_op):
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name
        )

        for i in range(len(out_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [x_dims_mapping, out_dims_mapping], [i, i]
            )
            if dim_changed:
                changed = True

        for i in range(len(x_dims_mapping)):
            x_shape_dims_mapping[i + 1] = x_dims_mapping[i]

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        kwargs: inputname_mapping & outputname_mapping
        """

        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert (
            op_dist_attr is not None
        ), "op [{}] doesn't have dist attribute!".format(str(src_op))

        # check validity of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name
            )
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name
            )
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name
            )
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name
            )

        X_var = main_block._var_recursive(kwargs['X'][0])
        Out_var = main_block._var_recursive(kwargs['Out'][0])
        XShape_var = main_block._var_recursive(kwargs['XShape'][0])
        shape_list = src_op.desc.attr("shape")
        ShapeTensor_var_list = []
        for name in kwargs['ShapeTensor']:
            ShapeTensor_var_list.append(name)
        Shape_var_list = []
        for name in kwargs['Shape']:
            Shape_var_list.append(name)

        # get dist attribute info
        dim_mapping = op_dist_attr.get_output_dims_mapping(Out_var.name)
        process_mesh_shape = op_dist_attr.process_mesh.topology

        # modify target shape
        for idx, axis in enumerate(dim_mapping):
            if axis >= 0:
                if len(shape_list) > idx:
                    shape_list[idx] = (
                        shape_list[idx] // process_mesh_shape[axis]
                    )

        # create op
        new_op_desc = main_block.append_op(type='nop').desc
        new_op_desc.copy_from(src_op.desc)
        set_dist_op_desc_original_id(new_op_desc, src_op.desc, ctx)
        new_op_desc.set_input('ShapeTensor', ShapeTensor_var_list)
        new_op_desc.set_input('Shape', Shape_var_list)
        new_op_desc.set_input('X', [X_var.name])
        new_op_desc.set_output('XShape', [XShape_var.name])
        new_op_desc.set_output('Out', [Out_var.name])
        new_op_desc._set_attr('shape', shape_list)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        DistributedDefaultImpl0.backward(ctx, *args, **kwargs)


class DistributedReshapeImpl2(DistributedOperatorImpl):
    def __init__(self, name):
        super().__init__(name)
        self._forward_implemented = True
        self._backward_implemented = False

    def calc_cost(self, op_role, dist_op, ctx, cluster):
        cost = None
        if int(op_role) == int(OpRole.Backward):
            cost = self.calc_bwd_cost(dist_op, ctx, cluster)
        else:
            cost = self.calc_fwd_cost(dist_op, ctx, cluster)
        assert cost is not None
        return cost

    def calc_fwd_cost(self, dist_op, ctx, cluster):
        res = []
        op = dist_op.serial_op
        dist_attr = dist_op.dist_attr

        shape_list = op.desc.attr("shape")
        # get dist attribute info
        dim_mapping = dist_attr.get_output_dims_mapping(op.output("Out")[0])
        process_mesh_shape = dist_attr.process_mesh.topology

        # modify target shape
        for idx, axis in enumerate(dim_mapping):
            if axis >= 0:
                if len(shape_list) > idx:
                    shape_list[idx] = (
                        shape_list[idx] // process_mesh_shape[axis]
                    )

        # calc comp op cost
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        processes = dist_attr.process_mesh.processes
        for key in desc_mapping:
            desc_mapping[key]["shape"] = shape_list

        cost_mapping = build_comp_costs_from_descs(
            Reshape2OpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        return res

    def calc_bwd_cost(self, dist_op, ctx, cluster):
        # calc comp op cost
        res = []
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        dist_attr = dist_op.dist_attr
        process_mesh = dist_attr.process_mesh
        processes = process_mesh.processes
        op_type = dist_op.serial_op.type

        cost_mapping = build_comp_costs_from_descs(
            Reshape2GradOpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        backward_op = dist_op.serial_op
        main_block = backward_op.block
        need_gradient_allreduce = False
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and not is_parameter_related(
                    varname, main_block
                ):
                    # NOTE: use the dim_mapping of the backward op's own input
                    # var, not that of the corresponding varname in the
                    # forward op.
                    var_dim_mapping = dist_attr.get_input_dims_mapping(varname)

                    mesh_shape = process_mesh.topology
                    batch_size_axis = var_dim_mapping[0]
                    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                        parallel_axis = batch_size_axis
                        attrs = {"use_calc_stream": True}
                        var_names = [varname + "@GRAD"]
                        build_dp_costs(
                            res,
                            dist_op,
                            ctx,
                            var_names,
                            attrs,
                            parallel_axis,
                            cluster,
                        )

        return res

    def is_input_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if len(x_dims_mapping) != len(out_dims_mapping):
            return False

        return True

    def is_output_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        out_name = op_desc.output('Out')[0]
        x_name = op_desc.input('X')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if len(x_dims_mapping) != len(out_dims_mapping):
            return False

        return True

    def is_auto_compatible(self, dist_op):
        if (not self.is_input_compatible(dist_op)) or (
            not self.is_output_compatible(dist_op)
        ):
            return False

        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name
        )

        for idx, item in enumerate(x_dims_mapping[:-1]):
            if out_dims_mapping[idx] != item:
                return False

        if x_shape_dims_mapping[0] != -1:
            return False

        if x_shape_dims_mapping[1:] != out_dims_mapping[:]:
            return False

        return True

    def update_dims_mapping(self, dist_op):
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name
        )

        for i in range(len(out_dims_mapping) - 1):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [x_dims_mapping, out_dims_mapping], [i, i]
            )
            if dim_changed:
                changed = True

        for i in range(len(out_dims_mapping)):
            x_shape_dims_mapping[i + 1] = out_dims_mapping[i]

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        kwargs: inputname_mapping & outputname_mapping
        """

        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        src_op = dist_op_context.cur_src_op
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert (
            op_dist_attr is not None
        ), "op [{}] doesn't have dist attribute!".format(str(src_op))

        # check validity of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name
            )
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name
            )
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name
            )
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name
            )

        X_var = main_block._var_recursive(kwargs['X'][0])
        Out_var = main_block._var_recursive(kwargs['Out'][0])
        XShape_var = main_block._var_recursive(kwargs['XShape'][0])
        shape_list = src_op.desc.attr("shape")
        ShapeTensor_var_list = []
        for name in kwargs['ShapeTensor']:
            ShapeTensor_var_list.append(name)
        Shape_var_list = []
        for name in kwargs['Shape']:
            Shape_var_list.append(name)

        # get dist attribute info
        out_dim_mapping = op_dist_attr.get_output_dims_mapping(Out_var.name)
        process_mesh_shape = op_dist_attr.process_mesh.topology

        # modify target shape
        for idx, axis in enumerate(out_dim_mapping):
            if axis >= 0:
                if len(shape_list) > idx:
                    shape_list[idx] = (
                        shape_list[idx] // process_mesh_shape[axis]
                    )

        # create op
        new_op_desc = main_block.append_op(type='nop').desc
        new_op_desc.copy_from(src_op.desc)
        set_dist_op_desc_original_id(new_op_desc, src_op.desc, ctx)
        new_op_desc.set_input('ShapeTensor', ShapeTensor_var_list)
        new_op_desc.set_input('Shape', Shape_var_list)
        new_op_desc.set_input('X', [X_var.name])
        new_op_desc.set_output('XShape', [XShape_var.name])
        new_op_desc.set_output('Out', [Out_var.name])
        new_op_desc._set_attr('shape', shape_list)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        DistributedDefaultImpl0.backward(ctx, *args, **kwargs)


register_distributed_operator_impl(
    "reshape2", DistributedReshapeImpl0("add_one_dim_back")
)
register_distributed_operator_impl(
    "reshape2", DistributedReshapeImpl1("remove_one_dim_back")
)
register_distributed_operator_impl(
    "reshape2", DistributedReshapeImpl2("same_dim_shape")
)
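
# Illustrative examples (assumed tensor shapes, not part of the API) of which
# impl a reshape matches, keyed on the rank change from X to Out:
#   add_one_dim_back    (Impl0): X [B, S*H]   -> Out [B, S, H]
#       x_dims_mapping [0, -1]     / out_dims_mapping [0, -1, -1]
#   remove_one_dim_back (Impl1): X [B, S, H]  -> Out [B, S*H]
#       x_dims_mapping [0, -1, -1] / out_dims_mapping [0, -1]
#   same_dim_shape      (Impl2): X and Out have the same rank, e.g.
#       x_dims_mapping [0, -1, -1] / out_dims_mapping [0, -1, -1]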