# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import gradient_synchronization
from .common import register_distributed_operator_impl, is_parameter_related
from ..utils import is_prim_op
from ..utils import compute_compatible_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
from ..cost import _g_op_cost_factory
from ..cost import build_comp_desc_from_dist_op, build_dp_costs
from ..cost import build_comp_costs_from_descs

__op_not_need_param_init__ = ["while", "cond"]


def prim_operator_data_parallel_functor(ctx, src_op):
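    """Insert data parallel synchronization for a primitive operator.

    If the output of ``src_op`` is a recorded parameter gradient, append a
    ``c_allreduce_sum`` over the data parallel group to the main block, add a
    ``c_broadcast`` of the corresponding parameter to the startup block, and
    attach the matching distributed attributes to the new allreduce op.
    """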
    dist_op_context = ctx.dist_op_context
    main_block = dist_op_context.work_block
    startup_block = dist_op_context.startup_block

    var_name = src_op.output_arg_names[0]
    if var_name in ctx.grads_params:
        assert (
            var_name not in ctx.synced_gradient
        ), "in primitive mode, grad [{}] is already synced".format(var_name)
        ctx.synced_gradient.add(var_name)
        sync_group = new_process_group(ctx.data_parallel_group)

        allreduce_op = main_block.append_op(
            type='c_allreduce_sum',
            inputs={'X': [var_name]},
            outputs={'Out': [var_name]},
            attrs={
                'ring_id': sync_group.id,
                'use_calc_stream': True,
                OP_ROLE_KEY: OpRole.Backward,
            },
        )

        param = ctx.grads_params[var_name]
        startup_block = dist_op_context.startup_block
        new_op = startup_block.append_op(
            type='c_broadcast',
            inputs={'X': [param]},
            outputs={'Out': [param]},
            attrs={
                'ring_id': sync_group.id,
                'root': 0,
                'use_calc_stream': True,
                OP_ROLE_KEY: OpRole.Forward,
            },
        )

        grad_var = main_block.var(var_name)
        dims_mapping = ctx.get_tensor_dist_attr_for_program(
            grad_var
        ).dims_mapping
        dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        process_mesh = dist_attr.process_mesh
        op_attr = OperatorDistributedAttribute()
        op_attr.process_mesh = process_mesh
        op_attr.set_output_dims_mapping(grad_var.name, dims_mapping)
        op_attr.set_input_dims_mapping(grad_var.name, dims_mapping)
        ctx.set_op_dist_attr_for_program(allreduce_op, op_attr)

    return


class DistributedDefault(DistributedOperatorImplContainer):
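    """Container for the default (replicated) distributed operator impls."""
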
    def __init__(self, op_type):
        super().__init__(op_type)


register_distributed_operator_impl_container(DistributedDefault("default"))


# Replicated Default
class DistributedDefaultImpl0(DistributedOperatorImpl):
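    """Default impl: the op is simply replicated on every rank, so only the
    batch dimension of its non-parameter tensors may be sharded."""
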
    def __init__(self, name):
        super().__init__(name)
        self._forward_implemented = True
        self._backward_implemented = True

    def calc_cost(self, op_role, dist_op, ctx, cluster):
        """Calculate the cost by the op role."""
        cost = None
        if int(op_role) == int(OpRole.Backward):
            cost = self.calc_bwd_cost(dist_op, ctx, cluster)
        else:
            cost = self.calc_fwd_cost(dist_op, ctx, cluster)
        assert cost is not None
        return cost

    def calc_fwd_cost(self, dist_op, ctx, cluster):
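        """Estimate the forward cost from the op's computation descriptors,
        one cost per process in the mesh."""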
        # calc comp op cost
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        processes = dist_op.dist_attr.process_mesh.processes
        op_type = dist_op.serial_op.type
        cost_mapping = build_comp_costs_from_descs(
            _g_op_cost_factory[op_type], ctx, processes, desc_mapping, cluster
        )
        res_cost = [cost_mapping]

        return res_cost

    def calc_bwd_cost(self, dist_op, ctx, cluster):
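        """Estimate the backward cost: the computation cost of the backward
        op plus, when the batch dimension is sharded, the data parallel
        allreduce cost of each parameter gradient."""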
        # calc comp op cost
        res = []
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        dist_attr = dist_op.dist_attr
        process_mesh = dist_attr.process_mesh
        processes = process_mesh.processes
        backward_op = dist_op.serial_op
        op_type = backward_op.type
        cost_mapping = build_comp_costs_from_descs(
            _g_op_cost_factory[op_type], ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        main_block = backward_op.block
        vars = main_block.vars
        need_gradient_allreduce = False
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and not is_parameter_related(
                    varname, main_block
                ):
                    var_dim_mapping = dist_attr.get_input_dims_mapping(varname)
                    mesh_shape = process_mesh.topology
                    batch_size_axis = var_dim_mapping[0]
                    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                        need_gradient_allreduce = True
                        break

        if need_gradient_allreduce:
            for input_name in backward_op.desc.input_names():
                for varname in backward_op.desc.input(input_name):
                    if "@GRAD" not in varname and is_parameter_related(
                        varname, main_block
                    ):
                        var_dim_mapping = dist_attr.get_input_dims_mapping(
                            varname
                        )
                        mesh_shape = process_mesh.topology
                        batch_size_axis = var_dim_mapping[0]
                        parallel_axis = batch_size_axis
                        attrs = {"use_calc_stream": True}
                        var_names = [varname + "@GRAD"]
                        build_dp_costs(
                            res,
                            dist_op,
                            ctx,
                            var_names,
                            attrs,
                            parallel_axis,
                            cluster,
                        )
        return res

    def is_input_compatible(self, dist_op):
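        """Inputs are compatible if they are replicated everywhere except,
        possibly, a shared batch dimension.

        Parameters must be fully replicated. For regular inputs only the
        first dimension may be sharded; for XShape inputs only the second
        one may be, and all batch dim mappings must be mutually compatible.
        """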
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        batch_dim_mappings = []
        input_names = op_desc.input_names()
        xshape_arg_names = []
        if "XShape" in input_names:
            xshape_arg_names = op_desc.input("XShape")
        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        if compute_compatible_dim_mapping(batch_dim_mappings) is None:
            return False

        return True

    def is_output_compatible(self, dist_op):
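        """Same rule as is_input_compatible, applied to the outputs."""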
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        output_names = op_desc.output_names()
        batch_dim_mappings = []
        xshape_arg_names = []
        if "XShape" in output_names:
            xshape_arg_names = op_desc.output("XShape")
        for arg_name in op_desc.output_arg_names():
            serial_tensor = dist_op.get_serial_output(arg_name)
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        if compute_compatible_dim_mapping(batch_dim_mappings) is None:
            return False

        return True

    def is_auto_compatible(self, dist_op):
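        """Check inputs and outputs jointly: besides the per-tensor rules,
        every collected batch dim mapping must be identical."""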
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        batch_dim_mappings = []
        # Check input compatibility
        input_names = op_desc.input_names()
        xshape_arg_names = []
        if "XShape" in input_names:
            xshape_arg_names = op_desc.input("XShape")
        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if serial_tensor is not None and serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        # Check output compatibility
        output_names = op_desc.output_names()
        xshape_arg_names = []
        if "XShape" in output_names:
            xshape_arg_names = op_desc.output("XShape")
        for arg_name in op_desc.output_arg_names():
            serial_tensor = dist_op.get_serial_output(arg_name)
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if serial_tensor is not None and serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        # Check batch dim mapping compatibility
        if not all(
            batch_dim_mappings[0] == dim_mapping
            for dim_mapping in batch_dim_mappings
        ):
            return False

        return True

    def update_dims_mapping(self, dist_op):
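        """Propagate one compatible batch dim mapping across all
        non-parameter inputs and outputs; return True if any mapping
        changed."""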
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr

        if op_desc.type() == "while":
            return False

        input_names = op_desc.input_names()
        input_xshape_arg_names = []
        if "XShape" in input_names:
            input_xshape_arg_names = op_desc.input("XShape")

        output_names = op_desc.output_names()
        output_xshape_arg_names = []
        if "XShape" in output_names:
            output_xshape_arg_names = op_desc.output("XShape")

        batch_dim_mappings = []
        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if arg_name not in input_xshape_arg_names:
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                batch_dim_mappings.append(dims_mapping[1])
        for arg_name in op_desc.output_arg_names():
            if op_desc.type() == 'fill_any_like':
                input_tensor = dist_op.get_serial_input(
                    op_desc.input_arg_names()[0]
                )
                if input_tensor.is_parameter:
                    continue
            serial_tensor = dist_op.get_serial_output(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if arg_name not in output_xshape_arg_names:
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                batch_dim_mappings.append(dims_mapping[1])

        if not batch_dim_mappings:
            return changed

        compatible_dim_mapping = compute_compatible_dim_mapping(
            batch_dim_mappings
        )
        if compatible_dim_mapping is None:
            return False

        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if arg_name not in input_xshape_arg_names:
                if (
                    len(dims_mapping) >= 1
                    and compatible_dim_mapping != dims_mapping[0]
                ):
                    dims_mapping[0] = compatible_dim_mapping
                    changed = True
            else:
                if (
                    len(dims_mapping) >= 2
                    and compatible_dim_mapping != dims_mapping[1]
                ):
                    dims_mapping[1] = compatible_dim_mapping
                    changed = True
        for arg_name in op_desc.output_arg_names():
            if op_desc.type() == 'fill_any_like':
                input_tensor = dist_op.get_serial_input(
                    op_desc.input_arg_names()[0]
                )
                if input_tensor.is_parameter:
                    continue
            if op_desc.type() in ["shape", "slice"]:
                continue
            serial_tensor = dist_op.get_serial_output(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if arg_name not in output_xshape_arg_names:
                if (
                    len(dims_mapping) >= 1
                    and compatible_dim_mapping != dims_mapping[0]
                ):
                    dims_mapping[0] = compatible_dim_mapping
                    changed = True
            else:
                if (
                    len(dims_mapping) >= 2
                    and compatible_dim_mapping != dims_mapping[1]
                ):
                    dims_mapping[1] = compatible_dim_mapping
                    changed = True

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
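        """Replicate the serial forward op into the dist main program.

        The op desc is copied as-is with the mapped inputs/outputs. In prim
        mode, data parallel synchronization is delegated to
        prim_operator_data_parallel_functor. Otherwise, for every parameter
        input that has not been synchronized yet, a c_broadcast is appended
        to the startup program along every mesh axis the parameter is not
        sharded on, keeping parameter initialization consistent across ranks.
        """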
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        startup_block = dist_op_context.startup_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id

        # check the validity of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name
            )
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name
            )
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name
            )
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name
            )

        # replicate op in dist program
        dist_op_desc = main_block.append_op(type='nop').desc
        dist_op_desc.copy_from(src_op.desc)
        set_dist_op_desc_original_id(dist_op_desc, src_op.desc, ctx)
        for input_name in src_op.desc.input_names():
            dist_op_desc.set_input(input_name, kwargs[input_name])
        for output_name in src_op.desc.output_names():
            dist_op_desc.set_output(output_name, kwargs[output_name])

        # data parallel synchronization for primitive operators
        from paddle.incubate.autograd import prim_enabled

        if prim_enabled():
            assert is_prim_op(src_op)
            prim_operator_data_parallel_functor(ctx, src_op)
            return

        # param initialization sync
        if src_op.type in __op_not_need_param_init__:
            return

        for varname in dist_op_desc.input_arg_names():
            if (
                startup_block.has_var(varname)
                and startup_block.var(varname).is_parameter
                and varname not in dist_op_context.already_init_sync_vars
            ):
                dist_op_context.already_init_sync_vars.add(varname)
                param = startup_block.var(varname)
                param_dist_attr = ctx.get_tensor_dist_attr_for_program(param)
                process_mesh = param_dist_attr.process_mesh
                dims_mapping = param_dist_attr.dims_mapping

                # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
                if rank_id not in process_mesh.processes:
                    rank_id = _get_corresponding_rank(
                        ctx, process_mesh, rank_id
                    )

                # NOTE: every mesh axis the parameter is not split on must be synced
                for axis, size in enumerate(process_mesh.topology):
                    if size <= 1 or axis in dims_mapping:
                        pass
                    else:
                        group_ranks = _get_comm_group(
                            process_mesh.processes,
                            process_mesh.topology,
                            axis,
                            rank_id,
                        )
                        sync_group = new_process_group(group_ranks)

                        new_op = startup_block.append_op(
                            type='c_broadcast',
                            inputs={'X': param},
                            outputs={'Out': param},
                            attrs={
                                'ring_id': sync_group.id,
                                'root': 0,
                                'use_calc_stream': True,
                                OP_ROLE_KEY: OpRole.Forward,
                            },
                        )

                        # set distributed attribute
                        op_attr = OperatorDistributedAttribute()
                        op_attr.process_mesh = process_mesh
                        op_attr.set_output_dims_mapping(
                            param.name, dims_mapping
                        )
                        op_attr.set_input_dims_mapping(param.name, dims_mapping)
                        ctx.set_op_dist_attr_for_program(new_op, op_attr)

    @staticmethod
    def backward(ctx, *args, **kwargs):
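        """Replicate the serial backward op and insert data parallel
        gradient synchronization for the relevant parameter gradients."""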

        # for now, the backward function only inserts the gradient allreduce for the dist op itself
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        backward_op = dist_op_context.cur_src_op
        dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
        assert (
            dist_attr is not None
        ), "backward op [{}] doesn't have dist attribute!".format(
            str(backward_op)
        )
        rank_id = dist_op_context.rank_id

        # check the validity of inputs / outputs
        for input_name in backward_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name
            )
            assert len(kwargs[input_name]) == len(
                backward_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name
            )
        for output_name in backward_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name
            )
            assert len(kwargs[output_name]) == len(
                backward_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name
            )

        # replicate op in dist program
        dist_op_desc = main_block.append_op(type='nop').desc
        dist_op_desc.copy_from(backward_op.desc)
        # Refer to the related dist op
        set_dist_op_desc_original_id(dist_op_desc, backward_op.desc, ctx)
        for input_name in backward_op.desc.input_names():
            dist_op_desc.set_input(input_name, kwargs[input_name])
        for output_name in backward_op.desc.output_names():
            dist_op_desc.set_output(output_name, kwargs[output_name])

        # data parallel gradient synchronization
        act_grad_names = []
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and not is_parameter_related(
                    varname, main_block
                ):
                    act_grad_names.append(varname)

        out_grad_names = []
        for output_name in backward_op.desc.output_names():
            for varname in backward_op.desc.output(output_name):
                if varname in kwargs["grad_var_to_var"]:
                    fwd_name = kwargs["grad_var_to_var"][varname]
                    if fwd_name not in main_block.vars:
                        continue
                    if is_parameter_related(fwd_name, main_block):
                        out_grad_names.append(varname)

        gradient_synchronization(
            ctx, backward_op, act_grad_names, out_grad_names, rank_id
        )


register_distributed_operator_impl(
    "default", DistributedDefaultImpl0("replicate_parallel")
)