# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl, is_parameter_related
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank

__op_not_need_param_init__ = ["while", "cond"]


class DistributedDefault(DistributedOperatorImplContainer):
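    """Container for the fallback (replicated) distributed implementations,
    used for ops that have no dedicated distributed operator impl."""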
    def __init__(self, op_type):
        super(DistributedDefault, self).__init__(op_type)


register_distributed_operator_impl_container(DistributedDefault("default"))


# Replicated Default
class DistributedDefaultImpl0(DistributedOperatorImpl):
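    """Replicated implementation: the serial op is copied to every rank.
    Parameters must be fully replicated; for other tensors only the batch
    dimension may be sharded."""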
    def __init__(self, name):
        super(DistributedDefaultImpl0, self).__init__(name)
        self._forward_implemented = True
        self._backward_implemented = True

    def is_input_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        batch_dim_mappings = []
        input_names = op_desc.input_names()
        xshape_arg_names = []
        if "XShape" in input_names:
            xshape_arg_names = op_desc.input("XShape")
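        # Tensors listed under "XShape" carry an extra leading dim of 0, so
        # their batch dim sits at index 1 and index 0 must stay unsharded.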
        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        if compute_compatible_dim_mapping(batch_dim_mappings) is None:
            return False

        return True

    def is_output_compatible(self, dist_op):
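        # Same rule as is_input_compatible, applied to the outputs.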
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        output_names = op_desc.output_names()
        batch_dim_mappings = []
        xshape_arg_names = []
        if "XShape" in output_names:
            xshape_arg_names = op_desc.output("XShape")
        for arg_name in op_desc.output_arg_names():
            serial_tensor = dist_op.get_serial_output(arg_name)
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        if compute_compatible_dim_mapping(batch_dim_mappings) is None:
            return False

        return True

    def is_auto_compatible(self, dist_op):
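        # Like is_input/is_output_compatible, but additionally requires all
        # batch-dim mappings to be exactly identical; nothing is updated.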
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        batch_dim_mappings = []
        # Check input compatibility
        input_names = op_desc.input_names()
        xshape_arg_names = []
        if "XShape" in input_names:
            xshape_arg_names = op_desc.input("XShape")
        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        # Check output compatibility
        output_names = op_desc.output_names()
        xshape_arg_names = []
        if "XShape" in output_names:
            xshape_arg_names = op_desc.output("XShape")
        for arg_name in op_desc.output_arg_names():
            serial_tensor = dist_op.get_serial_output(arg_name)
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if serial_tensor.is_parameter:
                for mapping in dims_mapping:
                    if mapping != -1:
                        return False
                continue
            if arg_name not in xshape_arg_names:
                if len(dims_mapping) > 1:
                    for mapping in dims_mapping[1:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                if dims_mapping[0] != -1:
                    return False
                if len(dims_mapping) > 2:
                    for mapping in dims_mapping[2:]:
                        if mapping != -1:
                            return False
                if len(dims_mapping) >= 2:
                    batch_dim_mappings.append(dims_mapping[1])

        # Check batch dim mapping compatibility
        if not all(batch_dim_mappings[0] == dim_mapping
                   for dim_mapping in batch_dim_mappings):
            return False

        return True

    def update_dims_mapping(self, dist_op):
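        # Propagate a single compatible batch-dim mapping across all
        # non-parameter inputs and outputs of the op.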
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        # The following statement will be replaced by a more elegant way
        if op_desc.type() in ("shape", "slice", "while"):
            return False

        input_names = op_desc.input_names()
        input_xshape_arg_names = []
        if "XShape" in input_names:
            input_xshape_arg_names = op_desc.input("XShape")

        output_names = op_desc.output_names()
        output_xshape_arg_names = []
        if "XShape" in output_names:
            output_xshape_arg_names = op_desc.output("XShape")

        batch_dim_mappings = []
        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if arg_name not in input_xshape_arg_names:
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                batch_dim_mappings.append(dims_mapping[1])
        for arg_name in op_desc.output_arg_names():
            if op_desc.type() == "fill_zeros_like":
                input_tensor = dist_op.get_serial_input(
                    op_desc.input_arg_names()[0])
                if input_tensor.is_parameter:
                    continue
            serial_tensor = dist_op.get_serial_output(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if arg_name not in output_xshape_arg_names:
                if len(dims_mapping) >= 1:
                    batch_dim_mappings.append(dims_mapping[0])
            else:
                batch_dim_mappings.append(dims_mapping[1])

        if not batch_dim_mappings:
            return changed

        compatible_dim_mapping = compute_compatible_dim_mapping(
            batch_dim_mappings)
        if compatible_dim_mapping is None:
            return False

        for arg_name in op_desc.input_arg_names():
            serial_tensor = dist_op.get_serial_input(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
            if arg_name not in input_xshape_arg_names:
                if len(dims_mapping) >= 1 and \
                    compatible_dim_mapping != dims_mapping[0]:
                    dims_mapping[0] = compatible_dim_mapping
                    changed = True
            else:
                if len(dims_mapping) >= 2 and \
                    compatible_dim_mapping != dims_mapping[1]:
                    dims_mapping[1] = compatible_dim_mapping
                    changed = True
        for arg_name in op_desc.output_arg_names():
            if op_desc.type() == "fill_zeros_like":
                input_tensor = dist_op.get_serial_input(
                    op_desc.input_arg_names()[0])
                if input_tensor.is_parameter:
                    continue
            serial_tensor = dist_op.get_serial_output(arg_name)
            if serial_tensor.is_parameter:
                continue
            dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
            if arg_name not in output_xshape_arg_names:
                if len(dims_mapping) >= 1 and \
                    compatible_dim_mapping != dims_mapping[0]:
                    dims_mapping[0] = compatible_dim_mapping
                    changed = True
            else:
                if len(dims_mapping) >= 2 and \
                    compatible_dim_mapping != dims_mapping[1]:
                    dims_mapping[1] = compatible_dim_mapping
                    changed = True

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):

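        # Replicate the serial op into the dist main program and, for any
        # parameter input, broadcast its initialized value to all ranks of
        # the process mesh via the startup program.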
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        startup_block = dist_op_context.startup_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id

        # check the validity of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name)
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name)
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name)

        # replicate op in dist program
        dist_op_desc = main_block.desc.append_op()
        dist_op_desc.copy_from(src_op.desc)
        set_dist_op_desc_original_id(dist_op_desc, src_op.desc, ctx)
        for input_name in src_op.desc.input_names():
            dist_op_desc.set_input(input_name, kwargs[input_name])
        for output_name in src_op.desc.output_names():
            dist_op_desc.set_output(output_name, kwargs[output_name])

        main_block._sync_with_cpp()

        # param initialization sync
        if src_op.type in __op_not_need_param_init__:
            return

        for varname in dist_op_desc.input_arg_names():
            if startup_block.has_var(varname) and \
                    startup_block.var(varname).is_parameter and \
                    varname not in dist_op_context.already_init_sync_vars:
                dist_op_context.already_init_sync_vars.add(varname)
                param = startup_block.var(varname)
                param_dist_attr = ctx.get_tensor_dist_attr_for_program(param)
                process_mesh = param_dist_attr.process_mesh
                dims_mapping = param_dist_attr.dims_mapping

                # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
                if rank_id not in process_mesh.processes:
                    rank_id = _get_corresponding_rank(ctx, process_mesh,
                                                      rank_id)

                # NOTE: the parameter must be synchronized along every mesh
                # axis it is not sharded on
                for axis, size in enumerate(process_mesh.topology):
                    if size <= 1 or axis in dims_mapping:
                        pass
                    else:
                        group_ranks = _get_comm_group(process_mesh.processes,
                                                      process_mesh.topology,
                                                      axis, rank_id)
                        sync_group = new_process_group(group_ranks)

                        new_op = startup_block.append_op(
                            type='c_broadcast',
                            inputs={'X': param},
                            outputs={'Out': param},
                            attrs={
                                'ring_id': sync_group.id,
                                'root': 0,
                                'use_calc_stream': True,
                                OP_ROLE_KEY: OpRole.Forward
                            })

                        # set distributed attribute
                        op_attr = OperatorDistributedAttribute()
                        op_attr.process_mesh = process_mesh
                        op_attr.set_output_dims_mapping(param.name,
                                                        dims_mapping)
                        op_attr.set_input_dims_mapping(param.name, dims_mapping)
                        ctx.set_op_dist_attr_for_program(new_op, op_attr)

                startup_block._sync_with_cpp()

    @staticmethod
    def backward(ctx, *args, **kwargs):

        # for now the backward function only inserts the gradient allreduce
        # for the dist op itself
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        backward_op = dist_op_context.cur_src_op
        dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
        assert dist_attr is not None, "backward op [{}] doesn't have a dist attribute!".format(
            str(backward_op))
        rank_id = dist_op_context.rank_id

        # check the validity of inputs / outputs
        for input_name in backward_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name)
            assert len(kwargs[input_name]) == len(
                backward_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name)
        for output_name in backward_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                backward_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name)

        # replicate op in dist program
        dist_op_desc = main_block.desc.append_op()
        dist_op_desc.copy_from(backward_op.desc)
        # Refer to the related dist op
        set_dist_op_desc_original_id(dist_op_desc, backward_op.desc, ctx)
        for input_name in backward_op.desc.input_names():
            dist_op_desc.set_input(input_name, kwargs[input_name])
        for output_name in backward_op.desc.output_names():
            dist_op_desc.set_output(output_name, kwargs[output_name])

        main_block._sync_with_cpp()

        # check if gradient allreduce is needed:
        # if there is a non-gradient & non-parameter input whose batch
        # dimension is sharded, we need to insert a gradient allreduce
        # for the gradients of the parameters in this op's outputs
        need_gradient_allreduce = False
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and not is_parameter_related(
                        varname, main_block):

                    # NOTE: use the dims_mapping of the backward op's own input
                    # var, not that of the corresponding forward op's varname
                    process_mesh = dist_attr.process_mesh
                    var_dim_mapping = dist_attr.get_input_dims_mapping(varname)

                    # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
                    if rank_id not in process_mesh.processes:
                        rank_id = _get_corresponding_rank(ctx, process_mesh,
                                                          rank_id)

                    mesh_shape = process_mesh.topology
                    batch_size_axis = var_dim_mapping[0]
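                    # a batch dim sharded over a mesh axis of size > 1 means
                    # data parallelism: parameter grads must be allreduced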
                    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                        need_gradient_allreduce = True
                        group_ranks = _get_comm_group(process_mesh.processes,
                                                      process_mesh.topology,
                                                      batch_size_axis, rank_id)
                        dp_degree = len(group_ranks)
                        dp_group = new_process_group(group_ranks)
                        break

        if need_gradient_allreduce:
            allreduce_vars = []
            for output_name in backward_op.desc.output_names():
                for varname in backward_op.desc.output(output_name):
                    if varname in kwargs["grad_var_to_var"]:
                        fwd_name = kwargs["grad_var_to_var"][varname]
                        if fwd_name not in main_block.vars:
                            continue
                        if is_parameter_related(fwd_name, main_block):
                            allreduce_vars.append(varname)

            if len(allreduce_vars) > 0:
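                # allreduce-sum over the data-parallel group, then scale by
                # 1.0 / dp_degree to average the gradients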

                for varname in allreduce_vars:

                    grad_var = main_block.var(varname)
                    allreduce_op = main_block.append_op(
                        type='c_allreduce_sum',
                        inputs={'X': [grad_var]},
                        outputs={'Out': [grad_var]},
                        attrs={
                            'ring_id': dp_group.id,
                            'use_calc_stream': True,
                            OP_ROLE_KEY: OpRole.Backward
                        })

                    scale_op = main_block.append_op(
                        type='scale',
                        inputs={'X': grad_var},
                        outputs={'Out': grad_var},
                        attrs={
                            'scale': 1.0 / dp_degree,
                            OP_ROLE_KEY: OpRole.Backward
                        })

                    dims_mapping = ctx.get_tensor_dist_attr_for_program(
                        grad_var).dims_mapping
                    process_mesh = dist_attr.process_mesh
                    for op in [allreduce_op, scale_op]:
                        op_attr = OperatorDistributedAttribute()
                        op_attr.process_mesh = process_mesh
                        op_attr.set_output_dims_mapping(grad_var.name,
                                                        dims_mapping)
                        op_attr.set_input_dims_mapping(grad_var.name,
                                                       dims_mapping)
                        ctx.set_op_dist_attr_for_program(op, op_attr)

                main_block._sync_with_cpp()


register_distributed_operator_impl(
    "default", DistributedDefaultImpl0("replicate_parallel"))