# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid.data_feeder as data_feeder
import paddle.fluid.framework as framework
import paddle.fluid.layer_helper as layer_helper
from paddle.distributed.communication.group import (
    _get_global_group,
    _warn_cur_rank_not_in_group,
)
from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op


def _all_reduce_in_dygraph(tensor, op, group, sync_op, use_calc_stream):
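    """Dygraph implementation: dispatch to the group's process group.

    With ``use_calc_stream`` the reduction is issued directly on the
    calculation stream; otherwise a communication task is created and,
    when ``sync_op`` is true, waited on before returning.
    """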
    op_type = _get_reduce_op(op, "allreduce")

    if use_calc_stream:
        return group.process_group.all_reduce_on_calc_stream(tensor, op_type)

    task = group.process_group.all_reduce(tensor, op_type, sync_op)
    if sync_op:
        task.wait()

    return task


def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream):
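    """Static-graph implementation: append a collective all-reduce op.

    Validates the tensor dtype, resolves the ring id from the group, and
    reduces in place (the op writes its output back into ``tensor``).
    Always returns ``None``; tasks are not supported in static mode yet
    (see the TODO below).
    """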
    data_feeder.check_variable_and_dtype(
        tensor,
        'tensor',
        [
            'float16',
            'float32',
            'float64',
            'int32',
            'int64',
            'int8',
            'uint8',
            'bool',
        ],
        'all_reduce',
    )

    op_type = _get_reduce_op(op, "allreduce")
    ring_id = 0 if group is None else group.id

    if not isinstance(ring_id, int):
        raise ValueError("The type of 'ring_id' for all_reduce should be int.")

    # TODO: Support task and use task.wait in static mode
    #       Use use_calc_stream rather than sync_op
    helper = layer_helper.LayerHelper(op_type, **locals())
    helper.append_op(
        type=op_type,
        inputs={'X': [tensor]},
        outputs={'Out': [tensor]},
        attrs={'ring_id': ring_id, 'use_calc_stream': sync_op},
    )

    return None


def all_reduce(
    tensor, op=ReduceOp.SUM, group=None, sync_op=True, use_calc_stream=False
):
    """

    Perform a specific reduction (for example, sum, max) on inputs across devices.

    Args:
        tensor (Tensor): The input tensor on each rank. The result will overwrite this tensor after communication. Supports
            float16, float32, float64, int32, int64, int8, uint8 or bool as the input data type.
        op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD, optional): The reduction operation to apply. If none is given, use ReduceOp.SUM as default.
        group (Group, optional): The group to communicate in. If none is given, use the global group as default.
        sync_op (bool, optional): Whether the communication is synchronous. If none is given, use true as default.
        use_calc_stream (bool, optional): Whether to perform the communication on the calculation stream. If none is given, use false as default. This
            option is designed for high-performance scenarios; do not enable it unless you fully understand what it does.

    Returns:
        Return a task object (``None`` in static graph mode).

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            import paddle.distributed as dist

            dist.init_parallel_env()
            local_rank = dist.get_rank()
            data = None
            if local_rank == 0:
                data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
            else:
                data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            task = dist.stream.all_reduce(data, sync_op=False)
            task.wait()
            out = data
            # [[5, 7, 9], [5, 7, 9]]
    """
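    # Do nothing (and warn) if the current rank is not part of the group.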
    if _warn_cur_rank_not_in_group(group):
        return

    # use_calc_stream is only valid for synchronous communication.
    if not sync_op and use_calc_stream:
        raise RuntimeError(
            "use_calc_stream can only be true in sync op behavior."
        )

    # Dygraph dispatches directly to the process group; static graph mode
    # appends a collective op to the program instead.
    if framework.in_dygraph_mode():
        group = _get_global_group() if group is None else group
        return _all_reduce_in_dygraph(
            tensor, op, group, sync_op, use_calc_stream
        )
    else:
        assert group is None, "Group cannot be used in static mode for now."
        return _all_reduce_in_static_mode(
            tensor, op, group, sync_op, use_calc_stream
        )
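

# A minimal end-to-end sketch of the synchronous path, assuming two trainers
# launched via `python -m paddle.distributed.launch --gpus 0,1 demo.py`
# (the script name `demo.py` is hypothetical):
#
#     # demo.py
#     import paddle
#     import paddle.distributed as dist
#
#     dist.init_parallel_env()
#     # Each rank contributes a different tensor: [1, 2, 3] on rank 0,
#     # [2, 4, 6] on rank 1.
#     data = paddle.to_tensor([1, 2, 3]) * (dist.get_rank() + 1)
#     # sync_op=True blocks until the reduction has finished on this rank.
#     dist.stream.all_reduce(data, op=dist.ReduceOp.SUM, sync_op=True)
#     print(data)  # every rank now holds [3, 6, 9]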