# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
import paddle
import paddle.distributed as dist
import paddle.fluid.framework as framework
import paddle.fluid.data_feeder as data_feeder
import paddle.fluid.layer_helper as layer_helper
from paddle.distributed.communication.group import (
    _get_global_group,
    _warn_cur_rank_not_in_group,
    _get_or_throw_group_rank,
)


def _check_tensor_shape(tensor, shape, nranks=1):
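    """Check that ``tensor``'s shape equals ``shape`` with its first dimension divided by ``nranks``."""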
    expect_shape = list(shape)
    expect_shape[0] //= nranks
    if list(tensor.shape) != expect_shape:
        raise RuntimeError("The in_tensor for scatter is not correctly-sized.")


def _check_tensor_list_shape(tensor_list, shape, nranks=1):
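    """Check that ``tensor_list`` contains exactly ``nranks`` tensors, each with shape ``shape``."""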
    if len(tensor_list) != nranks:
        raise RuntimeError(
            "The tensor_list for scatter is not correctly-sized."
        )
    for tensor in tensor_list:
        if tensor.shape != shape:
            raise RuntimeError(
                "The tensor_list for scatter is not correctly-sized."
            )


def _scatter_tensor_in_dygraph(
    out_tensor, in_tensor, src_rank_in_group, group, sync_op, use_calc_stream
):
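    """Scatter ``in_tensor`` from the src rank in even chunks along dim 0 into ``out_tensor`` (dygraph path)."""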
    nranks = group.nranks
    if group.rank == src_rank_in_group:
        _check_tensor_shape(out_tensor, in_tensor.shape, nranks)

    if use_calc_stream:
        return group.process_group.scatter_tensor_on_calc_stream(
            in_tensor, out_tensor, src_rank_in_group
        )

    task = group.process_group.scatter_tensor(
        in_tensor, out_tensor, src_rank_in_group, sync_op
    )
    if sync_op:
        task.wait()

    return task


def _scatter_in_dygraph(
    tensor, tensor_list, src_rank_in_group, group, sync_op, use_calc_stream
):
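    """Scatter the tensors in ``tensor_list`` from the src rank; each rank receives one entry in ``tensor`` (dygraph path)."""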
    nranks = group.nranks
    if group.rank == src_rank_in_group:
        if len(tensor_list) == 0:
            raise RuntimeError(
                "The tensor_list should not be empty on src rank."
            )
        _check_tensor_list_shape(tensor_list, tensor.shape, nranks)
    else:
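        # Non-src ranks provide no real input; build a placeholder list so the call signature is satisfied.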
        tensor_list = [tensor for _ in range(nranks)]

    if use_calc_stream:
        return group.process_group.scatter_on_calc_stream(
            tensor_list, tensor, src_rank_in_group
        )

    task = group.process_group.scatter(
        tensor_list, tensor, src_rank_in_group, sync_op
    )
    if sync_op:
        task.wait()

    return task


def _scatter_in_static_mode(
    tensor,
    tensor_or_tensor_list,
    src_rank_in_group,
    group,
    sync_op,
    use_calc_stream,
):
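    """Scatter in static-graph mode by appending a ``c_scatter`` op to the current program."""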
    nranks = dist.get_world_size() if group is None else group.nranks
    rank = dist.get_rank()

    input_tensor = tensor_or_tensor_list
    if isinstance(tensor_or_tensor_list, list):
        tensor_list = tensor_or_tensor_list
        if rank == src_rank_in_group:
            if len(tensor_list) == 0:
                raise RuntimeError(
                    "The tensor_list should not be empty on src rank."
                )
        else:
            tensor_list = [tensor for _ in range(nranks)]
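        # The c_scatter op takes a single input tensor, so flatten the list into one tensor along dim 0.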
        input_tensor = paddle.concat(tensor_list, axis=0)

    ring_id = 0 if group is None else group.id

    data_feeder.check_variable_and_dtype(
        tensor,
        'tensor',
        [
            'float16',
            'float32',
            'float64',
            'int32',
            'int64',
            'int8',
            'uint8',
            'bool',
        ],
        'scatter',
    )

    op_type = 'c_scatter'
    helper = layer_helper.LayerHelper(op_type, **locals())
    helper.append_op(
        type=op_type,
        inputs={'X': [input_tensor]},
        outputs={'Out': [tensor]},
        attrs={
            'ring_id': ring_id,
            'root': src_rank_in_group,
            'use_calc_stream': sync_op,
            'nranks': nranks,
        },
    )

    return None


def scatter(
    tensor,
    tensor_or_tensor_list=None,
    src=0,
    group=None,
    sync_op=True,
    use_calc_stream=False,
):
    """

    Scatter a tensor (or a tensor list) across devices.

    Args:
        tensor (Tensor): The output tensor on each rank. The result will overwrite this tensor after communication. Support
            float16, float32, float64, int32, int64, int8, uint8 or bool as the input data type.
        tensor_or_tensor_list (Union[Tensor, List[Tensor]]): The input to scatter (default is `None`, must be specified on the source rank).
            If it is a tensor, it should be correctly-sized. If it is a list, it should contain correctly-sized tensors.
        src (int, optional): Rank of the source device. If none is given, use `0` as default.
        group (Group, optional): Communicate in which group. If none is given, use the global group as default.
        sync_op (bool, optional): Indicate whether the communication is sync or not. If none is given, use true as default.
        use_calc_stream (bool, optional): Indicate whether the communication is done on calculation stream. If none is given, use false as default. This
            option is designed for high performance demand; do not turn it on unless you clearly understand its meaning.

    Returns:
        Return a task object.

    Warning:
        This API only supports the dygraph mode now.

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            import paddle.distributed as dist

            dist.init_parallel_env()
            if dist.get_rank() == 0:
                data1 = paddle.to_tensor([7, 8, 9])
                data2 = paddle.to_tensor([10, 11, 12])
                dist.stream.scatter(data1, src=1)
            else:
                data1 = paddle.to_tensor([1, 2, 3])
                data2 = paddle.to_tensor([4, 5, 6])
                dist.stream.scatter(data1, [data1, data2], src=1)
            out = data1.numpy()
            # [1, 2, 3] (2 GPUs, out for rank 0)
            # [4, 5, 6] (2 GPUs, out for rank 1)
    """
    if _warn_cur_rank_not_in_group(group):
        return

    if not sync_op and use_calc_stream:
        raise RuntimeError(
            "use_calc_stream can only be true in sync op behavior."
        )

    # NOTE(liyurui): Only the source rank needs to specify the tensor_or_tensor_list argument.
    # If other ranks pass this argument in, it will be ignored with a warning.
    # If a tensor_list is passed in, we need to concat it into a tensor before invoking the C++ API.
    # If a tensor is passed in, no concat is needed.
    # The type passed in on non-src ranks is meaningless, since it will be ignored.
    if src != dist.get_rank():
        if tensor_or_tensor_list is not None:
            warnings.warn(
                "Specific `tensor_or_tensor_list` is meaningless for rank which is not src."
            )
        tensor_or_tensor_list = []

    if framework.in_dygraph_mode():
        group = _get_global_group() if group is None else group
        src_rank_in_group = _get_or_throw_group_rank(src, group)
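        # A single tensor input is scattered in even chunks along dim 0; a tensor list is scattered one entry per rank.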
        if paddle.is_tensor(tensor_or_tensor_list):
            return _scatter_tensor_in_dygraph(
                tensor,
                tensor_or_tensor_list,
                src_rank_in_group,
                group,
                sync_op,
                use_calc_stream,
            )
        else:
            return _scatter_in_dygraph(
                tensor,
                tensor_or_tensor_list,
                src_rank_in_group,
                group,
                sync_op,
                use_calc_stream,
            )
    else:
        assert group is None, "Group can not be used in static mode for now."

        return _scatter_in_static_mode(
            tensor,
            tensor_or_tensor_list,
            src,
            group,
            sync_op,
            use_calc_stream,
        )