#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle import framework
import paddle
from paddle.fluid import core
from paddle.fluid.dygraph.parallel import (
    _split_tensors,
    sync_params_buffers,
    build_groups,
)
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from .log_util import logger

__all__ = []
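
# Typical call sites (a sketch, not prescriptive; assumes the
# HybridCommunicateGroup `hcg` comes from
# paddle.distributed.fleet.get_hybrid_communicate_group()):
#
#   broadcast_dp_parameters(model, hcg)                      # once after init
#   loss.backward()
#   fused_allreduce_gradients(list(model.parameters()), hcg)
#   optimizer.step()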


def _apply_collective_grads(parameters, comm_group, bucket_size, scale=None):
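    """Fuse, scale, and all-reduce dense gradients (legacy dygraph path).

    Gradients of trainable parameters are coalesced into buckets controlled
    by ``bucket_size``, scaled (divided by ``nranks`` when ``scale`` is None,
    otherwise multiplied by ``scale``), all-reduced over ``comm_group`` (the
    global group when ``comm_group`` is None), and split back in place.
    """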
    grad_var_set = set()
    grad_vars = []
    sparse_grad_vars = []

    for param in parameters:
        if param.trainable and (param._grad_ivar() is not None):
            g_var = param._grad_ivar()
            assert (
                not g_var._is_sparse()
            ), "sparse gradients are not supported yet"
            grad_vars.append(g_var)
            assert g_var not in grad_var_set
            grad_var_set.add(g_var)

    coalesced_grads_and_vars = build_groups(grad_vars, bucket_size)

    nranks = (
        paddle.distributed.get_world_size()
        if comm_group is None
        else comm_group.nranks
    )

    # default: average gradients by dividing by nranks; an explicit `scale`
    # is applied as a multiplier (expressed below as division by 1/scale)
    scale = nranks if scale is None else 1.0 / scale
    scale = None if scale == 1.0 else scale

    for coalesced_grad, _, _ in coalesced_grads_and_vars:
        # divide by nranks (or apply the user-provided scale) before all-reduce
        if scale is not None:
            div_factor = paddle.to_tensor(scale, dtype=coalesced_grad.dtype)
            paddle.fluid.framework._dygraph_tracer().trace_op(
                type="elementwise_div",
                inputs={'X': coalesced_grad, 'Y': div_factor},
                outputs={'Out': coalesced_grad},
                attrs={'axis': -1},
            )
        paddle.distributed.all_reduce(coalesced_grad, group=comm_group)

    _split_tensors(coalesced_grads_and_vars)


def _apply_collective_grads_eager(
    parameters, comm_group, bucket_size, scale=None
):
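    """Eager-mode counterpart of ``_apply_collective_grads``.

    Same fuse / scale / all-reduce / split pipeline, but uses the in-place
    ``scale_`` on the coalesced gradients instead of a traced
    ``elementwise_div`` op.
    """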
    grad_var_set = set()
    grad_vars = []

    for param in parameters:
        if param.trainable and (param._grad_ivar() is not None):
            g_var = param._grad_ivar()
            assert (
                not g_var.is_sparse()
            ), "sparse gradients are not supported yet"
            grad_vars.append(g_var)
            assert g_var not in grad_var_set
            grad_var_set.add(g_var)

    coalesced_grads_and_vars = build_groups(grad_vars, bucket_size)

    nranks = (
        paddle.distributed.get_world_size()
        if comm_group is None
        else comm_group.nranks
    )

    # default: average gradients by multiplying by 1/nranks; otherwise use
    # the explicit `scale` as the multiplier
    scale = 1.0 / nranks if scale is None else scale
    scale = None if scale == 1.0 else scale

    for coalesced_grad, _, _ in coalesced_grads_and_vars:
        # divide by nranks (or apply the user-provided scale) before all-reduce
        if scale is not None:
            coalesced_grad.scale_(scale)
        paddle.distributed.all_reduce(coalesced_grad, group=comm_group)

    _split_tensors(coalesced_grads_and_vars)


def _broadcast_data_help(data, shape, dtype, hcg):
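    """Broadcast one tensor from the model-parallel source rank to the group.

    The shape is broadcast first so that non-source ranks can allocate a
    matching buffer; the received buffer is then shared back into ``data``
    in place.
    """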
    model_parallel_group = hcg.get_model_parallel_group()
    src_rank = hcg.get_model_parallel_group_src_rank()
    mp_rank = hcg.get_model_parallel_rank()

    shape_gpu = paddle.to_tensor(shape, dtype="int32")
    paddle.distributed.broadcast(
        shape_gpu, src=src_rank, group=model_parallel_group, sync_op=True
    )

    if mp_rank != 0:
        input_data = paddle.zeros(shape_gpu, dtype=dtype)
    else:
        input_data = data

    paddle.distributed.broadcast(
        input_data, src=src_rank, group=model_parallel_group, sync_op=True
    )

    if mp_rank != 0:
        if in_dygraph_mode():
            data._clear_data()
            input_data._share_buffer_to(data)
        else:
            data.value().get_tensor()._clear()
            data.value().get_tensor()._share_data_with(
                input_data.value().get_tensor()
            )


def broadcast_input_data(hcg, *inputs, **kwargs):
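    """Broadcast tensor inputs from the model-parallel source rank.

    Positional and keyword arguments that are tensors are broadcast in place
    over the model-parallel group of ``hcg``; non-tensor values are logged as
    unsupported and left unchanged. On GPU in eager mode, CPU tensors are
    moved to the current device before broadcasting.

    Minimal usage sketch (assumes ``hcg`` comes from
    ``paddle.distributed.fleet.get_hybrid_communicate_group()``)::

        inputs, kwargs = broadcast_input_data(hcg, tokens, labels=labels)
    """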
    cur_device = paddle.get_device()
    for v in inputs:
        if isinstance(v, (core.VarBase, core.eager.Tensor)):
            with framework.no_grad():
                if (
                    "gpu" in cur_device
                    and in_dygraph_mode()
                    and not v.place.is_gpu_place()
                ):
                    v_gpu = v.cuda(int(cur_device.split(":")[1]))
                    v._clear_data()
                    v_gpu._share_buffer_to(v)
                _broadcast_data_help(v, v.shape, v.dtype, hcg)
        else:
            logger.error("it doesn't support data type {}".format(type(v)))

    for k, v in kwargs.items():
        if isinstance(v, (core.VarBase, core.eager.Tensor)):
            with framework.no_grad():
                if (
                    "gpu" in cur_device
                    and in_dygraph_mode()
                    and not v.place.is_gpu_place()
                ):
                    v_gpu = v.cuda(int(cur_device.split(":")[1]))
                    v._clear_data()
                    v_gpu._share_buffer_to(v)
                _broadcast_data_help(v, v.shape, v.dtype, hcg)
            kwargs[k] = v
        else:
            logger.error("it doesn't support data type {}".format(type(v)))
    return inputs, kwargs


def broadcast_mp_parameters(model, hcg):
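    """Sync ``model``'s parameters and buffers across the model-parallel
    group by broadcasting them from the group's source rank."""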
    model_parallel_group = hcg.get_model_parallel_group()
    src_rank = hcg.get_model_parallel_group_src_rank()
    sync_params_buffers(
        model, model_parallel_group, src_rank, is_model_parallel=True
    )


def broadcast_dp_parameters(model, hcg):
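    """Sync ``model``'s parameters and buffers across the data-parallel
    group by broadcasting them from the group's source rank."""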
    data_parallel_group = hcg.get_data_parallel_group()
    src_rank = hcg.get_data_parallel_group_src_rank()
    sync_params_buffers(
        model, data_parallel_group, src_rank, is_model_parallel=False
    )


def fused_allreduce_gradients_with_group(
    parameter_list, group, bucket_size=128 * 1024 * 1024, scale=None
):
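    """Fused all-reduce of the gradients of ``parameter_list`` over ``group``,
    dispatching to the eager or legacy implementation depending on the
    current dygraph mode."""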
    apply_func = (
        _apply_collective_grads_eager
        if in_dygraph_mode()
        else _apply_collective_grads
    )
    with framework.no_grad():
        apply_func(parameter_list, group, bucket_size, scale)


def fused_allreduce_gradients(parameter_list, hcg):
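    """All-reduce gradients over the data-parallel group of ``hcg`` (or the
    global group when ``hcg`` is None) using the fused path above."""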
    data_parallel_group = None if hcg is None else hcg.get_data_parallel_group()
    logger.debug("dp start fuse allreduce gradients")
    fused_allreduce_gradients_with_group(parameter_list, data_parallel_group)


def sharding_reduce_gradients(parameter_list, hcg):
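    """All-reduce and average each trainable parameter's gradient over the
    sharding-parallel group of ``hcg`` (per-parameter, un-fused)."""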
    # TODO allreduce --> reduce
    # TODO merge grad / nrank with dp
    logger.debug("sharding start gradients sync")
    with framework.no_grad():

        sharding_nrank = hcg.get_sharding_parallel_group().nranks
        for param in parameter_list:
            if param.trainable and (param._grad_ivar() is not None):
                if in_dygraph_mode():
                    param.grad.scale_(1.0 / sharding_nrank)
                    paddle.distributed.all_reduce(
                        param.grad,
                        group=hcg.get_sharding_parallel_group(),
                        sync_op=True,
                    )

                elif _in_legacy_dygraph():
                    g_var = param._grad_ivar()
                    # need use trace_op to allreduce
                    # paddle.distributed.all_reduce(
                    #     g_var, group=hcg.get_sharding_parallel_group(), use_calc_stream=True)
                    paddle.fluid.framework._dygraph_tracer().trace_op(
                        type="c_allreduce_sum",
                        inputs={'X': g_var},
                        outputs={'Out': g_var},
                        attrs={
                            'ring_id': hcg.get_sharding_parallel_group().id,
                            'use_calc_stream': True,
                        },
                    )

                    # grad /= sharding_nrank
                    div_factor = paddle.to_tensor(
                        sharding_nrank, dtype=g_var.dtype
                    )
                    paddle.fluid.framework._dygraph_tracer().trace_op(
                        type="elementwise_div",
                        inputs={'X': g_var, 'Y': div_factor},
                        outputs={'Out': g_var},
                        attrs={'axis': -1},
                    )


def broadcast_sharding_parameters(model, hcg):
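    """Sync ``model``'s parameters and buffers across the sharding-parallel
    group by broadcasting them from the group's source rank."""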
    # TODO: to save memory, use un-fused broadcast to avoid potential OOM
    logger.debug("sharding start init parameters sync")
    sharding_parallel_group = hcg.get_sharding_parallel_group()
    src_rank = hcg.get_sharding_parallel_group_src_rank()
    sync_params_buffers(
        model, sharding_parallel_group, src_rank, is_model_parallel=False
    )