#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

from collections import OrderedDict

import paddle
from paddle.framework import core

from ..collective import _get_global_env, _new_ring_id
from ..utils.log_utils import get_logger
from .utils import dygraph_guard

logger = get_logger("INFO", __name__)


def get_all_process_groups():
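    """Return all registered ProcessGroup instances, including the reserved group 0."""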
    global _g_process_group_map
    return _g_process_group_map.values()


def get_process_group(group_id, g_process_group_map=None):
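    """Look up a ProcessGroup by id in the given map, or in the global registry.

    Returns None if no group with ``group_id`` has been registered.
    """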
    global _g_process_group_map
    return (
        _g_process_group_map.get(group_id, None)
        if g_process_group_map is None
        else g_process_group_map.get(group_id, None)
    )


def get_world_process_group():
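    """Return the reserved group 0, which accumulates the ranks of every created group."""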
    global _g_process_group_map
    return _g_process_group_map[0]


def clear_all_process_groups():
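    """Reset the global registry, keeping only an empty reserved group 0."""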
    global _g_process_group_map
    _g_process_group_map = {}
    _g_process_group_map[0] = ProcessGroup(0, [])


def new_process_group(ranks, group_id=None, force_new_group=False):
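    """Return an existing group with the same ranks or register a new one.

    Unless ``force_new_group`` is set, a registered group whose ranks match
    ``ranks`` is reused. Otherwise a new ProcessGroup is created; when
    ``group_id`` is not given, an id is derived from ``_new_ring_id()`` so it
    does not clash with ring ids used by the original collective implementation.
    """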
    global _g_process_group_map
    if not force_new_group:
        # A key constructed from ranks is used for avoiding duplication
        new_key = ''.join(map(str, ranks))
        for pg_id, pg in _g_process_group_map.items():
            cur_key = ''.join(map(str, pg.ranks))
            if pg_id != 0 and new_key == cur_key:
                return pg
    # If not matching the existing one, construct a new process group
    num_groups = len(_g_process_group_map)
    # Note: our process group may interfere with the original implementation,
    # so the created group id should start from the original _new_ring_id()
    if group_id is None:
        group_id = _new_ring_id() + num_groups + 1

    new_pg = ProcessGroup(group_id, ranks)
    _g_process_group_map[group_id] = new_pg
    return new_pg


# This implementation borrows heavily from Paddle/python/paddle/distributed/collective.py.
# Fleet also has a collective helper that uses ops to initialize communication, in
# Paddle/python/paddle/distributed/fleet/meta_optimizers/common.py. We use the first one
# because it is simpler. This should be enhanced to manage process membership and
# the instantiation process in a more general way. In the future, the process group may
# also handle the choice of communication implementation.
class ProcessGroup:
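    """A communication group identified by a ring id and a list of global ranks.

    The group is lazily instantiated: ranks may be added until ``instantiate()``
    creates the parallel context (NCCL, BKCL, or XCCL, depending on the device)
    for this ring on the current process.
    """
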
    def __init__(self, group_id, ranks):
        if group_id == 0 and get_process_group(0) is not None:
            assert (
                group_id != 0
            ), "Process group id 0 is reserved for all ranks."
        self._group_id = group_id
        self._ranks = ranks
        # Add the current ranks into group 0
        if group_id != 0:
            global _g_process_group_map
            _g_process_group_map[0].add_ranks(ranks)
        self._is_instantiate = False

    @property
    def id(self):
        return self._group_id

    @property
    def ranks(self):
        return self._ranks

    @property
    def nranks(self):
        return len(self._ranks)

    def add_ranks(self, new_ranks):
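        """Merge ``new_ranks`` into this group; only allowed before instantiation."""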
        if set(new_ranks) <= set(self.ranks):
            return
        else:
            assert (
                not self.is_instantiate()
            ), "Cannot add new ranks after instantiating the process group"
        self._ranks.extend(new_ranks)
        self._ranks = list(set(self.ranks))

    def local_rank(self, global_rank):
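        """Return the index of ``global_rank`` within this group's rank list."""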
        if global_rank in self.ranks:
            return self.ranks.index(global_rank)
        else:
            raise AssertionError(
                f"Rank {global_rank} doesn't belong to this group"
            )

    def is_instantiate(self):
        return self._is_instantiate

    @dygraph_guard
    def instantiate(self):
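        """Create the communication context for this ring and synchronize members.

        On the first call, the matching parallel context (NCCL for CUDA, BKCL
        for XPU, XCCL for custom devices) is initialized for processes that
        belong to this group, followed by barriers to keep ranks in step.
        Subsequent calls are no-ops.
        """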
        if self._is_instantiate:
            return
        ring_id = self.id
        genv = _get_global_env()
        global_rank = genv.rank

        if self.nranks >= 2 and global_rank in self.ranks:
            logger.info(
                f"group_id: {self.id}, ranks: {self.ranks}, nranks: {self.nranks}, trainer_endpoints: {genv.current_endpoint}"
            )
            strategy = core.ParallelStrategy()
            strategy.nranks = self.nranks
            strategy.local_rank = self.local_rank(global_rank)
            strategy.trainer_endpoints = [
                genv.trainer_endpoints[i] for i in self.ranks
            ]
            strategy.current_endpoint = genv.current_endpoint
            strategy.nrings = 1
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(genv.device_id)
                core.NCCLParallelContext(strategy, place).init_with_ring_id(
                    ring_id
                )
            elif core.is_compiled_with_xpu():
                place = core.XPUPlace(genv.device_id)
                core.BKCLParallelContext(strategy, place).init_with_ring_id(
                    ring_id
                )
            elif genv.device_type in core.get_all_custom_device_type():
                place = core.CustomPlace(genv.device_type, genv.device_id)
                core.XCCLParallelContext(strategy, place).init_with_ring_id(
                    ring_id
                )
            else:
                raise AssertionError('No CUDA device found')

            if core.is_compiled_with_cuda():
                paddle.set_device(
                    'gpu:%d' % paddle.distributed.ParallelEnv().dev_id
                )
            elif core.is_compiled_with_xpu():
                paddle.set_device(
                    'xpu:%d' % paddle.distributed.ParallelEnv().dev_id
                )
            elif genv.device_type in core.get_all_custom_device_type():
                paddle.set_device(
                    '%s:%d'
                    % (
                        paddle.distributed.ParallelEnv().device_type,
                        paddle.distributed.ParallelEnv().dev_id,
                    ),
                )

            # TODO(shenliang03): This is a temporary solution to solve the problem of
            # hang caused by cross-creation of new_group
            barrier_tensor = paddle.full([1], 1, dtype="int32")
            paddle._legacy_C_ops.barrier(
                barrier_tensor, barrier_tensor, 'ring_id', ring_id
            )

        if self.nranks > 1:
            barrier_tensor = paddle.full([1], 1, dtype="int32")
            paddle._legacy_C_ops.barrier(
                barrier_tensor, barrier_tensor, 'ring_id', 0
            )

        self._is_instantiate = True

    def is_member(self):
        return True

    def __eq__(self, other):
        if not isinstance(other, ProcessGroup):
            return False
        if self.id != other.id:
            return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        string = "id: {}, nranks: {}, ranks: {}.".format(
            self.id, self.nranks, ", ".join(map(str, self.ranks))
        )
        return string

    def __hash__(self):
        return hash(self.__str__())


# Note that Process group 0 is reserved for representing all ranks.
# At the beginning, group 0 is empty and new ranks will be added automatically.
_g_process_group_map = OrderedDict()
_g_process_group_map[0] = ProcessGroup(0, [])