# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import warnings
from multiprocessing import Process  # noqa: F401
from multiprocessing import Manager  # noqa: F401
import time
import sys
import paddle

from paddle import compat as cpt

# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import _set_expected_place
from paddle.fluid.dygraph import parallel_helper
from paddle.distributed.fleet.launch_utils import check_backend
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed.fleet.base.private_helper_function import (
    wait_server_ready,
)  # noqa: F401
from paddle.distributed import collective
from paddle.distributed.collective import _set_group_map
from paddle.distributed.collective import _set_group_map_by_name
from paddle.distributed.collective import _get_group_map_by_name
from paddle.distributed.collective import _group_map_by_name
from paddle.distributed.collective import _default_group_name
from paddle.distributed.collective import _valid_backend_list
from paddle.distributed.collective import _set_default_backend
from paddle.distributed.collective import _set_default_store
from paddle.distributed.collective import _new_process_group_impl
from paddle.distributed.collective import Group
from paddle.distributed.collective import _set_group_map_backend
from paddle.distributed.communication.group import _add_new_group

__all__ = []

ParallelStrategy = core.ParallelStrategy

# NOTE(chenweihang): Maintain a global parallel env to avoid
# initializing ParallelEnv every time and improve performance
_global_parallel_env = None


def _get_global_parallel_env():
    global _global_parallel_env
    if _global_parallel_env is None:
        _global_parallel_env = ParallelEnv()
    return _global_parallel_env


def _start_kv_server(port, http_server_d, size):
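    """Run a key-value HTTP server (KVServer) on ``port``.

    The server is kept alive until the shared ``http_server_d['running']``
    flag is cleared and the server itself reports that it should stop.
    """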
    from paddle.distributed.fleet.utils.http_server import KVServer

    http_server = KVServer(int(port), size=size)
    http_server.start()
    wait_seconds = 3
    while http_server_d.get("running", False) or not http_server.should_stop():
        time.sleep(wait_seconds)
    http_server.stop()


def _is_cpuonly(backend):
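    """Return True if training should fall back to CPU-only (gloo) execution."""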
    check_backend(backend)
    if (
        backend in ['auto', 'nccl', 'bkcl', 'hccl', 'heter', 'cncl']
        and (
            core.is_compiled_with_cuda()
            or core.is_compiled_with_xpu()
            or core.is_compiled_with_npu()
            or core.is_compiled_with_mlu()
        )
    ) or backend == 'xccl':

        # the backend can use an accelerator build (or is 'xccl'), so this is not CPU-only; return False
        return False
    else:
        return True


def _check_var_exists(var_name):
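    """Raise a ValueError if the required environment variable is not set."""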
    var = os.environ.get(var_name, None)
    if var is None:
        raise ValueError(
            "paddle.distributed initialize error, "
            "environment variable %s is needed, but not set." % var_name
        )


def init_parallel_env():
    """
    Initialize parallel training environment in dynamic graph mode.

    .. note::
        Currently, both `NCCL` and `GLOO` contexts are initialized for communication.

    Args:
        backend (string): The backend used by DataParallel. It should be one of
            'gloo' (for cpu), 'nccl' (for cuda), 'bkcl' (for xpu), or 'auto' (auto detect).
            Auto detection prefers 'nccl' and 'bkcl' over 'gloo'.

    Returns:
        None

    Examples:
        .. code-block:: python
            # required: gpu
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.distributed as dist

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear1 = nn.Linear(10, 10)
                    self._linear2 = nn.Linear(10, 1)

                def forward(self, x):
                    return self._linear2(self._linear1(x))

            def train():
                # 1. initialize parallel environment
                dist.init_parallel_env()

                # 2. create data parallel layer & optimizer
                layer = LinearNet()
                dp_layer = paddle.DataParallel(layer)

                loss_fn = nn.MSELoss()
                adam = opt.Adam(
                    learning_rate=0.001, parameters=dp_layer.parameters())

                # 3. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                loss.backward()

                adam.step()
                adam.clear_grad()

            if __name__ == '__main__':
                dist.spawn(train)
    """

    # 0. get env & check world size
    global _global_parallel_env
    # when call init_parallel_env, need update `_global_parallel_env`
    _global_parallel_env = ParallelEnv()
    parallel_env = _global_parallel_env
    # if not parallel, `init_parallel_env` do nothing
    if parallel_env.world_size < 2:
        warnings.warn(
            "Currently not a parallel execution environment, `paddle.distributed.init_parallel_env` will not do anything."
        )
        return
    # NOTE(xiongkun): support cpu gloo only, add this environment variable to
    #                 enable cpu-only gloo parallel training
    backend = os.environ.get('PADDLE_DISTRI_BACKEND', 'auto')
    is_cpu_only = _is_cpuonly(backend)
    # 1. device check: must be gpu, xpu, npu, or mlu unless running cpu-only
    if not (
        is_cpu_only
        or core.is_compiled_with_cuda()
        or core.is_compiled_with_xpu()
        or core.is_compiled_with_npu()
        or core.is_compiled_with_mlu()
    ):
        raise NotImplementedError(
            "If you want to use CPU-only version, please use 'gloo' as backend"
        )

    if backend == "xccl":
        FLAGS_selected_custom_devices = 'FLAGS_selected_{}s'.format(
            parallel_env.device_type
        )
        _check_var_exists(FLAGS_selected_custom_devices)
    else:
        if not is_cpu_only and core.is_compiled_with_cuda():
            _check_var_exists("FLAGS_selected_gpus")
            backend = "nccl" if backend == "auto" else backend
        elif not is_cpu_only and core.is_compiled_with_xpu():
            _check_var_exists('FLAGS_selected_xpus')
            backend = "bkcl" if backend == "auto" else backend
        elif not is_cpu_only and core.is_compiled_with_npu():
            _check_var_exists('FLAGS_selected_npus')
            backend = "hccl" if backend == "auto" else backend
        elif not is_cpu_only and core.is_compiled_with_mlu():
            _check_var_exists('FLAGS_selected_mlus')
            backend = "cncl" if backend == "auto" else backend

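    # 2. check the environment variables set by the distributed launcher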
    _check_var_exists("PADDLE_TRAINER_ID")
    _check_var_exists("PADDLE_CURRENT_ENDPOINT")
    _check_var_exists("PADDLE_TRAINERS_NUM")
    _check_var_exists("PADDLE_TRAINER_ENDPOINTS")

    # NOTE(chenweihang): [ why configure the global place here? ]
    # dygraph mode is the default mode, so users will not call
    # `dygraph.guard` or `enable_dygraph` directly; if they want to
    # switch the default place, they need to call a function to
    # change it, so here we just set the correct place for users
    if backend == "xccl":
        place = core.CustomPlace(
            parallel_env.device_type, parallel_env.device_id
        )
    elif is_cpu_only:
        place = core.CPUPlace()
    elif core.is_compiled_with_cuda():
        place = core.CUDAPlace(parallel_env.device_id)
    elif core.is_compiled_with_xpu():
        place = core.XPUPlace(parallel_env.device_id)
    elif core.is_compiled_with_npu():
        place = core.NPUPlace(parallel_env.device_id)
    elif core.is_compiled_with_mlu():
        place = core.MLUPlace(parallel_env.device_id)

    _set_expected_place(place)

    group = None
    if backend in _valid_backend_list and in_dygraph_mode():
        if _default_group_name in _get_group_map_by_name():
            return _get_group_map_by_name()[_default_group_name]
        _set_default_backend(backend)
        rank = int(os.getenv("PADDLE_TRAINER_ID"))
        world_size = int(os.getenv("PADDLE_TRAINERS_NUM"))
        assert rank >= 0 and world_size > rank and world_size > 1, (
            "rank must be non-negative and world_size must be the "
            "maximum rank plus one. Moreover, at least two processes are "
            "required to create a process group."
        )
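        # resolve the master endpoint: MASTER_ADDR/MASTER_PORT take priority,
        # then PADDLE_MASTER, then the first entry of PADDLE_TRAINER_ENDPOINTS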
        master_addr = os.getenv("MASTER_ADDR", None)
        master_port = os.getenv("MASTER_PORT", None)
        endpoints = (
            ":".join([master_addr, master_port])
            if master_addr and master_port
            else None
        )
        if endpoints is None:
            endpoints = os.getenv("PADDLE_MASTER", None)
        if endpoints is None:
            endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',')[0]
        assert endpoints, (
            "The environment variables 'MASTER_ADDR' and 'MASTER_PORT' "
            "must be specified, for example 'export MASTER_ADDR=127.0.0.1' "
            "and 'export MASTER_PORT=54612'. Or you can start your training "
            "with paddle.distributed.run module."
        )
        master_addr, master_port = endpoints.split(":")
        master_port = int(master_port)
        is_master = rank == 0
        stop_check_timeout = int(os.getenv("FLAGS_stop_check_timeout", "900"))
        default_store = core.TCPStore(
            master_addr,
            master_port,
            is_master,
            world_size,
            timeout=stop_check_timeout,
        )
        _set_default_store(default_store)
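        # create the default process group on top of the TCP store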
        pg = _new_process_group_impl(
            backend,
            default_store,
            rank,
            world_size,
            _default_group_name,
            pg_options=None,
        )
        ranks = list(range(world_size))
        group = Group(rank, 0, ranks, pg=pg, name=_default_group_name)
        _set_group_map_by_name(_default_group_name, group)
        _set_group_map(0, group)
        _set_group_map_backend(group, backend)
        _add_new_group(group)
        parallel_helper._set_parallel_ctx(True)

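        # make sure all ranks finish initialization before returning the default group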
        paddle.distributed.barrier(group=group)
        return group

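    # distinct host addresses across all trainer endpoints (used to size the
    # '_worker' scope when the heter backend is used)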
    node_num = set([i.split(":")[0] for i in parallel_env.trainer_endpoints])
    # 3: init gloo context (step 1: http server start)
    init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
    if is_cpu_only or init_gloo or backend == "heter":
        ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
        manager = Manager()
        # global dict to store status
        http_server_d = manager.dict()
        http_server_d["running"] = False
        if parallel_env.rank == 0:
            # The scope for worker used by http server is '_worker'
            size = {'_worker': parallel_env.world_size}
            if backend == "heter":
                size = {'_worker': len(node_num)}
            http_server = Process(
                target=_start_kv_server,
                args=(int(ep_rank_0[1]), http_server_d, size),
            )
            http_server.daemon = True
            http_server_d["running"] = True
            http_server.start()

    # 4. init NCCL ParallelStrategy
    strategy = ParallelStrategy()
    if parallel_helper._is_parallel_ctx_initialized():
        warnings.warn("The parallel environment has been initialized.")
    strategy.nranks = parallel_env.world_size
    strategy.local_rank = parallel_env.rank
    strategy.trainer_endpoints = parallel_env.trainer_endpoints
    strategy.current_endpoint = parallel_env.current_endpoint
    strategy.nrings = parallel_env.nrings

    # init nccl or hccl or bkcl or heter context
    if is_cpu_only:
        parallel_helper._set_parallel_ctx(
            core.GLOOParallelContext(strategy, place)
        )
    elif backend == "heter":
        parallel_helper._set_parallel_ctx(
            core.HeterParallelContext(strategy, parallel_env.device_id)
        )
    elif core.is_compiled_with_cuda():
        parallel_helper._set_parallel_ctx(
            core.NCCLParallelContext(strategy, place)
        )
    elif core.is_compiled_with_xpu():
        parallel_helper._set_parallel_ctx(
            core.BKCLParallelContext(strategy, place)
        )
    elif core.is_compiled_with_npu():
        parallel_helper._set_parallel_ctx(
            core.HCCLParallelContext(strategy, place)
        )
    elif core.is_compiled_with_mlu():
        parallel_helper._set_parallel_ctx(
            core.CNCLParallelContext(strategy, place)
        )

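    # except for the heter backend, the rank-0 trainer waits until the other
    # trainers' endpoints are ready before the parallel context is initialized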
    if backend != "heter":
        other_endpoints = strategy.trainer_endpoints[:]
        other_endpoints.remove(strategy.current_endpoint)
        if not is_cpu_only and strategy.local_rank == 0:
            wait_server_ready(other_endpoints)

    parallel_helper._init_parallel_ctx()

    # 5: init gloo context (step 2: gloo init)
    # init_gloo is divided into two parts because nccl and gloo
    # each look for free ports separately, which sometimes
    # leads to port conflicts.
    if (is_cpu_only or backend == "heter") and parallel_env.rank == 0:
        # compared to init_gloo, we don't need to
        # init gloo here, because we do this in _init_parallel_ctx;
        http_server_d["running"] = False
        http_server.join()

    elif init_gloo:
        wait_server_ready([parallel_env.trainer_endpoints[0]])
        gloo_strategy = core.GlooParallelStrategy()
        gloo_strategy.rank = parallel_env.rank
        gloo_strategy.rank_num = parallel_env.world_size
        gloo_strategy.ip_address = ep_rank_0[0]
        gloo_strategy.ip_port = int(ep_rank_0[1])
        default_init_timeout_seconds = 3600
        default_run_timeout_seconds = 9999999
        gloo_strategy.init_seconds = default_init_timeout_seconds
        gloo_strategy.run_seconds = default_run_timeout_seconds
        gloo = core.GlooParallelContext(gloo_strategy)
        gloo.init()
        if parallel_env.rank == 0:
            http_server_d["running"] = False
            http_server.join()
    return group


def get_rank(group=None):
    """
    Returns the rank of the current trainer in the given group; ranks are consecutive integers in [0, ``world_size``).
    If no group is given, the global group is used by default.

    Args:
        group (Group, optional): The communication group from which to get the rank of the current trainer. The global group is used by default if group is None.

    Returns:
        (int) The rank of the current trainer in the given group. Returns -1 if the process is not part of the given group.

    Warning:
        Argument ``group`` is only supported in dygraph mode.

    Examples:
        .. code-block:: python

            # Execute this script using distributed launch with a one-card config.
            import paddle
            import paddle.distributed as dist

            dist.init_parallel_env()
            print("The rank is %d" % dist.get_rank())
            # The rank is 0
    """
    if in_dygraph_mode() and group:
        return group.rank

    assert group is None, "Only support group argument in eager mode."
    return _get_global_parallel_env().rank


def get_world_size(group=None):
    """
    Returns the number of trainers (number of processes participating in the current job) in the given group.
    If no group is given, the global group is used by default.

    Args:
        group (Group, optional): The communication group whose world size is returned. The global group is used by default if group is None.

    Returns:
        (int) The number of trainers in the given group. Returns -1 if the process is not part of the given group.

    Warning:
        Argument ``group`` is only supported in dygraph mode.

    Examples:
        .. code-block:: python

            # Execute this script using distributed launch with a one-card config.
            import paddle
            import paddle.distributed as dist

            dist.init_parallel_env()
            print("The world_size is %d" % dist.get_world_size())
            # The world_size is 1
    """
    if in_dygraph_mode() and group:
        return group.world_size

    assert group is None, "Only support group argument in eager mode."
    return _get_global_parallel_env().world_size