# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import warnings
from multiprocessing import Process  # noqa: F401
from multiprocessing import Manager  # noqa: F401
import time
import sys

from paddle import compat as cpt

# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import _set_expected_place
from paddle.fluid.dygraph import parallel_helper
from paddle.distributed.fleet.launch_utils import check_backend
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready  # noqa: F401
from paddle.distributed.collective import _set_group_map
from paddle.distributed.collective import _set_group_map_by_name
from paddle.distributed.collective import _get_group_map_by_name
from paddle.distributed.collective import _group_map_by_name
from paddle.distributed.collective import _default_group_name
from paddle.distributed.collective import _valid_backend_list
from paddle.distributed.collective import _set_default_backend
from paddle.distributed.collective import _set_default_store
from paddle.distributed.collective import _new_process_group_impl
from paddle.distributed.collective import Group

__all__ = []

ParallelStrategy = core.ParallelStrategy

# NOTE(chenweihang): maintain a global parallel env to avoid
# re-initializing ParallelEnv every time, which improves performance
_global_parallel_env = None


def _get_global_parallel_env():
    global _global_parallel_env
    if _global_parallel_env is None:
        _global_parallel_env = ParallelEnv()
    return _global_parallel_env


def _start_kv_server(port, http_server_d, size):
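    # Serve a KVServer (HTTP key-value store) used for gloo rendezvous until
    # the cross-process flag `http_server_d["running"]` is cleared and the
    # server reports it should stop.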
    from paddle.distributed.fleet.utils.http_server import KVServer
    http_server = KVServer(int(port), size=size)
    http_server.start()
    wait_seconds = 3
    while http_server_d.get("running", False) or not http_server.should_stop():
        time.sleep(wait_seconds)
    http_server.stop()


def _is_cpuonly(backend):
    check_backend(backend)
    if backend in ['auto', 'nccl', 'bkcl', 'hccl', 'heter', 'cncl'] and (
            core.is_compiled_with_cuda() or core.is_compiled_with_xpu() or
            core.is_compiled_with_npu() or core.is_compiled_with_mlu()):
        # the backend is 'auto' (or a device backend) and a matching device
        # library was compiled in, so the default device logic applies and
        # this is not a cpu-only run
        return False
    else:
        return True
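

# Illustrative expectations (the actual results depend on how this Paddle
# build was compiled):
#   _is_cpuonly('gloo')  -> True   # gloo is the cpu-only backend
#   _is_cpuonly('nccl')  -> False  # on a build compiled with CUDA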


def _check_var_exists(var_name):
    var = os.environ.get(var_name, None)
    if var is None:
        raise ValueError("paddle.distributed initialize error, "
                         "environment variable %s is needed, but not set." %
                         var_name)
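

# For example, _check_var_exists("PADDLE_TRAINER_ID") raises a ValueError if
# the process was not started by a Paddle launcher that sets that variable.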


def init_parallel_env():
    """
    Initialize parallel training environment in dynamic graph mode.

    .. note::
        Now initialize both `NCCL` and `GLOO` contexts for communication.

    Args:
        None. The backend used by DataParallel is read from the environment
        variable ``PADDLE_DISTRI_BACKEND`` and should be one of 'gloo' (for
        cpu), 'nccl' (for cuda), 'bkcl' (for xpu), or 'auto' (auto detect,
        the default). Auto detection prefers 'nccl' and 'bkcl' over 'gloo'.

    Returns:
        None
        
    Examples:
        .. code-block:: python
            # required: gpu
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.distributed as dist

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear1 = nn.Linear(10, 10)
                    self._linear2 = nn.Linear(10, 1)
                    
                def forward(self, x):
                    return self._linear2(self._linear1(x))

            def train():
                # 1. initialize parallel environment
                dist.init_parallel_env()

                # 2. create data parallel layer & optimizer
                layer = LinearNet()
                dp_layer = paddle.DataParallel(layer)

                loss_fn = nn.MSELoss()
                adam = opt.Adam(
                    learning_rate=0.001, parameters=dp_layer.parameters())

                # 3. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)
                
                loss.backward()

                adam.step()
                adam.clear_grad()

            if __name__ == '__main__':
                dist.spawn(train)
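
                # Alternatively (hypothetical file name), save this example
                # as train.py and launch multiple processes with:
                #   python -m paddle.distributed.launch train.py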
    """

    # 0. get env & check world size
    global _global_parallel_env
    # when init_parallel_env is called, `_global_parallel_env` needs to be updated
    _global_parallel_env = ParallelEnv()
    parallel_env = _global_parallel_env
    # if not in parallel mode, `init_parallel_env` does nothing
    if parallel_env.world_size < 2:
        warnings.warn(
            "Currently not a parallel execution environment, `paddle.distributed.init_parallel_env` will not do anything."
        )
        return
    # NOTE(xiongkun): support cpu gloo only; set this environment variable
    #                 to enable cpu-only gloo parallel training
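    # e.g. run `export PADDLE_DISTRI_BACKEND=gloo` before launching to force
    # cpu-only gloo training ('auto' is the default)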
    backend = os.environ.get('PADDLE_DISTRI_BACKEND', 'auto')
    is_cpu_only = _is_cpuonly(backend)
    # 1. device check: unless cpu-only, the build must support gpu/xpu/npu/mlu
    if not (is_cpu_only or core.is_compiled_with_cuda() or
            core.is_compiled_with_xpu() or core.is_compiled_with_npu() or
            core.is_compiled_with_mlu()):
        raise NotImplementedError(
            "If you want to use CPU-only version, please use 'gloo' as backend")

    if not is_cpu_only and core.is_compiled_with_cuda():
        _check_var_exists("FLAGS_selected_gpus")
        backend = "nccl" if backend == "auto" else backend
    elif not is_cpu_only and core.is_compiled_with_xpu():
        _check_var_exists('FLAGS_selected_xpus')
        backend = "bkcl" if backend == "auto" else backend
    elif not is_cpu_only and core.is_compiled_with_npu():
        _check_var_exists('FLAGS_selected_npus')
        backend = "hccl" if backend == "auto" else backend
    elif not is_cpu_only and core.is_compiled_with_mlu():
        _check_var_exists('FLAGS_selected_mlus')
        backend = "cncl" if backend == "auto" else backend

    _check_var_exists("PADDLE_TRAINER_ID")
    _check_var_exists("PADDLE_CURRENT_ENDPOINT")
    _check_var_exists("PADDLE_TRAINERS_NUM")
    _check_var_exists("PADDLE_TRAINER_ENDPOINTS")

    # NOTE(chenweihang): [ why config global place here? ]
    # dygraph mode is now the default, so users will not call
    # `dygraph.guard` or `enable_dygraph` directly; if they want to
    # switch the default place, they need to call a function to change
    # it, so here we just set the correct place for them
    if is_cpu_only:
        place = core.CPUPlace()
    elif core.is_compiled_with_cuda():
        place = core.CUDAPlace(parallel_env.device_id)
    elif core.is_compiled_with_xpu():
        place = core.XPUPlace(parallel_env.device_id)
    elif core.is_compiled_with_npu():
        place = core.NPUPlace(parallel_env.device_id)
    elif core.is_compiled_with_mlu():
        place = core.MLUPlace(parallel_env.device_id)

    _set_expected_place(place)

    group = None
    if backend in _valid_backend_list and in_dygraph_mode():
        if _default_group_name in _get_group_map_by_name():
            return _get_group_map_by_name()[_default_group_name]
        _set_default_backend(backend)
        rank = int(os.getenv("PADDLE_TRAINER_ID"))
        world_size = int(os.getenv("PADDLE_TRAINERS_NUM"))
        assert rank >= 0 and world_size > rank and world_size > 1, (
            "rank must be non-negative and world_size must be the "
            "maximum rank plus one. Moreover, at least two processes are "
            "required to create a process group.")
        master_addr = os.getenv("MASTER_ADDR", None)
        master_port = os.getenv("MASTER_PORT", None)
        endpoints = None
        if not master_addr or not master_port:
            endpoints = os.getenv("PADDLE_MASTER", None)
        if endpoints is None:
            endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',')[0]
        assert endpoints, (
            "The environment variables 'MASTER_ADDR' and 'MASTER_PORT' "
            "must be specified, for example 'export MASTER_ADDR=127.0.0.1' "
            "and 'export MASTER_PORT=54612'. Or you can start your training "
            "with the paddle.distributed.run module.")
        master_addr, master_port = endpoints.split(":")
        master_port = int(master_port)
        is_master = rank == 0
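        # rendezvous: rank 0 hosts a TCPStore at master_addr:master_port and
        # every rank connects to it to exchange bootstrap information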
        default_store = core.TCPStore(master_addr, master_port, is_master,
                                      world_size)
        _set_default_store(default_store)
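        # build the backend-specific process group implementation (e.g. a
        # NCCL-based group when backend == "nccl") spanning all ranks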
        pg = _new_process_group_impl(
            backend,
            default_store,
            rank,
            world_size,
            _default_group_name,
            pg_options=None)
        ranks = list(range(world_size))
        group = Group(
            rank,
            world_size,
            id=0,
            ranks=ranks,
            pg=pg,
            name=_default_group_name)
        _set_group_map_by_name(_default_group_name, group)
        _set_group_map(0, group)
        parallel_helper._set_parallel_ctx(True)
        return group

    node_num = set([i.split(":")[0] for i in parallel_env.trainer_endpoints])
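    # node_num is the set of unique host IPs across all trainer endpoints;
    # len(node_num) is the number of participating nodes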
    # 3. init gloo context (step 1: start the http server)
    init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
    if is_cpu_only or init_gloo or backend == "heter":
        ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
        manager = Manager()
        # global dict to store status
        http_server_d = manager.dict()
        http_server_d["running"] = False
        if parallel_env.rank == 0:
            # The scope for worker used by http server is '_worker'
            size = {'_worker': parallel_env.world_size}
            if backend == "heter":
                size = {'_worker': len(node_num)}
            http_server = Process(
                target=_start_kv_server,
                args=(int(ep_rank_0[1]), http_server_d, size))
            http_server.daemon = True
            http_server_d["running"] = True
            http_server.start()

    # 4. init NCCL ParallelStrategy
    strategy = ParallelStrategy()
    if parallel_helper._is_parallel_ctx_initialized():
        warnings.warn("The parallel environment has been initialized.")
    strategy.nranks = parallel_env.world_size
    strategy.local_rank = parallel_env.rank
    strategy.trainer_endpoints = parallel_env.trainer_endpoints
    strategy.current_endpoint = parallel_env.current_endpoint
    strategy.nrings = parallel_env.nrings

    # init nccl or hccl or bkcl or heter context
    if is_cpu_only:
        parallel_helper._set_parallel_ctx(
            core.GLOOParallelContext(strategy, place))
    elif backend == "heter":
        parallel_helper._set_parallel_ctx(
            core.HeterParallelContext(strategy, parallel_env.device_id))
    elif core.is_compiled_with_cuda():
        parallel_helper._set_parallel_ctx(
            core.NCCLParallelContext(strategy, place))
    elif core.is_compiled_with_xpu():
        parallel_helper._set_parallel_ctx(
            core.BKCLParallelContext(strategy, place))
    elif core.is_compiled_with_npu():
        parallel_helper._set_parallel_ctx(
            core.HCCLParallelContext(strategy, place))
    elif core.is_compiled_with_mlu():
        parallel_helper._set_parallel_ctx(
            core.CNCLParallelContext(strategy, place))

    if backend != "heter":
        other_endpoints = strategy.trainer_endpoints[:]
        other_endpoints.remove(strategy.current_endpoint)
        if not is_cpu_only and strategy.local_rank == 0:
            wait_server_ready(other_endpoints)

    parallel_helper._init_parallel_ctx()

    # 5. init gloo context (step 2: gloo init)
    # gloo initialization is split into two parts because nccl and gloo
    # separately look for free ports, which sometimes leads to port
    # conflicts
    if (is_cpu_only or backend == "heter") and parallel_env.rank == 0:
        # compared with init_gloo, we don't need to init gloo here,
        # because _init_parallel_ctx already did it;
        http_server_d["running"] = False
        http_server.join()
    elif init_gloo:
        wait_server_ready([parallel_env.trainer_endpoints[0]])
        gloo_strategy = core.GlooParallelStrategy()
        gloo_strategy.rank = parallel_env.rank
        gloo_strategy.rank_num = parallel_env.world_size
        gloo_strategy.ip_address = ep_rank_0[0]
        gloo_strategy.ip_port = int(ep_rank_0[1])
        default_init_timeout_seconds = 3600
        default_run_timeout_seconds = 9999999
        gloo_strategy.init_seconds = default_init_timeout_seconds
        gloo_strategy.run_seconds = default_run_timeout_seconds
        gloo = core.GlooParallelContext(gloo_strategy)
        gloo.init()
        if parallel_env.rank == 0:
            http_server_d["running"] = False
            http_server.join()
    return group


def get_rank():
    """
    Returns the rank of the current trainer.

    Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ID``.
    The default value is 0.

    Returns:
        (int) The rank of the current trainer.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.distributed as dist

            # execute this command in terminal: export PADDLE_TRAINER_ID=0
            print("The rank is %d" % dist.get_rank())
            # The rank is 0
    """
    return _get_global_parallel_env().rank


def get_world_size():
    """
    Returns the number of trainers (the number of processes participating in the current job).

    Its value is equal to the value of the environment variable ``PADDLE_TRAINERS_NUM``.
    The default value is 1.

    Returns:
        (int) The number of trainers.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.distributed as dist

            # execute this command in terminal: export PADDLE_TRAINERS_NUM=4
            print("The world_size is %d" % dist.get_world_size())
            # The world_size is 4
    """
    return _get_global_parallel_env().world_size