# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import warnings
from multiprocessing import Process  # noqa: F401
from multiprocessing import Manager  # noqa: F401
import time
import sys

from paddle import compat as cpt

# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import _set_expected_place
from paddle.fluid.dygraph import parallel_helper
from paddle.distributed.fleet.launch_utils import check_backend
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready  # noqa: F401

__all__ = []

ParallelStrategy = core.ParallelStrategy

# NOTE(chenweihang): Maintain a global parallel env to avoid 
# initializing ParallelEnv every time and improve performance
_global_parallel_env = None


def _get_global_parallel_env():
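    """Return the cached global ParallelEnv, creating it on first use."""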
    global _global_parallel_env
    if _global_parallel_env is None:
        _global_parallel_env = ParallelEnv()
    return _global_parallel_env


def _start_kv_server(port, http_server_d, size):
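    """Start a blocking KV HTTP server on ``port`` for gloo initialization;
    it polls until the ``running`` flag in ``http_server_d`` is cleared and
    the server reports it can stop, then shuts the server down."""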
    from paddle.distributed.fleet.utils.http_server import KVServer
    http_server = KVServer(int(port), size=size)
    http_server.start()
    wait_seconds = 3
    while http_server_d.get("running", False) or not http_server.should_stop():
        time.sleep(wait_seconds)
    http_server.stop()


def _is_cpuonly(backend):
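    """Return True if training should run CPU-only with the gloo backend."""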
    check_backend(backend)
    if backend in ['auto', 'nccl', 'bkcl', 'hccl'] and (
            core.is_compiled_with_cuda() or core.is_compiled_with_xpu() or
            core.is_compiled_with_npu()):

        # 'auto' (or a device backend) was passed and cuda/xpu/npu is
        # available, so use the default device logic and return False
        return False
    else:
        return True


def init_parallel_env():
    """
    Initialize parallel training environment in dynamic graph mode.

    .. note::
        Now initialize both `NCCL` and `GLOO` contexts for communication.

    Args:
        backend (string): A string representing the backend used by DataParallel,
            should be one of 'gloo'(for cpu), 'nccl'(for cuda), 'bkcl'(for xpu), 'auto'(auto detect).
            Auto detection prefers 'nccl' and 'bkcl' over 'gloo'.

    Returns:
        None
        
    Examples:
        .. code-block:: python
            # required: gpu
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.distributed as dist

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear1 = nn.Linear(10, 10)
                    self._linear2 = nn.Linear(10, 1)
                    
                def forward(self, x):
                    return self._linear2(self._linear1(x))

            def train():
                # 1. initialize parallel environment
                dist.init_parallel_env()

                # 2. create data parallel layer & optimizer
                layer = LinearNet()
                dp_layer = paddle.DataParallel(layer)

                loss_fn = nn.MSELoss()
                adam = opt.Adam(
                    learning_rate=0.001, parameters=dp_layer.parameters())

                # 3. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)
                
                loss.backward()

                adam.step()
                adam.clear_grad()

            if __name__ == '__main__':
                dist.spawn(train)
    """

    # 0. get env & check world size
    global _global_parallel_env
    # when call init_parallel_env, need update `_global_parallel_env`
    _global_parallel_env = ParallelEnv()
    parallel_env = _global_parallel_env
    # if not parallel, `init_parallel_env` do nothing
    if parallel_env.world_size < 2:
        warnings.warn(
            "Currently not a parallel execution environment, `paddle.distributed.init_parallel_env` will not do anything."
        )
        return
    # NOTE(xiongkun): to support CPU-only gloo training, this environment
    #                 variable can be set to enable CPU-only parallel training
    backend = os.environ.get('PADDLE_DISTRI_BACKEND', 'auto')
    is_cpu_only = _is_cpuonly(backend)
    # 1. device check: must be CPU-only (gloo) or compiled with CUDA, XPU, or NPU
    if not (is_cpu_only or core.is_compiled_with_cuda() or
            core.is_compiled_with_xpu() or core.is_compiled_with_npu()):
        raise NotImplementedError(
            "If you want to use CPU-only version, please use 'gloo' as backend")

    # 2. check env
    def _check_var_exists(var_name):
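        """Raise a ValueError if the required environment variable is unset."""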
        var = os.environ.get(var_name, None)
        if var is None:
            raise ValueError("paddle.distributed initialize error, "
                             "environment variable %s is needed, but not set." %
                             var_name)

    if not is_cpu_only and core.is_compiled_with_cuda():
        _check_var_exists("FLAGS_selected_gpus")
    elif not is_cpu_only and core.is_compiled_with_xpu():
        _check_var_exists('FLAGS_selected_xpus')

    _check_var_exists("PADDLE_TRAINER_ID")
    _check_var_exists("PADDLE_CURRENT_ENDPOINT")
    _check_var_exists("PADDLE_TRAINERS_NUM")
    _check_var_exists("PADDLE_TRAINER_ENDPOINTS")

    # 3: init gloo context (step 1: http server start)
    init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
    if is_cpu_only or init_gloo:
        ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
        manager = Manager()
        # global dict to store status
        http_server_d = manager.dict()
        http_server_d["running"] = False
        if parallel_env.rank == 0:
            # The scope for worker used by http server is '_worker'
            size = {'_worker': parallel_env.world_size}
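            # rank 0 hosts the KV server in a daemon process listening on
            # the port of the rank-0 endpoint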
            http_server = Process(
                target=_start_kv_server,
                args=(int(ep_rank_0[1]), http_server_d, size))
            http_server.daemon = True
            http_server_d["running"] = True
            http_server.start()

    # 4. init ParallelStrategy (used by the nccl/bkcl/hccl/gloo contexts below)
    strategy = ParallelStrategy()
    if parallel_helper._is_parallel_ctx_initialized():
        warnings.warn("The parallel environment has been initialized.")
    strategy.nranks = parallel_env.world_size
    strategy.local_rank = parallel_env.rank
    strategy.trainer_endpoints = parallel_env.trainer_endpoints
    strategy.current_endpoint = parallel_env.current_endpoint
    strategy.nrings = parallel_env.nrings

    # NOTE(chenweihang): [ why config global place here? ]
    # dygraph mode is the default mode, so users will not call
    # `dygraph.guard` or `enable_dygraph` directly; if they want to
    # switch the default place, they need to call a function to
    # change it, so here we just set the correct place for users
    if is_cpu_only:
        place = core.CPUPlace()
    elif core.is_compiled_with_cuda():
        place = core.CUDAPlace(parallel_env.device_id)
    elif core.is_compiled_with_xpu():
        place = core.XPUPlace(parallel_env.device_id)
    elif core.is_compiled_with_npu():
        place = core.NPUPlace(parallel_env.device_id)

    _set_expected_place(place)
    # init nccl or bkcl context
    if is_cpu_only:
        parallel_helper._set_parallel_ctx(
            core.GLOOParallelContext(strategy, place))
    elif core.is_compiled_with_cuda():
        parallel_helper._set_parallel_ctx(
            core.NCCLParallelContext(strategy, place))
    elif core.is_compiled_with_xpu():
        parallel_helper._set_parallel_ctx(
            core.BKCLParallelContext(strategy, place))
    elif core.is_compiled_with_npu():
        parallel_helper._set_parallel_ctx(
            core.HCCLParallelContext(strategy, place))

    other_endpoints = strategy.trainer_endpoints[:]
    other_endpoints.remove(strategy.current_endpoint)
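    # rank 0 waits until every other trainer's endpoint is reachable
    # before initializing the communication context below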
    if not is_cpu_only and strategy.local_rank == 0:
        wait_server_ready(other_endpoints)

    parallel_helper._init_parallel_ctx()
    # 5: init gloo context (step 2: gloo init)
    # init_gloo is divided into two parts because nccl and gloo
    # separately look for free ports, which sometimes
    # leads to port conflicts
    if is_cpu_only and parallel_env.rank == 0:
        # compared to init_gloo, we don't need to
        # init gloo, because we do this in _init_parallel_ctx;
        http_server_d["running"] = False
        http_server.join()
    elif init_gloo:
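        # wait for the rank-0 KV http server to be reachable before gloo init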
        wait_server_ready([parallel_env.trainer_endpoints[0]])
        gloo_strategy = core.GlooParallelStrategy()
        gloo_strategy.rank = parallel_env.rank
        gloo_strategy.rank_num = parallel_env.world_size
        gloo_strategy.ip_address = ep_rank_0[0]
        gloo_strategy.ip_port = int(ep_rank_0[1])
        default_init_timeout_seconds = 3600
        default_run_timeout_seconds = 9999999
        gloo_strategy.init_seconds = default_init_timeout_seconds
        gloo_strategy.run_seconds = default_run_timeout_seconds
        gloo = core.GlooParallelContext(gloo_strategy)
        gloo.init()
        if parallel_env.rank == 0:
            http_server_d["running"] = False
            http_server.join()


def get_rank():
    """
    Returns the rank of current trainer.

    Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ID`` . 
    The default value is 0.

    Returns:
        (int) The rank of current trainer.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.distributed as dist

            # execute this command in terminal: export PADDLE_TRAINER_ID=0
            print("The rank is %d" % dist.get_rank())
            # The rank is 0
    """
    return _get_global_parallel_env().rank


def get_world_size():
    """
    Returns the number of trainers (number of processes participating in current job).

    Its value is equal to the value of the environment variable ``PADDLE_TRAINERS_NUM`` . 
    The default value is 1.

    Returns:
        (int) The number of trainers.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.distributed as dist

            # execute this command in terminal: export PADDLE_TRAINERS_NUM=4
            print("The world_size is %d" % dist.get_world_size())
            # The world_size is 4
    """
    return _get_global_parallel_env().world_size