# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function, division

import multiprocessing
import os
import signal
import six
import sys
import warnings

from paddle.distributed.utils import _print_arguments, _prepare_trainer_env, get_host_name_ip
from paddle.distributed.cloud_utils import get_cluster_and_pod
from paddle.distributed.fleet.cloud_utils import use_paddlecloud
from paddle.device import get_device

# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import _cpu_num, set_flags


class ParallelEnvArgs(object):
    def __init__(self):
        # Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..
        self.cluster_node_ips = None

        # The current node ip.
        self.node_ip = None

        # whether to use paddlecloud platform to run your multi-process job.
        # If false, no need to set this argument.
        self.use_paddlecloud = None

        # The trainer's started port on a single node
        self.started_port = None

        # Print the config or not
        self.print_config = True

        # It's for gpu training and the training processes will run
        # on the selected_gpus, with each process bound to a single GPU.
        # If it's not set, this module will use all the visible gpu cards
        # for training.
        self.selected_gpus = None


def _py_supported_check():
    if not sys.version_info >= (3, 4):
        raise RuntimeError(
            "Using `paddle.distributed.spawn` to start parallel training "
            "requires python version 3.4 or higher. If your python "
            "version is lower than this, please use "
            "`paddle.distributed.launch` instead.")


def _options_valid_check(options):
    # `print_config` is kept as a debug option, not shown to users
    supported_options = ['start_method', 'ips', 'gpus', 'print_config']
    deprecated_options = [
        'selected_gpus', 'started_port', 'cluster_node_ips', 'node_ip',
        'use_paddlecloud'
    ]
    for key in options:
        if key not in supported_options:
            if key in deprecated_options:
                warnings.warn(
                    "The config option (%s) of `paddle.distributed.spawn` is deprecated. "
                    "Please use the latest config options stated in the `spawn` API documentation."
                    % key, DeprecationWarning)
            else:
                raise ValueError(
                    "The config option (%s) of `paddle.distributed.spawn` is not supported."
                    % key)


def _get_node_ip(ips):
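    # A single ip in `ips` is used directly; with multiple node ips,
    # the current node's ip is resolved from the local hostname.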
    node_ip = None
    node_ips = [x.strip() for x in ips.split(',')]
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        _, node_ip = get_host_name_ip()
    return node_ip


def _get_subprocess_env_list(nprocs, options):
    # construct processes env list
    processes_env_list = []

    # get args from kwargs
    args = ParallelEnvArgs()

    # deal with `ips`
    args.cluster_node_ips = options.get('ips', None)
    if args.cluster_node_ips is None:
        args.cluster_node_ips = options.get('cluster_node_ips', None)
        if args.cluster_node_ips is None:
            args.cluster_node_ips = "127.0.0.1"

    # deal with `gpus`
    # set default selected gpus
    # e.g. if the nprocs is 4, the selected gpus is "0,1,2,3"
    # NOTE(chenweihang): [ why not use FLAGS_selected_gpus directly? ]
    # because FLAGS_selected_gpus may be used elsewhere; if we set
    # FLAGS_selected_gpus to `0,1,2,3` here, it may cause errors
    # when using `ParallelEnv`
    # NOTE(chenweihang): use absolute gpu card id
    args.selected_gpus = options.get('gpus', None)
    if args.selected_gpus is None:
        args.selected_gpus = options.get('selected_gpus', None)
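    # For example, with CUDA_VISIBLE_DEVICES=4,5,6,7 and nprocs=2, the
    # default selected_gpus computed below becomes "4,5"; passing
    # gpus='6,7' overrides the default selection.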
    env_devices = os.getenv("CUDA_VISIBLE_DEVICES", None)
    if env_devices is None or env_devices == "":
        env_devices_list = [
            str(x) for x in six.moves.range(core.get_cuda_device_count())
        ]
    else:
        env_devices_list = env_devices.split(',')
    if args.selected_gpus is None:
        if len(env_devices_list) < nprocs:
            raise RuntimeError(
                "the number of visible devices(%d) is less than the number "
                "of spawn processes(%d), please ensure that the correct "
                "`nprocs` argument is passed or the environment variable "
                "`CUDA_VISIBLE_DEVICES` is correctly configured." %
                (len(env_devices_list), nprocs))
        args.selected_gpus = ",".join(
            [str(env_devices_list[x]) for x in range(0, nprocs)])
    else:
        selected_gpu_list = args.selected_gpus.split(',')
        if len(selected_gpu_list) != nprocs:
            raise ValueError(
                "The number of selected gpus(%d) is not equal to "
                "the number of spawn processes(%d), please ensure that the "
                "correct `nprocs` and `gpus` arguments are passed." %
                (len(selected_gpu_list), nprocs))
        for card_id in selected_gpu_list:
            if card_id not in env_devices_list:
                raise ValueError("The selected gpu card %s cannot found in "
                                 "CUDA_VISIBLE_DEVICES (%s)." %
                                 (card_id, ",".join(env_devices_list)))

    # set other inner args
    args.node_ip = options.get('node_ip', None)
    if args.node_ip is None:
        args.node_ip = _get_node_ip(args.cluster_node_ips)

    args.started_port = options.get('started_port', None)

    args.use_paddlecloud = options.get('use_paddlecloud', None)
    if args.use_paddlecloud is None:
        args.use_paddlecloud = use_paddlecloud()

    # get cluster and pod config
    cluster, pod = get_cluster_and_pod(args)

    # prepare subprocess env list
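    # each env dict holds the per-trainer environment settings (e.g.
    # FLAGS_selected_gpus and the PADDLE_* trainer variables) that
    # `_set_trainer_env` will later export inside the subprocess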
    for trainer in pod.trainers:
        processes_env_list.append(_prepare_trainer_env(cluster, trainer))

    # [Debug] print config
    args.print_config = options.get('print_config', False)
    if args.print_config:
        _print_arguments(args)

    return processes_env_list


def _remove_risky_env():
    # remove useless env vars
    # no copy, each process will hold env vars itself
    os.environ.pop("http_proxy", None)
    os.environ.pop("https_proxy", None)


def _set_trainer_env(env_dict):
    # NOTE(chenweihang): [ Why need to set FLAGS_selected_gpus here? ]
    # When the child process starts, it inherits the configuration of the
    # main process and sets the FLAGS once, but the environment variable has
    # not been set at this time, which leads to FLAGS_selected_gpus
    # staying the same as in the main process (usually empty), so manually update the flags here
    set_flags({'FLAGS_selected_gpus': env_dict['FLAGS_selected_gpus']})
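    # export the remaining prepared trainer env vars into this
    # process's own environment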
    for var_name in env_dict:
        os.environ[var_name] = env_dict[var_name]


def _func_wrapper(func, args, error_queue, return_queue, env_dict):
    try:
        # config subprocess environment variables
        _remove_risky_env()
        _set_trainer_env(env_dict)
        # execute function
        result = func(*args)
        # record function return value
        return_queue.put(result)
    except KeyboardInterrupt:
        pass
    except Exception:
        import traceback
        error_queue.put(traceback.format_exc())
        sys.exit(1)


class MultiprocessContext(object):
    def __init__(self, processes, error_queues, return_queues):
        _py_supported_check()
        self.error_queues = error_queues
        # NOTE(chenweihang): The `spawn` method is mainly used
        # to wrap the outermost execution function of the program for
        # parallel execution. Generally, the return value is not of concern,
        # but if the user needs to obtain the return value, they can get
        # the return result of each process from context.return_queues
        self.return_queues = return_queues
        self.processes = processes
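        # map each process's sentinel (a handle that becomes ready when
        # the process exits) to its index, so `join` can tell which
        # process finished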
        self.sentinels = {
            process.sentinel: index
            for index, process in enumerate(processes)
        }

    def join(self, timeout=None):
        if len(self.sentinels) == 0:
            return True

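        # wait until some process exits (or the timeout expires); `wait`
        # returns the sentinels of the processes that are done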
        ready = multiprocessing.connection.wait(
            self.sentinels.keys(), timeout=timeout)

        error_index = None
        for sentinel in ready:
            index = self.sentinels.pop(sentinel)
            process = self.processes[index]
            process.join()
            if process.exitcode != 0:
                error_index = index
                break

        if error_index is None:
            return len(self.sentinels) == 0

        for process in self.processes:
            if process.is_alive():
                process.terminate()
            process.join()

        self._throw_exception(error_index)

    def _throw_exception(self, error_index):
        if self.error_queues[error_index].empty():
            exitcode = self.processes[error_index].exitcode
            if exitcode < 0:
                name = signal.Signals(-exitcode).name
                raise Exception("Process %d terminated with signal %s." %
                                (error_index, name))
            else:
                raise Exception("Process %d terminated with exit code %d." & (
                    error_index, exitcode))

        original_trace = self.error_queues[error_index].get()
        msg = "\n\n----------------------------------------------\n" \
              "Process %d terminated with the following error:\n" \
              "----------------------------------------------\n\n" % error_index
        msg += original_trace
        raise Exception(msg)


def spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options):
    """
    Start multiple processes with ``spawn`` method for parallel training.

    .. note::
        ``spawn`` now only supports GPU collective mode.

    Args:
        func (function): The target function called by the spawned process.
            This function needs to be picklable, so it must be defined
            at the top level of a module.
        args (tuple, optional): Arguments passed to ``func``.
        nprocs (int, optional): Number of processes to start. Default: -1.
            When nprocs is -1, the available devices are obtained from
            the environment variables when the model is executed: if using GPU,
            the currently available device IDs are obtained from the environment
            variable CUDA_VISIBLE_DEVICES; if using CPU, the currently available
            CPU count is obtained from the environment variable CPU_NUM,
            e.g. export CPU_NUM=4. If the environment variable is not set,
            the spawn method will add a default value to the environment
            variable and set its value to 1.
        join (bool, optional): Perform a blocking join on all spawned processes.
            Default: True.
        daemon (bool, optional): The spawned processes' daemon flag. Default: False.
        **options(dict, optional): Other initial parallel execution environment
            configuration options. The following options are currently supported:
            (1) start_method (string): the way to start a process.
            The start method can be ``spawn`` , ``fork`` , ``forkserver`` .
            Because the CUDA runtime does not support the ``fork`` start method,
            when using CUDA in subprocesses, the process should be started by the
            ``spawn`` or ``forkserver`` method. Default: "spawn" ;
            (2) gpus (string): The training process will run on the
            selected gpus, such as "0,1,2,3". Default: None;
            (3) ips (string): Paddle cluster nodes ips, such as
            "192.168.0.16,192.168.0.17". Default: "127.0.0.1" .

    Returns:
        ``MultiprocessContext`` object, it holds the spawned processes.

    Examples:
        .. code-block:: python

            from __future__ import print_function

            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.distributed as dist

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear1 = nn.Linear(10, 10)
                    self._linear2 = nn.Linear(10, 1)
                    
                def forward(self, x):
                    return self._linear2(self._linear1(x))

            def train(print_result=False): 
                # 1. initialize parallel environment
                dist.init_parallel_env()

                # 2. create data parallel layer & optimizer
                layer = LinearNet()
                dp_layer = paddle.DataParallel(layer)

                loss_fn = nn.MSELoss()
                adam = opt.Adam(
                    learning_rate=0.001, parameters=dp_layer.parameters())

                # 3. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)
                
                if print_result is True:
                    print("loss:", loss.numpy())
                
                loss.backward()

                adam.step()
                adam.clear_grad()

            # Usage 1: only pass the function.
            # If your training method needs no arguments and
            # uses all visible devices for parallel training.
            if __name__ == '__main__':
                dist.spawn(train)

            # Usage 2: pass function and arguments.
            # If your training method needs some arguments and
            # uses all visible devices for parallel training.
            if __name__ == '__main__':
                dist.spawn(train, args=(True,))

            # Usage 3: pass function, arguments and nprocs.
            # If your training method needs some arguments and
            # only uses part of the visible devices for parallel training.
            # If your machine holds 8 cards {0,1,2,3,4,5,6,7},
            # this case will use cards {0,1}; if you set
            # CUDA_VISIBLE_DEVICES=4,5,6,7, this case will use
            # cards {4,5}.
            if __name__ == '__main__':
                dist.spawn(train, args=(True,), nprocs=2)

            # Usage 4: pass function, arguments, nprocs and gpus.
            # If your training method needs some arguments,
            # only uses part of the visible devices for parallel training,
            # and you can't set your machine's environment variable
            # CUDA_VISIBLE_DEVICES (e.g. it is None or covers all cards
            # {0,1,2,3,4,5,6,7}), you can pass `gpus` to
            # select the GPU cards you want to use. For example,
            # this case will use cards {4,5} if your machine holds 8 cards.
            if __name__ == '__main__':
                dist.spawn(train, args=(True,), nprocs=2, gpus='4,5')
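
            # Usage 5 (a sketch, not in the original usages): pass
            # join=False to get the context back without blocking, then
            # wait manually; each process's return value can be read
            # from context.return_queues.
            if __name__ == '__main__':
                context = dist.spawn(train, args=(True,), nprocs=2, join=False)
                while not context.join():
                    pass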
    """
    # NOTE(chenweihang): [ why only supports python3.4+ ? ]
    # Python has supported setting the child process start method
    # since 3.4. Earlier versions can only use the default start
    # method; on Unix the default is fork, which cannot
    # support the CUDA runtime in multi-process mode
    _py_supported_check()

    # Give an error hint when a user passes a configuration option
    # that does not exist
    _options_valid_check(options)

    # get default nprocs
    if nprocs == -1:
        device = get_device()
        if device == 'cpu':
            # TODO: cpu parallel is not supported yet
            nprocs = _cpu_num()
        else:
            nprocs = core.get_cuda_device_count()

    # NOTE(chenweihang): [ why need to get cluster info before run? ]
    # when using `paddle.distributed.spawn` to start parallel training,
    # we should get the cluster info before starting the subprocesses, and
    # pass the correct info to each subprocess
    procs_env_list = _get_subprocess_env_list(nprocs, options)

    # start processes
    # NOTE(chenweihang): [ why is the default start method spawn? ]
    # The CUDA runtime does not support the fork start method;
    # either the spawn or forkserver start method is required
    # to use CUDA in subprocesses.
    start_method = options.get('start_method', None)
    if start_method is None:
        start_method = 'spawn'
    mp = multiprocessing.get_context(start_method)
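    # the returned context exposes the Process/SimpleQueue constructors
    # bound to the chosen start method, so the workers below are
    # created with it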

    error_queues = []
    return_queues = []
    processes = []
    for i in range(nprocs):
        error_queue = mp.SimpleQueue()
        return_queue = mp.SimpleQueue()
        process = mp.Process(
            target=_func_wrapper,
            args=(func, args, error_queue, return_queue, procs_env_list[i]))
        process.daemon = daemon
        process.start()
        error_queues.append(error_queue)
        return_queues.append(return_queue)
        processes.append(process)

    context = MultiprocessContext(processes, error_queues, return_queues)
    if not join:
        return context

    # loop until all processes end
    while not context.join():
        pass

    # finally return context
    return context