# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
fleetrun is a module that spawns multiple distributed
processes on each training node for gpu training and cpu training.
Usage:
    In both single-node and multi-node training, this module
launches a process on each of the given gpu cards or cpu machines.
    GPU training:
    1. for single node training with all visible gpu cards:
       fleetrun your_training_py (arg1 arg2 and all others)
    2. for single node training with [0,4) cards
       fleetrun --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)
    3. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17
        on 192.168.0.16:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    CPU training:
    1. for single node training with multiple servers and workers:
        fleetrun --server_num=2 --worker_num=2 your_training_py (arg1 arg2 and all others)
    2. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers.
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    3. use gloo backend for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers. (workers should specify their ports)
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
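    Elastic training (experimental):
    1. requires an etcd server reachable from every node; the etcd address, np and
       job_id below are illustrative placeholders only:
        fleetrun --elastic_server=127.0.0.1:2379 --np=2 --job_id=demo_job \
            --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)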
"""

from __future__ import print_function

import shutil
import sys
import tempfile
from sys import version
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet import launch_utils
import signal

# TODO(danleifeng): Don't import * from a module
from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils
import paddle.distributed.fleet.ascend_utils as ascend_utils

from paddle.distributed.fleet.elastic import ElasticManager
from paddle.distributed.fleet.elastic import LauncherInterface
from paddle.distributed.fleet.elastic import ElasticStatus
from paddle.distributed.fleet.elastic import ELASTIC_EXIT_CODE

__all__ = []


def _print_arguments(args):
    print("-----------  Configuration Arguments -----------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")


def _parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description='''start paddle training using multi-process mode.
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
''')
    base_group = parser.add_argument_group("Base Parameters")

    base_group.add_argument(
        "--log_dir",
        type=str,
        default="log",
        help="The path for each process's log. If it's not set, the log will be printed to the default pipe."
    )

    base_group.add_argument(
        "--nproc_per_node",
        type=int,
        default=None,
        help="The number of processes to launch on a node."
        "In gpu training, it should be less or equal to the gpus number of you system(or you set by --gpus). And so each process can"
        " bound to one or average number of gpus.")

    base_group.add_argument(
        "--run_mode",
        type=str,
        default=None,
        help="run mode of the job, can be: collective/ps/ps-heter")

    if fluid.core.is_compiled_with_cuda():
        base_group.add_argument(
            "--gpus",
            type=str,
            default=None,
            help="It's for gpu training."
            "For example:"
            "--gpus=\"0,1,2,3\" will launch four training processes each bound to one gpu."
        )
        base_group.add_argument("--selected_gpus", dest="gpus")

    if fluid.core.is_compiled_with_xpu():
        base_group.add_argument(
            "--xpus",
            type=str,
            default=None,
            help="It's for xpu training. For example: "
            "--xpus=\"0,1,2,3\" will launch four training processes each bound to one xpu."
        )
        base_group.add_argument("--selected_xpus", dest="xpus")

    base_group.add_argument(
        "training_script",
        type=str,
        help="The full path to the single GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script")

    base_group.add_argument('training_script_args', nargs=REMAINDER)

    # Optional arguments for the launch helper
    # for collective
    collective_group = parser.add_argument_group("Collective Parameters")
    collective_group.add_argument(
        "--ips",
        type=str,
        default="127.0.0.1",
        help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")

    ps_group = parser.add_argument_group("Parameter-Server Parameters")
    # for parameter server
    ps_group.add_argument(
        "--servers", type=str, default="", help="User defined servers ip:port")
    ps_group.add_argument(
        "--workers", type=str, default="", help="User defined workers ip:port")
    ps_group.add_argument(
        "--heter_workers",
        type=str,
        default="",
        help="User defined heter workers ip:port")

    ps_group.add_argument("--worker_num", type=int, help="number of workers")
    ps_group.add_argument("--server_num", type=int, help="number of servers")
    ps_group.add_argument(
        "--heter_worker_num", type=int, help="number of heter_workers")
    ps_group.add_argument("--http_port", type=int, help="Gloo http Port")

    # parameters for elastic mode
    elastic_group = parser.add_argument_group("Elastic Parameters")
    elastic_group.add_argument(
        "--elastic_server", type=str, help="etcd server host:port")
    elastic_group.add_argument("--job_id", type=str, help="job unique id")
    elastic_group.add_argument("--np", type=int, help="job pod/node number")
    elastic_group.add_argument("--scale", type=int, default=0, help="scale np")
    elastic_group.add_argument(
        "--host", type=str, help="bind host, default to POD_IP env")
    elastic_group.add_argument(
        "--force", type=bool, default=False, help="update np force")

    return parser.parse_args()


def get_cluster_from_args(args, device_mode, devices_per_proc):
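    # Resolve the local node ip from --ips (or --host), assign one port per local
    # process, and build the trainer endpoint list for every node in the cluster.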
    node_ips = [x.strip() for x in args.ips.split(',')]
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        if args.host:
            node_ip = args.host
        else:
            _, node_ip = get_host_name_ip()

    assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \
        % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format(
        node_ips, node_ip, node_rank))

    free_ports = None
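    # For a purely local run (not on paddlecloud, a single node, no FLAGS_START_PORT),
    # probe the OS for free ports; otherwise hand out a consecutive range starting at
    # FLAGS_START_PORT (default 6070).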
    if not cloud_utils.use_paddlecloud() and len(
            node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None:
        free_ports = find_free_ports(len(devices_per_proc))
        if free_ports is not None:
            free_ports = list(free_ports)
    else:
        start_port = 6070
        if os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))

        free_ports = [
            x for x in range(start_port, start_port + len(devices_per_proc))
        ]

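    # Every node is assumed to expose the same port list; endpoints are grouped per node.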
    trainer_endpoints = []
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                       devices_per_proc)


class CollectiveLauncher(LauncherInterface):
    def __init__(self, args):
        self.args = args
        self.procs = []

    def launch(self):
        logger.info("collective lauchner launch ...")
        args = self.args
        # parse arguments, used for cloud-single-machine and local
        (device_mode,
         devices_per_proc) = launch_utils.get_device_proc_info(args)
        trainers_num = cloud_utils.get_trainers_num()
        logger.debug("parsed from args trainerss_num:{} mode:{} devices:{}".
                     format(trainers_num, device_mode, devices_per_proc))

        cluster = None
        pod = None

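        # The cluster description comes from paddlecloud metadata, an Ascend rank table
        # file, or the plain --ips argument, depending on the environment.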
        start_port = 6170
        if os.environ.get('FLAGS_START_PORT') is not None:
            start_port = os.environ.get('FLAGS_START_PORT')
        if cloud_utils.use_paddlecloud() and trainers_num != 1:
            cluster, pod = cloud_utils.get_cloud_cluster(
                args.ips, device_mode, devices_per_proc, start_port)
            logger.debug("get cluster from cloud:{}".format(cluster))
        elif device_mode == DeviceMode.ASCEND_NPU:
            # for ascend
            cluster, pod = ascend_utils.get_cloud_cluster(
                rank_table_file=os.getenv("RANK_TABLE_FILE", None),
                device_mode=device_mode,
                start_port=start_port)
        else:
            # trainers_num = 1 or not use paddlecloud ips="a,b"
            cluster, pod = get_cluster_from_args(args, device_mode,
                                                 devices_per_proc)
            logger.debug("get cluster from args:{}".format(cluster))

        global_envs = copy.copy(os.environ.copy())
        self.gloo_rendezvous_dir = tempfile.mkdtemp()
        # add gloo env
        global_envs["PADDLE_WITH_GLOO"] = str(
            os.getenv("PADDLE_WITH_GLOO", "0"))
        global_envs["PADDLE_GLOO_RENDEZVOUS"] = "3"
        global_envs["PADDLE_GLOO_FS_PATH"] = self.gloo_rendezvous_dir

        self.procs = start_local_trainers(
            cluster,
            pod,
            training_script=args.training_script,
            training_script_args=args.training_script_args,
            log_dir=args.log_dir,
            envs=global_envs)

        for idx, proc in enumerate(self.procs):
            logger.info("launch proc_id:{} idx:{}".format(proc.proc.pid, idx))

    def stop(self):
        logger.info("collective lauchner stop ...")
        if not self._terminate_procs():
            logger.error("kill process failed")
        if os.path.exists(self.gloo_rendezvous_dir):
            shutil.rmtree(self.gloo_rendezvous_dir)

    def watch(self):
        logger.debug("collective lauchner watch ...")
        for p in self.procs:
            if p.log_fn and p.local_rank == 0:
                pull_worker_log(p)
        ret = self._check_procs()
        return ret


def launch_ps(args, distribute_mode):
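    # On paddlecloud, plain PS jobs can be started directly; PS-Heter jobs first read
    # their endpoints from the cloud-provided environment variables.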
    cloud_flag = cloud_utils.use_paddlecloud()

    # for ps-cpu on paddlecloud
    if cloud_flag and distribute_mode == DistributeMode.PS:
        direct_start(args)
        return
    elif cloud_flag and distribute_mode == DistributeMode.PS_HETER:
        cloud_ps_heter_env_set(args)
        args.workers = os.getenv("PADDLE_TRAINER_ENDPOINTS")
        args.servers = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST")
        args.heter_workers = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST")

    ps_launcher = ParameterServerLauncher(args, distribute_mode)
    ps_launcher.start_ps()
    return


def which_distributed_mode(args):
    if args.run_mode is not None:
        assert args.run_mode in ["collective", "ps", "ps-heter"]

    if args.run_mode == "collective":
        return DistributeMode.COLLECTIVE
    elif args.run_mode == "ps":
        return DistributeMode.PS
    elif args.run_mode == "ps-heter":
        return DistributeMode.PS_HETER

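    # --run_mode was not given: infer the mode from which flags appear on the command line.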
    ps_args = [
        '--worker_num', '--server_num', '--heter_worker_num', '--servers',
        '--workers', '--heter_workers', '--http_port'
    ]
    collective_args = ['--ips']

    ps_heter_args = ["--heter_worker_num", "--heter_workers"]

    has_ps_args = [
        ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1])
    ]
    has_collective_args = [
        co_arg for co_arg in collective_args
        if co_arg in " ".join(sys.argv[1:-1])
    ]

    if len(has_ps_args) > 0 and len(has_collective_args) > 0:
        raise ValueError(
            "Only one mode(Collective or Parameter-Server) can be selected at the same time, but more than one configuration was received."
        )

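    # The accelerator count is only used in the log messages below.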
    if fluid.core.is_compiled_with_cuda():
        accelerators = fluid.core.get_cuda_device_count()
    elif fluid.core.is_compiled_with_npu():
        accelerators = fluid.core.get_npu_device_count()
    elif fluid.core.is_compiled_with_xpu():
        accelerators = fluid.core.get_xpu_device_count()
    else:
        accelerators = 0

    if len(has_ps_args) > 0:
        logger.info(
            "Run parameter-sever mode. pserver arguments:{}, accelerators count:{}".
            format(has_ps_args, accelerators))
        has_ps_heter_args = list(set(has_ps_args) & set(ps_heter_args))
        if len(has_ps_heter_args) > 0:
            return DistributeMode.PS_HETER
        else:
            return DistributeMode.PS
    elif len(has_collective_args) > 0:
        logger.info("Run collective mode. gpu arguments:{}, cuda count:{}".
                    format(has_collective_args, accelerators))
        return DistributeMode.COLLECTIVE
    else:
        if not fluid.core.is_compiled_with_cuda(
        ) and not fluid.core.is_compiled_with_xpu():
            logger.warning(
                "Not found distinct arguments and not compiled with cuda or xpu. Default use ps mode"
            )
            return DistributeMode.PS
        else:
            logger.warning(
                "Not found distinct arguments and compiled with cuda or xpu. Default use collective mode"
            )
            return DistributeMode.COLLECTIVE


def launch():
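    # Entry point behind the fleetrun command described in the module docstring.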
    args = _parse_args()
    logger = get_logger()
    _print_arguments(args)

    distribute_mode = which_distributed_mode(args)
    # TODO(kuizhiqing) support ps later
    if not distribute_mode == DistributeMode.COLLECTIVE:
        launch_ps(args, distribute_mode)
        return

    elastic = ElasticManager(args)

    signal.signal(signal.SIGTERM, elastic.signal_handler)
    signal.signal(signal.SIGABRT, elastic.signal_handler)
    signal.signal(signal.SIGINT, elastic.signal_handler)

    while True:

        # wait for all nodes ready to run
        elastic.wait()

        # run self with specified launcher
        elastic.run(CollectiveLauncher)

        # keep watching the health status of self and be notified of others' failures
        ret = elastic.watch()
        if ret == ElasticStatus.COMPLETED:
            break
        if ret == ElasticStatus.HOLD:
            continue
        if ret == ElasticStatus.EXIT:
            break
        if ret == ElasticStatus.ERROR:
            sys.exit(3)
        if ret == ElasticStatus.RESTART:
            sys.exit(ELASTIC_EXIT_CODE)

    if int(elastic.sigint) > 0:
        sys.exit(128 + int(elastic.sigint))
    else:
        sys.exit(0)


if __name__ == "__main__":
    launch()