# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
15
fleetrun is a module that spawns multiple distributed
16 17
process on each training node for gpu training and cpu training.
Usage:
18
    In both of single node training or multiple node training, this module
19 20 21 22 23 24 25 26
launch a process on each of the given gpu card or cpu machine.
    GPU training:
    1. for single node training with all visible gpu cards:
       fleetrun your_training_py (arg1 arg2 and all others)
    2. for single node training with [0,4) cards
       fleetrun --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)
    3. for multiple node training such as two node:192.168.0.16, 192.168.0.17
        on 192.168.0.16:
27
            fleetrun --ips="192.168.0.16,192.168.0.17" \
28 29 30 31 32 33
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    CPU training:
    1. for single node training with multi servers and workers:
34
        fleetrun --server_num=2 --worker_num=2 your_training_py (arg1 arg2 and all others)
35
    2. for multiple node training such as two node:192.168.0.16, 192.168.0.17 \
36
        with 2 servers and 4 workers.
37
        on 192.168.0.16:
38 39
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
40 41 42
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6171" \
43 44 45 46 47 48 49 50 51 52 53
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    3. use gloo backend for multiple node training such as two node:192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers. (workers should set port)
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68
                your_training_py (arg1 arg2 and all others)
"""

from __future__ import print_function
import sys
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle.fluid as fluid

from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils


def _print_arguments(args):
    print("-----------  Configuration Arguments -----------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")


def _parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description='''start paddle training using multi-process mode.
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
''')

    # Optional arguments for the launch helper
    parser.add_argument(
        "--ips",
        type=str,
        default="127.0.0.1",
        help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")
    parser.add_argument(
        "--gpus",
        type=str,
        default=None,
        help="The gpus to use for training; each process is bound to a single GPU. "
        "If it's not set, this module will use all visible gpu cards for training."
    )

    parser.add_argument(
        "--servers", type=str, default="", help="User defined servers ip:port")
    parser.add_argument(
        "--workers", type=str, default="", help="User defined workers ip:port")
    parser.add_argument("--worker_num", type=int, help="number of workers")

    parser.add_argument("--server_num", type=int, help="number of servers")

    parser.add_argument(
        "--log_dir",
        type=str,
        default="log",
        help="The path for each process's log. If it's not set, the log will be printed to the default pipe."
    )

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help="The full path to the single GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script")

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def get_cluster_from_args(args, gpus):
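    """Build the collective-training cluster description from --ips and the
    selected gpus, assigning one endpoint (ip:port) per training process."""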
    node_ips = [x.strip() for x in args.ips.split(',')]
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        _, node_ip = get_host_name_ip()

    assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \
        % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format(
        node_ips, node_ip, node_rank))

    free_ports = None
    if not cloud_utils.use_paddlecloud() and len(
            node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None:
        free_ports = find_free_ports(len(gpus))
        if free_ports is not None:
            free_ports = list(free_ports)
    else:
        start_port = 6070
        if os.environ.get('FLAGS_START_PORT') is not None:
            # the env var is a string; cast so the port arithmetic below works
            start_port = int(os.environ.get('FLAGS_START_PORT'))

        free_ports = list(range(start_port, start_port + len(gpus)))

    trainer_endpoints = []
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, gpus)


def get_gpus(gpus):
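    """
    Return the list of gpu ids to launch on. When CUDA_VISIBLE_DEVICES is
    set, ids are remapped to be relative to it, e.g. with
    CUDA_VISIBLE_DEVICES=4,5,6,7 and --gpus=4,5,6,7 this returns [0, 1, 2, 3].
    """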
    if gpus is None:
        gpus_num = fluid.core.get_cuda_device_count()
        res_gpus = [str(x) for x in range(0, gpus_num)]
    else:
        cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices is None or cuda_visible_devices == "":
            res_gpus = [x.strip() for x in gpus.split(',')]
        else:
            # change gpus into relative values
            # e.g. CUDA_VISIBLE_DEVICES=4,5,6,7; args.gpus=4,5,6,7;
            # therefore gpus=0,1,2,3
            cuda_visible_devices_list = cuda_visible_devices.split(',')
            for x in gpus.split(','):
                assert x in cuda_visible_devices_list, "Can't find "\
                "your gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
                % (x, cuda_visible_devices)
            res_gpus = [
                cuda_visible_devices_list.index(x.strip())
                for x in gpus.split(',')
            ]
            logger.info("Change selected gpus into relative values. --gpus:{} "
                        "will change into relative gpus:{} according to your "
                        "CUDA_VISIBLE_DEVICES:{}".format(
                            gpus, res_gpus, cuda_visible_devices_list))

    return res_gpus


def launch_collective(args):
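    """Start one trainer process per selected gpu on this node and watch
    them until all local trainers exit."""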
    # parse arguments, used for cloud-single-machine and local
    gpus = get_gpus(args.gpus)
    trainers_num = cloud_utils.get_trainers_num()
    logger.debug("parsed from args trainerss_num:{} gpus:{}".format(
        trainers_num, gpus))

    cluster = None
    pod = None

205 206 207
    start_port = 6170
    if os.environ.get('FLAGS_START_PORT') is not None:
        start_port = os.environ.get('FLAGS_START_PORT')
208
    if cloud_utils.use_paddlecloud() and trainers_num != 1:
209
        cluster, pod = cloud_utils.get_cloud_cluster(args.ips, gpus, start_port)
210
        logger.debug("get cluster from cloud:{}".format(cluster))
211 212 213
    else:
        # trainers_num = 1 or not use paddlecloud ips="a,b"
        cluster, pod = get_cluster_from_args(args, gpus)
214
        logger.debug("get cluster from args:{}".format(cluster))
215 216 217 218 219 220 221 222 223 224 225 226

    procs = start_local_trainers(
        cluster,
        pod,
        training_script=args.training_script,
        training_script_args=args.training_script_args,
        log_dir=args.log_dir)

    while True:
        alive = watch_local_trainers(procs, cluster.trainers_nranks())

        if not alive:
            logger.info("Local processes completed.")
            logger.debug("POD info:{}".format(pod))
            break

        time.sleep(3)


def launch_ps(args):
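    """Start parameter-server (cpu) training: spawn this node's pserver and
    worker processes, wait for the workers, then terminate the pservers."""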
    ports = None
    start_port = 6170
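    # derive server endpoints either from --server_num (ports auto-assigned
    # via get_ports) or from the user-supplied --servers list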
    if args.server_num:
        server_num = args.server_num
        ports = get_ports(server_num, 0)
        server_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])
    else:
        assert args.servers != "", "CPU mode requires either --server_num or --servers to be set."
        server_endpoints = args.servers
    server_endpoints_ips = [
        x.strip().split(":")[0] for x in server_endpoints.split(",")
    ]
    server_endpoints_port = [
        x.strip().split(":")[1] for x in server_endpoints.split(",")
    ]
    server_num = len(server_endpoints_ips)

    if args.worker_num:
        worker_num = args.worker_num
        ports = get_ports(worker_num, server_num)
        worker_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])
    else:
        assert args.workers != "", "CPU mode requires either --worker_num or --workers to be set."
        worker_endpoints = args.workers
    worker_endpoints_ips = [
        x.strip().split(":")[0] for x in worker_endpoints.split(",")
    ]
    worker_num = len(worker_endpoints_ips)
    node_ips = list(set(server_endpoints_ips + worker_endpoints_ips))
    worker_endpoints_len = [
        len(x.strip().split(":")) for x in worker_endpoints.split(",")
    ]
    if 1 in worker_endpoints_len:
        # if no port value in worker_endpoints, will set default port values.
        worker_endpoints_port = range(start_port + server_num,
                                      start_port + server_num + worker_num, 1)
    else:
        worker_endpoints_port = [
            x.strip().split(":")[1] for x in worker_endpoints.split(",")
        ]

    # local train
    if len(set(node_ips)) == 1:
        current_node_ip = node_ips[0]
    else:
        _, current_node_ip = get_host_name_ip()

    assert current_node_ip in node_ips, "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" \
        % (current_node_ip, node_ips)
    node_rank = node_ips.index(current_node_ip)
    logger.debug(
        "parsed from args: node_ips:{} current_node_ip:{} node_rank:{}, server_ports:{}".
        format(node_ips, current_node_ip, node_rank, server_endpoints_port))

    cluster = Cluster(hdfs=None)
    server_rank = 0
    worker_rank = 0
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        for i in range(len(server_endpoints_ips)):
            if ip == server_endpoints_ips[i]:
                server = Trainer()
                server.endpoint = "%s:%s" % (ip, server_endpoints_port[i])
                server.rank = server_rank
                server_rank += 1
                pod.servers.append(server)
        for j in range(len(worker_endpoints_ips)):
            if ip == worker_endpoints_ips[j]:
                worker = Trainer()
                # index with j (this worker's index), not i from the server loop
                worker.endpoint = "%s:%s" % (ip, worker_endpoints_port[j])
                worker.rank = worker_rank
                worker_rank += 1
                pod.workers.append(worker)

        cluster.pods.append(pod)

    pod_rank = node_ips.index(current_node_ip)
    pod = cluster.pods[pod_rank]

    default_env = os.environ.copy()
    current_env = copy.copy(default_env)
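    # drop proxy settings so connections between local servers and workers
    # are not routed through an http proxy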
    current_env.pop("http_proxy", None)
    current_env.pop("https_proxy", None)
    procs = []
    cmds = []
    log_fns = []
    for idx, cur_server in enumerate(pod.servers):
        proc_env = {
            "PADDLE_PSERVERS_IP_PORT_LIST": server_endpoints,
            "PADDLE_TRAINER_ENDPOINTS": worker_endpoints,
            "PADDLE_PORT": cur_server.endpoint.split(":")[1],
            "TRAINING_ROLE": "PSERVER",
            "PADDLE_TRAINERS_NUM": str(worker_num),
            "POD_IP": cur_server.endpoint.split(":")[0],
            "PADDLE_WITH_GLOO": "1"
        }
        current_env.update(proc_env)

        cmd = [sys.executable, "-u", args.training_script
               ] + args.training_script_args
        cmds.append(cmd)

        if idx == 0:
            logger.info(
                "Local server start {} processes. First process distributed "
                "environment info (Only For Debug): {}".format(
                    len(pod.servers),
                    pretty_print_envs(proc_env, ("Distributed Envs", "Value"))))

        fn = None  # no log file when log_dir is unset
        if args.log_dir is not None:
            os.system("mkdir -p {}".format(args.log_dir))
            fn = open("%s/serverlog.%d" % (args.log_dir, idx), "w")
            log_fns.append(fn)
            proc = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
        else:
            proc = subprocess.Popen(cmd, env=current_env)

        tp = TrainerProc()
        tp.proc = proc
        tp.rank = cur_server.rank
        tp.local_rank = idx
        tp.log_fn = fn
        tp.log_offset = fn.tell() if fn else None
        tp.cmd = cmd

        procs.append(tp)

    for idx, cur_worker in enumerate(pod.workers):
        proc_env = {
            "PADDLE_PSERVERS_IP_PORT_LIST": server_endpoints,
            "PADDLE_TRAINER_ENDPOINTS": worker_endpoints,
            "PADDLE_TRAINERS_NUM": str(worker_num),
            "TRAINING_ROLE": "TRAINER",
            "PADDLE_TRAINER_ID": str(cur_worker.rank),
            "PADDLE_WITH_GLOO": "1"
        }
        current_env.update(proc_env)

        cmd = [sys.executable, "-u", args.training_script
               ] + args.training_script_args
        cmds.append(cmd)

        if idx == 0:
            logger.info(
                "Local worker start {} processes. First process distributed "
                "environment info (Only For Debug): {}".format(
                    len(pod.workers),
                    pretty_print_envs(proc_env, ("Distributed Envs", "Value"))))

        fn = None  # no log file when log_dir is unset
        if args.log_dir is not None:
            os.system("mkdir -p {}".format(args.log_dir))
            fn = open("%s/workerlog.%d" % (args.log_dir, idx), "w")
            log_fns.append(fn)
            proc = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
        else:
            proc = subprocess.Popen(cmd, env=current_env)

        tp = TrainerProc()
        tp.proc = proc
        tp.rank = cur_worker.rank
        tp.local_rank = idx
        tp.log_fn = fn
        tp.log_offset = fn.tell() if fn else None
        tp.cmd = cmd

        procs.append(tp)

    logger.info(
        "Please check servers and workers logs in {}/workerlog.* and {}/serverlog.*".
        format(args.log_dir, args.log_dir))
    # only wait for workers to finish here
    for i, proc in enumerate(procs):
        if i < len(pod.servers):
            continue
        procs[i].proc.wait()
        if len(log_fns) > 0:
            log_fns[i].close()

    print("all workers exited, going to terminate parameter servers",
          file=sys.stderr)
    for i in range(len(pod.servers)):
        if len(log_fns) > 0:
            log_fns[i].close()
        procs[i].proc.terminate()
    print("all parameter servers are killed", file=sys.stderr)


def launch():
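    """Entry point: route to parameter-server or collective launch based on
    the command-line flags and the number of cuda devices."""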
    args = _parse_args()
    logger = get_logger()
    _print_arguments(args)
    ps_args = ['--worker_num', '--server_num', '--servers', '--workers']
    collective_args = ['--ips', '--gpus']
    has_ps_args = [
        ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1])
    ]
    has_collective_args = [
        co_arg for co_arg in collective_args
        if co_arg in " ".join(sys.argv[1:-1])
    ]
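    # the flags above are detected by substring match over the raw command
    # line; ps (cpu) mode is preferred when any ps flag is present or when
    # no cuda device is available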
    if fluid.core.is_compiled_with_cuda():
        cuda_device_num = fluid.core.get_cuda_device_count()
    else:
        cuda_device_num = 0

    if len(has_ps_args) > 0 or cuda_device_num == 0:
        logger.info(
            "Run parameter-server cpu mode. pserver arguments:{}, cuda count:{}".
            format(has_ps_args, cuda_device_num))
        launch_ps(args)
    elif len(has_collective_args) > 0:
        logger.info("Run collective gpu mode. gpu arguments:{}, cuda count:{}".
                    format(has_collective_args, cuda_device_num))
        launch_collective(args)
    else:
        logger.warning(
            "No distributed arguments found; defaulting to gpu collective mode")
        launch_collective(args)


if __name__ == "__main__":
    launch()