# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
15
fleetrun is a module that spawns multiple distributed
16 17
process on each training node for gpu training and cpu training.
Usage:
18
    In both of single node training or multiple node training, this module
19 20 21 22 23 24 25 26
launch a process on each of the given gpu card or cpu machine.
    GPU training:
    1. for single node training with all visible gpu cards:
       fleetrun your_training_py (arg1 arg2 and all others)
    2. for single node training with gpu cards [0,4):
       fleetrun --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)
    3. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17
        on 192.168.0.16:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    CPU training:
    1. for single node training with multiple servers and workers:
        fleetrun --server_num=2 --worker_num=2 your_training_py (arg1 arg2 and all others)
    2. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers.
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    3. use gloo backend for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers. (workers must set their ports)
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
"""

from __future__ import print_function

import shutil
import sys
import tempfile
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid

from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils


def _print_arguments(args):
    print("-----------  Configuration Arguments -----------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")


def _parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description='''start paddle training using multi-process mode.
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
''')
    base_group = parser.add_argument_group("Base Parameters")

    base_group.add_argument(
        "-d",
        "--distributed_mode",
        type=str,
        choices=["collective", "ps", "ps_heter", "ps_gpu", ""],
        default="",
        help="Distributed running mode: collective/ps/ps_gpu/ps_heter")

    base_group.add_argument(
        "--log_dir",
        type=str,
        default="log",
        help="The path for each process's log.If it's not set, the log will printed to default pipe."
    )

    base_group.add_argument(
        "--gpus",
        type=str,
        default=None,
        help="It's for gpu training and the training process will run on the gpus,"
        "each process is bound to a single GPU. And if it's not set, this module will use all the gpu cards for training."
    )

    base_group.add_argument(
        "training_script",
        type=str,
        help="The full path to the single GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script")

    base_group.add_argument('training_script_args', nargs=REMAINDER)

    # Optional arguments for the launch helper
    # for collective
    collective_group = parser.add_argument_group("Collective Parameters")
    collective_group.add_argument(
        "--ips",
        type=str,
        default="127.0.0.1",
        help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")

    ps_group = parser.add_argument_group("Parameter-Server Parameters")
    # for parameter server
    ps_group.add_argument(
        "--servers", type=str, default="", help="User defined servers ip:port")
    ps_group.add_argument(
        "--workers", type=str, default="", help="User defined workers ip:port")
    ps_group.add_argument(
        "--heter_workers",
        type=str,
        default="",
        help="User defined heter workers ip:port")

    ps_group.add_argument("--worker_num", type=int, help="number of workers")
    ps_group.add_argument("--server_num", type=int, help="number of servers")
    ps_group.add_argument(
        "--heter_worker_num", type=int, help="number of heter_workers")

    ps_group.add_argument(
        "--heter_worker_device",
        type=str,
        default="gpu",
        choices=["gpu", "xpu"],
        help="heter worker device")

    return parser.parse_args()
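
# Illustrative example (not executed; "train.py" is a hypothetical script):
#   fleetrun --gpus="0,1" train.py --lr 0.1
# is parsed so that args.gpus == "0,1", args.training_script == "train.py",
# and args.training_script_args == ["--lr", "0.1"] (captured via REMAINDER).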


def get_cluster_from_args(args, gpus):
    node_ips = [x.strip() for x in args.ips.split(',')]
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        _, node_ip = get_host_name_ip()

    assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \
        % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format(
        node_ips, node_ip, node_rank))

    free_ports = None
    if not cloud_utils.use_paddlecloud() and len(
            node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None:
        free_ports = find_free_ports(len(gpus))
        if free_ports is not None:
            free_ports = list(free_ports)
    else:
        start_port = 6070
        if os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))

        free_ports = [x for x in range(start_port, start_port + len(gpus))]

    trainer_endpoints = []
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, gpus)
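
# Illustrative example (assumed values): with node_ips == ["192.168.0.16",
# "192.168.0.17"], gpus == [0, 1] and a start port of 6070, trainer_endpoints
# becomes [["192.168.0.16:6070", "192.168.0.16:6071"],
#          ["192.168.0.17:6070", "192.168.0.17:6071"]],
# i.e. one endpoint per local gpu on each node.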


def launch_collective(args):
    # parse arguments, used for cloud-single-machine and local
    gpus = get_gpus(args.gpus)
    trainers_num = cloud_utils.get_trainers_num()
    logger.debug("parsed from args trainerss_num:{} gpus:{}".format(
        trainers_num, gpus))

    cluster = None
    pod = None

    start_port = 6170
    if os.environ.get('FLAGS_START_PORT') is not None:
        start_port = int(os.environ.get('FLAGS_START_PORT'))
    if cloud_utils.use_paddlecloud() and trainers_num != 1:
        cluster, pod = cloud_utils.get_cloud_cluster(args.ips, gpus, start_port)
        logger.debug("get cluster from cloud:{}".format(cluster))
    else:
        # trainers_num = 1 or not use paddlecloud ips="a,b"
        cluster, pod = get_cluster_from_args(args, gpus)
        logger.debug("get cluster from args:{}".format(cluster))

    global_envs = copy.copy(os.environ.copy())
    gloo_rendezvous_dir = tempfile.mkdtemp()
    # add gloo env
    global_envs["PADDLE_WITH_GLOO"] = "1"
    global_envs["PADDLE_GLOO_RENDEZVOUS"] = "2"
    global_envs["PADDLE_GLOO_FS_PATH"] = gloo_rendezvous_dir

    procs = start_local_trainers(
        cluster,
        pod,
        training_script=args.training_script,
        training_script_args=args.training_script_args,
        log_dir=args.log_dir,
        envs=global_envs)

    while True:
        alive = watch_local_trainers(procs, cluster.trainers_nranks())

        if not alive:
            logger.info("Local processes completed.")
            logger.debug("POD info:{}".format(pod))
            break

        time.sleep(3)

    if os.path.exists(gloo_rendezvous_dir):
        shutil.rmtree(gloo_rendezvous_dir)


def launch_ps(args):
    cloud_flag = cloud_utils.use_paddlecloud()

    # for ps-cpu on paddlecloud
    direct_start_mode = ["ps", ""]
    if cloud_flag and (args.distributed_mode in direct_start_mode):
        direct_start(args)
        return
    elif cloud_flag and args.distributed_mode == "ps_heter":
        cloud_ps_heter_env_set(args)
        args.workers = os.getenv("PADDLE_TRAINER_ENDPOINTS")
        args.servers = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST")
        args.heter_workers = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST")
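        # Illustrative note: on paddlecloud, the ps_heter endpoint lists are
        # taken from the environment prepared by cloud_ps_heter_env_set,
        # overriding any endpoint flags passed on the command line.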

    ps_launcher = ParameterServerLauncher(args)
    ps_launcher.start_ps(args)
    return


def launch():
    args = _parse_args()
    logger = get_logger()
    _print_arguments(args)
    ps_args = [
        '--worker_num', '--server_num', '--heter_worker_num', '--servers',
        '--workers', '--heter_workers', '--heter_worker_device'
    ]
    collective_args = ['--ips', '--gpus']
    has_ps_args = [
        ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1])
    ]
    has_collective_args = [
        co_arg for co_arg in collective_args
        if co_arg in " ".join(sys.argv[1:-1])
    ]
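    # Illustrative example: for "fleetrun --server_num=2 --worker_num=2 train.py"
    # (hypothetical script), has_ps_args == ["--worker_num", "--server_num"] and
    # has_collective_args == [], so the parameter-server branch below is taken.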
    if fluid.core.is_compiled_with_cuda():
        cuda_device_num = fluid.core.get_cuda_device_count()
    else:
        cuda_device_num = 0

    ps_mode = ['ps', 'ps_gpu', 'ps_heter']
    if len(has_ps_args) > 0 or args.distributed_mode in ps_mode:
        logger.info(
            "Run parameter-sever mode. pserver arguments:{}, cuda count:{}".
            format(has_ps_args, cuda_device_num))
        launch_ps(args)
    elif len(has_collective_args) > 0:
        logger.info("Run collective gpu mode. gpu arguments:{}, cuda count:{}".
                    format(has_collective_args, cuda_device_num))
        launch_collective(args)
    else:
        logger.warning(
            "Not found distinct arguments. Default use gpu collective mode")
        launch_collective(args)


if __name__ == "__main__":
    launch()