# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
fleetrun is a module that spawns multiple distributed
processes on each training node for gpu training and cpu training.
Usage:
    In both single-node and multi-node training, this module
launches a process on each of the given gpu cards or cpu machines.
    GPU training:
    1. for single node training with all visible gpu cards:
       fleetrun your_training_py (arg1 arg2 and all others)
    2. for single node training with [0,4) cards
       fleetrun --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)
    3. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17
        on 192.168.0.16:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    CPU training:
    1. for single node training with multiple servers and workers:
        fleetrun --server_num=2 --worker_num=2 your_training_py (arg1 arg2 and all others)
    2. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers.
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    3. use gloo backend for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers. (workers should set ports)
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
"""

from __future__ import print_function

import shutil
import sys
import tempfile
from sys import version
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid

from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils


def _print_arguments(args):
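    """Print all parsed command line arguments as a formatted block."""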
    print("-----------  Configuration Arguments -----------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")


def _parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description='''start paddle training using multi-process mode.
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
''')
    base_group = parser.add_argument_group("Base Parameters")

    base_group.add_argument(
        "-d",
        "--distributed_mode",
        type=str,
        choices=["collective", "ps", "ps_heter", "ps_gpu", ""],
        default="",
        help="Distributed running mode: collective/ps/ps_gpu/ps_heter")

    base_group.add_argument(
        "--log_dir",
        type=str,
        default="log",
        help="The path for each process's log.If it's not set, the log will printed to default pipe."
    )

    base_group.add_argument(
        "training_script",
        type=str,
        help="The full path to the single GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script")

    # Optional arguments for the launch helper
    # for collective
    collective_group = parser.add_argument_group("Collective Parameters")
    collective_group.add_argument(
        "--ips",
        type=str,
        default="127.0.0.1",
        help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")
    collective_group.add_argument(
        "--gpus",
        type=str,
        default=None,
        help="It's for gpu training and the training process will run on the gpus,"
        "each process is bound to a single GPU. And if it's not set, this module will use all the gpu cards for training."
    )

    ps_group = parser.add_argument_group("Parameter-Server Parameters")
    # for parameter server
    ps_group.add_argument(
        "--servers", type=str, default="", help="User defined servers ip:port")
    ps_group.add_argument(
        "--workers", type=str, default="", help="User defined workers ip:port")
    ps_group.add_argument(
        "--heter_workers",
        type=str,
        default="",
        help="User defined heter workers ip:port")

    ps_group.add_argument("--worker_num", type=int, help="number of workers")
    ps_group.add_argument("--server_num", type=int, help="number of servers")
    ps_group.add_argument(
        "--heter_worker_num", type=int, help="number of heter_workers")

    ps_group.add_argument(
        "--heter_worker_device",
        type=str,
        default="gpu",
        choices=["gpu", "xpu"],
        help="heter worker device")

    return parser.parse_args()


def get_cluster_from_args(args, gpus):
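    """Build the cluster/pod description from --ips and the selected gpus.

    Endpoint ports come from FLAGS_START_PORT when it is set; otherwise free
    ports are probed for single-node runs, or allocated from 6070 upwards.
    """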
    node_ips = [x.strip() for x in args.ips.split(',')]
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        _, node_ip = get_host_name_ip()

    # node_ip = args.node_ip
    assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \
        % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format(
        node_ips, node_ip, node_rank))

    free_ports = None
    if not cloud_utils.use_paddlecloud() and len(
            node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None:
        free_ports = find_free_ports(len(gpus))
        if free_ports is not None:
            free_ports = list(free_ports)
    else:
        start_port = 6070
        if os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))

        free_ports = [x for x in range(start_port, start_port + len(gpus))]

    trainer_endpoints = []
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, gpus)


def get_gpus(gpus):
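    """Resolve the list of gpus to train on.

    When --gpus is not set, all visible cuda devices are used; otherwise the
    requested ids are used, remapped to indices relative to
    CUDA_VISIBLE_DEVICES when that variable is set.
    """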
    if gpus is None:
        gpus_num = fluid.core.get_cuda_device_count()
        res_gpus = [str(x) for x in range(0, gpus_num)]
    else:
        cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices is None or cuda_visible_devices == "":
            res_gpus = [x.strip() for x in gpus.split(',')]
        else:
            # change gpus into relative values
            # e.g. CUDA_VISIBLE_DEVICES=4,5,6,7; args.gpus=4,5,6,7;
            # therefore gpus=0,1,2,3
            cuda_visible_devices_list = cuda_visible_devices.split(',')
            for x in gpus.split(','):
                assert x in cuda_visible_devices_list, "Can't find "\
                    "your gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
                    % (x, cuda_visible_devices)
            res_gpus = [
                cuda_visible_devices_list.index(x.strip())
                for x in gpus.split(',')
            ]
            logger.info("Change selected_gpus into reletive values. --ips:{} "
                        "will change into relative_ips:{} according to your "
                        "CUDA_VISIBLE_DEVICES:{}".format(
                            gpus, res_gpus, cuda_visible_devices_list))

    return res_gpus


def launch_collective(args):
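    """Launch one local trainer process per selected gpu and watch them until
    they all finish, building the cluster info from paddlecloud or from --ips.
    """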
    # parse arguments, used for cloud-single-machine and local
    gpus = get_gpus(args.gpus)
    trainers_num = cloud_utils.get_trainers_num()
    logger.debug("parsed from args trainerss_num:{} gpus:{}".format(
        trainers_num, gpus))

    cluster = None
    pod = None

    start_port = 6170
    if os.environ.get('FLAGS_START_PORT') is not None:
        start_port = int(os.environ.get('FLAGS_START_PORT'))
    if cloud_utils.use_paddlecloud() and trainers_num != 1:
        cluster, pod = cloud_utils.get_cloud_cluster(args.ips, gpus, start_port)
        logger.debug("get cluster from cloud:{}".format(cluster))
    else:
        # trainers_num = 1 or not use paddlecloud ips="a,b"
        cluster, pod = get_cluster_from_args(args, gpus)
        logger.debug("get cluster from args:{}".format(cluster))

    global_envs = copy.copy(os.environ.copy())
    gloo_rendezvous_dir = tempfile.mkdtemp()
    # add gloo env
    global_envs["PADDLE_WITH_GLOO"] = "1"
    global_envs["PADDLE_GLOO_RENDEZVOUS"] = "2"
    global_envs["PADDLE_GLOO_FS_PATH"] = gloo_rendezvous_dir

    procs = start_local_trainers(
        cluster,
        pod,
        training_script=args.training_script,
        training_script_args=args.training_script_args,
        log_dir=args.log_dir,
        envs=global_envs)

    while True:
        alive = watch_local_trainers(procs, cluster.trainers_nranks())

        if not alive:
            logger.info("Local processes completed.")
            logger.debug("POD info:{}".format(pod))
            break

        time.sleep(3)

    if os.path.exists(gloo_rendezvous_dir):
        shutil.rmtree(gloo_rendezvous_dir)


def launch_ps(args):
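    """Launch parameter-server training.

    On paddlecloud, ps/cpu jobs are started directly and ps_heter jobs first
    read their endpoints from the cloud environment; everything else is handed
    to ParameterServerLauncher, which starts the servers and workers.
    """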
    cloud_flag = cloud_utils.use_paddlecloud()

    # for ps-cpu on paddlecloud
    direct_start_mode = ["ps", ""]
    if cloud_flag and (args.distributed_mode in direct_start_mode):
        direct_start(args)
        return
    elif cloud_flag and args.distributed_mode == "ps_heter":
        cloud_ps_heter_env_set(args)
        args.workers = os.getenv("PADDLE_TRAINER_ENDPOINTS")
        args.servers = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST")
        args.heter_workers = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST")

    ps_launcher = ParameterServerLauncher(args)
    ps_launcher.start_ps(args)
    return


def launch():
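    """Entry point: parse the command line and dispatch to parameter-server
    or collective mode based on the arguments that were passed."""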
    args = _parse_args()
    logger = get_logger()
    _print_arguments(args)
    ps_args = [
        '--worker_num', '--server_num', '--heter_worker_num', '--servers',
        '--workers', '--heter_workers', '--heter_worker_device'
    ]
    collective_args = ['--ips', '--gpus']
    has_ps_args = [
        ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1])
    ]
    has_collective_args = [
        co_arg for co_arg in collective_args
        if co_arg in " ".join(sys.argv[1:-1])
    ]
    if fluid.core.is_compiled_with_cuda():
        cuda_device_num = fluid.core.get_cuda_device_count()
    else:
        cuda_device_num = 0

    ps_mode = ['ps', 'ps_gpu', 'ps_heter']
    if len(has_ps_args) > 0 or args.distributed_mode in ps_mode:
        logger.info(
            "Run parameter-sever mode. pserver arguments:{}, cuda count:{}".
            format(has_ps_args, cuda_device_num))
        launch_ps(args)
    elif len(has_collective_args) > 0:
        logger.info("Run collective gpu mode. gpu arguments:{}, cuda count:{}".
                    format(has_collective_args, cuda_device_num))
        launch_collective(args)
    else:
        logger.warning(
            "Not found distinct arguments. Default use gpu collective mode")
        launch_collective(args)


if __name__ == "__main__":
    launch()