# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
fleetrun is a module that spawns multiple distributed
processes on each training node for gpu training and cpu training.
Usage:
    In both single-node and multi-node training, this module
    launches a process on each of the given gpu cards or cpu machines.
    GPU training:
    1. for single node training with all visible gpu cards:
       fleetrun your_training_py (arg1 arg2 and all others)
    2. for single node training with [0,4) cards
       fleetrun --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)
    3. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17
        on 192.168.0.16:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --ips="192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    CPU training:
    1. for single node training with multiple servers and workers:
        fleetrun --server_num=2 --worker_num=2 your_training_py (arg1 arg2 and all others)
    2. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers.
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
                your_training_py (arg1 arg2 and all others)
    3. use gloo backend for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17 \
        with 2 servers and 4 workers. (workers should set port)
        on 192.168.0.16:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
                --workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
                your_training_py (arg1 arg2 and all others)
"""

from __future__ import print_function

import shutil
import sys
import tempfile
from sys import version
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid

from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils


def _print_arguments(args):
    print("-----------  Configuration Arguments -----------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")


def _parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description='''start paddle training using multi-process mode.
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
''')
    base_group = parser.add_argument_group("Base Parameters")

    base_group.add_argument(
        "--log_dir",
        type=str,
        default="log",
        help="The path for each process's log.If it's not set, the log will printed to default pipe."
    )

    base_group.add_argument(
        "--gpus",
        type=str,
        default=None,
        help="It's for gpu training and the training process will run on the gpus,"
        "each process is bound to a single GPU. And if it's not set, this module will use all the gpu cards for training."
    )

    base_group.add_argument(
        "training_script",
        type=str,
        help="The full path to the single GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script")

    base_group.add_argument('training_script_args', nargs=REMAINDER)

    # Optional arguments for the launch helper
    # for collective
    collective_group = parser.add_argument_group("Collective Parameters")
    collective_group.add_argument(
        "--ips",
        type=str,
        default="127.0.0.1",
        help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")

    ps_group = parser.add_argument_group("Parameter-Server Parameters")
    # for parameter server
    ps_group.add_argument(
        "--servers", type=str, default="", help="User defined servers ip:port")
    ps_group.add_argument(
        "--workers", type=str, default="", help="User defined workers ip:port")
    ps_group.add_argument(
        "--heter_workers",
        type=str,
        default="",
        help="User defined heter workers ip:port")

    ps_group.add_argument("--worker_num", type=int, help="number of workers")
    ps_group.add_argument("--server_num", type=int, help="number of servers")
    ps_group.add_argument(
        "--heter_worker_num", type=int, help="number of heter_workers")

    return parser.parse_args()


def get_cluster_from_args(args, gpus):
    node_ips = [x.strip() for x in args.ips.split(',')]
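    # Determine this machine's ip: trivially when a single node is given,
    # otherwise by resolving the local host name.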
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        _, node_ip = get_host_name_ip()

    # node_ip = args.node_ip
    assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \
        % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format(
        node_ips, node_ip, node_rank))

    free_ports = None
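    # Pick free ports dynamically only for a purely local run (no paddlecloud,
    # a single node, and FLAGS_START_PORT unset); otherwise use a contiguous
    # range starting at FLAGS_START_PORT (default 6070).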
    if not cloud_utils.use_paddlecloud() and len(
            node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None:
        free_ports = find_free_ports(len(gpus))
        if free_ports is not None:
            free_ports = list(free_ports)
    else:
        start_port = 6070
        if os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))

        free_ports = [x for x in range(start_port, start_port + len(gpus))]

    trainer_endpoints = []
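    # one ip:port endpoint per selected gpu on every node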
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, gpus)


def launch_collective(args):
    # parse arguments, used for cloud-single-machine and local
    gpus = get_gpus(args.gpus)
    trainers_num = cloud_utils.get_trainers_num()
    logger.debug("parsed from args trainerss_num:{} gpus:{}".format(
        trainers_num, gpus))

    cluster = None
    pod = None

    start_port = 6170
    if os.environ.get('FLAGS_START_PORT') is not None:
        start_port = int(os.environ.get('FLAGS_START_PORT'))
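    # Use the cluster description provided by paddlecloud when running there
    # with more than one trainer node; otherwise build it from --ips and
    # locally chosen ports.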
    if cloud_utils.use_paddlecloud() and trainers_num != 1:
        cluster, pod = cloud_utils.get_cloud_cluster(args.ips, gpus, start_port)
        logger.debug("get cluster from cloud:{}".format(cluster))
    else:
        # trainers_num = 1 or not use paddlecloud ips="a,b"
        cluster, pod = get_cluster_from_args(args, gpus)
        logger.debug("get cluster from args:{}".format(cluster))

    global_envs = copy.copy(os.environ.copy())
    gloo_rendezvous_dir = tempfile.mkdtemp()
    # add gloo env
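    # these settings ask each trainer to bootstrap a gloo context, using the
    # temporary directory created above as the rendezvous store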
    global_envs["PADDLE_WITH_GLOO"] = "1"
    global_envs["PADDLE_GLOO_RENDEZVOUS"] = "2"
    global_envs["PADDLE_GLOO_FS_PATH"] = gloo_rendezvous_dir

    procs = start_local_trainers(
        cluster,
        pod,
        training_script=args.training_script,
        training_script_args=args.training_script_args,
        log_dir=args.log_dir,
        envs=global_envs)

    while True:
        alive = watch_local_trainers(procs, cluster.trainers_nranks())

        if not alive:
            logger.info("Local processes completed.")
            logger.debug("POD info:{}".format(pod))
            break

        time.sleep(3)

    if os.path.exists(gloo_rendezvous_dir):
        shutil.rmtree(gloo_rendezvous_dir)


def launch_ps(args, distribute_mode):
    cloud_flag = cloud_utils.use_paddlecloud()

    # for ps-cpu on paddlecloud
    if cloud_flag and distribute_mode == DistributeMode.PS:
        direct_start(args)
        return
    elif cloud_flag and distribute_mode == DistributeMode.PS_HETER:
        cloud_ps_heter_env_set(args)
        # map the endpoints published by paddlecloud onto the CLI arguments
        args.workers = os.getenv("PADDLE_TRAINER_ENDPOINTS")
        args.servers = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST")
        args.heter_workers = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST")

    ps_launcher = ParameterServerLauncher(args, distribute_mode)
    ps_launcher.start_ps()
    return


def which_distributed_mode(args):
    ps_args = [
        '--worker_num',
        '--server_num',
        '--heter_worker_num',
        '--servers',
        '--workers',
        '--heter_workers',
    ]
    collective_args = ['--ips']

    ps_heter_args = ["--heter_worker_num", "--heter_workers"]

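    # Inspect the raw command line (everything except the last argument) to
    # see which mode-selecting flags were actually passed.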
    has_ps_args = [
        ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1])
    ]
    has_collective_args = [
        co_arg for co_arg in collective_args
        if co_arg in " ".join(sys.argv[1:-1])
    ]

    assert not (
        len(has_ps_args) > 0 and len(has_collective_args) > 0
    ), "Only one mode (Collective or Parameter-Server) can be selected at the same time, but more than one configuration was received."

    if fluid.core.is_compiled_with_cuda():
        cuda_device_num = fluid.core.get_cuda_device_count()
    else:
        cuda_device_num = 0

    if len(has_ps_args) > 0:
        logger.info(
            "Run parameter-server mode. pserver arguments:{}, cuda count:{}".
            format(has_ps_args, cuda_device_num))
        has_ps_heter_args = list(set(has_ps_args) & set(ps_heter_args))
        if len(has_ps_heter_args) > 0:
            return DistributeMode.PS_HETER
        else:
            return DistributeMode.PS
    elif len(has_collective_args) > 0:
        logger.info("Run collective gpu mode. gpu arguments:{}, cuda count:{}".
                    format(has_collective_args, cuda_device_num))
        return DistributeMode.COLLECTIVE
    else:
        logger.warning(
            "No distinct distributed arguments found. Defaulting to gpu collective mode.")
        return DistributeMode.COLLECTIVE


def launch():
    args = _parse_args()
    logger = get_logger()
    _print_arguments(args)

    distribute_mode = which_distributed_mode(args)
    if distribute_mode == DistributeMode.COLLECTIVE:
        launch_collective(args)
    else:
        launch_ps(args, distribute_mode)


if __name__ == "__main__":
    launch()