# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
paddle.distributed.launch is a module that spawns multiple distributed
processes on each training node for gpu training.
Usage:
    In both single node training and multiple node training, this module
launches a process on each of the given gpu cards.
    1. for single node training with all visible gpu cards:
       python -m paddle.distributed.launch \
         your_training_py (arg1 arg2 and all others)
    
    2. for single node training with [0,4) cards
       python -m paddle.distributed.launch --selected_gpus="0,1,2,3" \
         your_training_py (arg1 arg2 and all others)
    3. for multiple node training such as two nodes: 192.168.0.16, 192.168.0.17
        on 192.168.0.16:
            python -m paddle.distributed.launch --cluster_node_ips="192.168.0.16,192.168.0.17" \
                --node_ip=192.168.0.16 \
                your_training_py (arg1 arg2 and all others)
        on 192.168.0.17:
            python -m paddle.distributed.launch --cluster_node_ips="192.168.0.16,192.168.0.17" \
                --node_ip=192.168.0.17 \
                your_training_py (arg1 arg2 and all others)
"""

from __future__ import print_function
import sys
from sys import version
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER

from paddle.distributed.utils import *
from paddle.distributed import cloud_utils


def _print_arguments(args):
    print("-----------  Configuration Arguments -----------")
    for arg, value in sorted(six.iteritems(vars(args))):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")


def _parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description='''start paddle training using multi-process mode.
NOTE: your train program ***must*** run in distributed nccl2 mode,
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
And your train program must read the environment variables below in order to let the
different processes initialize properly:
FLAGS_selected_gpus
PADDLE_TRAINER_ID
PADDLE_CURRENT_ENDPOINT
PADDLE_TRAINERS_NUM
PADDLE_TRAINER_ENDPOINTS
POD_IP (current node ip address, not needed for local training)
''')
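    # A minimal sketch, not part of this launcher, of how a training script
    # might read the variables documented above; the local variable names used
    # here are illustrative only:
    #
    #   import os
    #   selected_gpus = os.getenv("FLAGS_selected_gpus")
    #   trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
    #   current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
    #   trainers_num = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
    #   trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", "").split(",")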

    #Optional arguments for the launch helper
    parser.add_argument(
        "--cluster_node_ips",
        type=str,
        default="127.0.0.1",
        help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")
    parser.add_argument(
        "--node_ip",
        type=str,
        default="127.0.0.1",
        help="The current node ip. ")
    parser.add_argument(
        "--use_paddlecloud",
        action='store_true',
        help="whether to use paddlecloud platform to run your multi-process job. If false, no need to set this argument."
    )
    parser.add_argument(
        "--started_port",
        type=int,
        default=None,
        help="The trainer's started port on a single node")

    parser.add_argument(
        "--print_config",
        type=bool,
        default=True,
        help="Print the config or not")

    parser.add_argument(
        "--selected_gpus",
        type=str,
        default=None,
        help="It's for gpu training and the training process will run on the selected_gpus, "
        "with each process bound to a single GPU. If it's not set, this module will use all the gpu cards for training."
    )

    parser.add_argument(
        "--log_level",
        type=int,
        default=20,  # logging.INFO, details are here:https://docs.python.org/3/library/logging.html#levels
        help="Logging level, default is logging.INFO")

    parser.add_argument(
        "--log_dir",
        type=str,
        help="The path for each process's log. If it's not set, the log will be printed to the default pipe."
    )

    #positional
    parser.add_argument(
        "training_script",
        type=str,
        help="The full path to the single GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script")

    #rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def get_cluster_from_args(args, selected_gpus):
    node_ips = [x.strip() for x in args.cluster_node_ips.split(',')]
    node_ip = args.node_ip
    node_rank = node_ips.index(node_ip)

    logger.debug("parsed from args:node_ips:{} node_ip:{} node_rank:{}".format(
        node_ips, node_ip, node_rank))

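    # On a single local node with no explicit --started_port, probe the OS for
    # free ports; otherwise hand out a consecutive port range starting at
    # --started_port (default 6070), one port per selected gpu.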
    free_ports = None
    if not args.use_paddlecloud and len(
            node_ips) <= 1 and args.started_port is None:
        free_ports = find_free_ports(len(selected_gpus))
        if free_ports is not None:
            free_ports = list(free_ports)
    else:
        started_port = 6070
        if args.started_port is not None:
            started_port = args.started_port

        free_ports = [
            x for x in range(started_port, started_port + len(selected_gpus))
        ]

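    # Every node gets the full endpoint layout; e.g. node_ips
    # ["192.168.0.16", "192.168.0.17"] with free_ports [6070, 6071] gives
    # [["192.168.0.16:6070", "192.168.0.16:6071"],
    #  ["192.168.0.17:6070", "192.168.0.17:6071"]].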
    trainer_endpoints = []
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, selected_gpus)


def get_gpus(selected_gpus):
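    # If --selected_gpus is not set, use every CUDA device this process can
    # see; otherwise map the requested gpu ids to indices relative to
    # CUDA_VISIBLE_DEVICES when that variable is set.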
    if selected_gpus is None:
        from paddle.fluid import core
        gpus_num = core.get_cuda_device_count()
        gpus = [str(x) for x in range(0, gpus_num)]
    else:
        cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices is None or cuda_visible_devices == "":
            gpus = [x.strip() for x in selected_gpus.split(',')]
        else:
            # change selected_gpus into relative values
            # e.g. CUDA_VISIBLE_DEVICES=4,5,6,7; args.selected_gpus=4,5,6,7;
            # therefore selected_gpus=0,1,2,3
            cuda_visible_devices_list = cuda_visible_devices.split(',')
            for x in selected_gpus.split(','):
                assert x in cuda_visible_devices_list, "Can't find "\
                "your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
                % (x, cuda_visible_devices)
            gpus = [
                cuda_visible_devices_list.index(x.strip())
                for x in selected_gpus.split(',')
            ]
            logger.info("Change selected_gpus into relative values. --selected_gpus:{} "
                        "will change into relative gpu ids:{} according to your "
                        "CUDA_VISIBLE_DEVICES:{}".format(
                            selected_gpus, gpus, cuda_visible_devices_list))

    return gpus


def get_cluster_and_pod(args):
    # parse arguments, used for cloud-single-machine and local
    selected_gpus = get_gpus(args.selected_gpus)
    trainers_num = cloud_utils.get_trainers_num()
    logger.debug("parsed from args trainers_num:{} selected_gpus:{}".format(
        trainers_num, selected_gpus))

    cluster = None
    pod = None

    if args.use_paddlecloud and trainers_num != 1:
        cluster, pod = cloud_utils.get_cloud_cluster(
            args.cluster_node_ips, args.node_ip, args.started_port,
            selected_gpus)
        logger.info("get cluster from cloud:{}".format(cluster))
    else:
        cluster, pod = get_cluster_from_args(args, selected_gpus)
        logger.info("get cluster from args:{}".format(cluster))

    return cluster, pod


def launch(args):
    cluster, pod = get_cluster_and_pod(args)

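    # Start one trainer process per selected gpu on this node, then poll until
    # all of the local trainer processes have exited.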
    procs = start_local_trainers(
        cluster,
        pod,
        training_script=args.training_script,
        training_script_args=args.training_script_args,
        log_dir=args.log_dir)

    while True:
        alive = watch_local_trainers(procs, cluster.trainers_nranks())

        if not alive:
            logger.info("Local procs complete, POD info:{}".format(pod))
            break

        time.sleep(3)


if __name__ == "__main__":
    args = _parse_args()

    logger = get_logger(args.log_level)

    if args.print_config:
        _print_arguments(args)

    launch(args)