# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import time
import os
import signal
import copy
import sys
import subprocess
import tempfile
import shutil
from contextlib import closing
import multiprocessing
import socket
import struct
import json

import paddle.fluid as fluid
from distutils.util import strtobool
import paddle.utils.cpp_extension.extension_utils as utils

logger = logging.getLogger("root")
logger.propagate = False


class DistributeMode:
    """
    There are various modes for fleetrun; each is designed for a different model.
    """

    COLLECTIVE = 0
    PS = 1
    PS_HETER = 2


class DeviceMode:
    """
    Training devices type
    """

    UNKNOWN = -1
    CPU = 0
    GPU = 1
    KUNLUN = 2
    XPU = 2  # alias of KUNLUN: Kunlun devices are addressed as XPU
    ASCEND_NPU = 3
    MLU = 4


class Cluster(object):
    def __init__(self, hdfs):
        self.job_server = None
        self.pods = []
        self.hdfs = hdfs
        self.job_stage_flag = None

    def __str__(self):
        return "job_server:{} pods:{} job_stage_flag:{} hdfs:{}".format(
            self.job_server,
            [str(pod) for pod in self.pods],
            self.job_stage_flag,
            self.hdfs,
        )

    def __eq__(self, cluster):
        if len(self.pods) != len(cluster.pods):
            return False

        for a, b in zip(self.pods, cluster.pods):
            if a != b:
                return False

        if self.job_stage_flag != cluster.job_stage_flag:
            return False

        return True

    def __ne__(self, cluster):
        return not self.__eq__(cluster)

    def update_pods(self, cluster):
        self.pods = copy.copy(cluster.pods)

    def trainers_nranks(self):
        return len(self.trainers_endpoints())

    def pods_nranks(self):
        return len(self.pods)

    def trainers_endpoints(self):
        r = []
        for pod in self.pods:
            for t in pod.trainers:
                r.append(t.endpoint)
        return r

    def world_device_ids(self):
        r = []
        for pod in self.pods:
            for t in pod.trainers:
                str_accelerators = [str(acc) for acc in t.accelerators]
                r.append(str_accelerators)
        return r

    def pods_endpoints(self):
        r = []
        for pod in self.pods:
            ep = "{}:{}".format(pod.addr, pod.port)
            assert (
                pod.port is not None and pod.addr is not None
            ), "{} not a valid endpoint".format(ep)
            r.append(ep)
        return r

    def get_pod_by_id(self, pod_id):
        for pod in self.pods:
            if str(pod_id) == str(pod.id):
                return pod

        return None


class JobServer(object):
    def __init__(self):
        self.endpoint = None

    def __str__(self):
        return "{}".format(self.endpoint)

    def __eq__(self, j):
        return self.endpoint == j.endpoint

    def __ne__(self, j):
        return not self == j


class Trainer(object):
    def __init__(self):
        self.accelerators = []
        self.endpoint = None
        self.rank = None
        self.stage = None

    def __str__(self):
        return "accelerator:{} endpoint:{} rank:{}".format(
            self.accelerators, self.endpoint, self.rank
        )

    def __eq__(self, t):
        if len(self.accelerators) != len(t.accelerators):
            return False

        if self.endpoint != t.endpoint or self.rank != t.rank:
            return False

        for a, b in zip(self.accelerators, t.accelerators):
            if a != b:
                return False

        return True

    def __ne__(self, t):
        return not self == t

    def rank(self):
        return self.rank


class Pod(object):
    def __init__(self):
        self.rank = None
        self.id = None
        self.addr = None
        self.port = None
        self.trainers = []
        self.servers = []
        self.workers = []
        self.coordinators = []
        self.heter_workers = []
        self.accelerators = []
        self.device_mode = None

    def __str__(self):
        return "rank:{} id:{} addr:{} port:{} visible_accelerator:{} trainers:{} servers:{} \
            workers:{} heter_workers:{} coordinators:{}".format(
            self.rank,
            self.id,
            self.addr,
            self.port,
            self.accelerators,
            [str(t) for t in self.trainers],
            [str(s) for s in self.servers],
            [str(w) for w in self.workers],
            [str(h) for h in self.heter_workers],
            [str(c) for c in self.coordinators],
        )

    def __eq__(self, pod):
        if (
            self.rank != pod.rank
            or self.id != pod.id
            or self.addr != pod.addr
            or self.port != pod.port
        ):
            logger.debug("pod {} != {}".format(self, pod))
            return False

        if len(self.trainers) != len(pod.trainers):
            logger.debug(
                "trainers {} != {}".format(self.trainers, pod.trainers)
            )
            return False

        for i in range(len(self.trainers)):
            if self.trainers[i] != pod.trainers[i]:
                logger.debug(
                    "trainer {} != {}".format(self.trainers[i], pod.trainers[i])
                )
                return False

        if len(self.servers) != len(pod.servers):
            logger.debug("servers {} != {}".format(self.servers, pod.servers))
            return False

        for i in range(len(self.servers)):
            if self.servers[i] != pod.servers[i]:
                logger.debug(
                    "servers {} != {}".format(self.servers[i], pod.servers[i])
                )
                return False

        if len(self.workers) != len(pod.workers):
            logger.debug("workers {} != {}".format(self.workers, pod.workers))
            return False

        for i in range(len(self.workers)):
            if self.workers[i] != pod.workers[i]:
                logger.debug(
                    "workers {} != {}".format(self.workers[i], pod.workers[i])
                )
                return False

        return True

    def __ne__(self, pod):
        return not self == pod

    def parse_response(self, res_pods):
        pass

    def rank(self):
        return self.rank

    def get_visible_accelerators(self):
        r = ",".join(str(g) for g in self.accelerators)

        assert r != "", "this pod {} can't see any accelerators".format(self)

        return r


def get_logger(log_level=20, name="root"):
    logger = logging.getLogger(name)
    logger.setLevel(log_level)

    log_handler = logging.StreamHandler()
    log_format = logging.Formatter(
        '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'
    )
    log_handler.setFormatter(log_format)
    logger.addHandler(log_handler)

    return logger


def get_cluster(
    node_ips, node_ip, trainer_endpoints, device_mode, devices_per_proc
):
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    cluster = Cluster(hdfs=None)
    trainer_rank = 0
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode

        cur_node_endpoints = trainer_endpoints[node_rank]
        # when using paddlecloud, the number of endpoints may exceed
        # devices_per_proc (user defined)
        assert len(cur_node_endpoints) >= len(
            devices_per_proc
        ), "current trainer_endpoints size should be greater than or equal to the accelerators size."
        for i in range(len(devices_per_proc)):
            trainer = Trainer()
            if (
                device_mode == DeviceMode.GPU
                or device_mode == DeviceMode.ASCEND_NPU
                or device_mode == DeviceMode.MLU
            ):
                if isinstance(devices_per_proc[i], (list, tuple)):
                    trainer.accelerators.extend(devices_per_proc[i])
                    pod.accelerators.extend(devices_per_proc[i])
                else:
                    trainer.accelerators.append(devices_per_proc[i])
                    pod.accelerators.append(devices_per_proc[i])
            elif device_mode == DeviceMode.XPU:
                if isinstance(devices_per_proc[i], (list, tuple)):
                    trainer.accelerators.extend(devices_per_proc[i])
                else:
                    trainer.accelerators.append(devices_per_proc[i])
            trainer.endpoint = "%s" % (cur_node_endpoints[i])
            trainer.rank = trainer_rank
            trainer_rank += 1

            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]
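
# A hypothetical get_cluster call: two nodes, two GPUs per node; each inner
# list of trainer_endpoints holds one node's "ip:port" endpoints.
#   cluster, pod = get_cluster(
#       node_ips=["10.0.0.1", "10.0.0.2"],
#       node_ip="10.0.0.1",
#       trainer_endpoints=[["10.0.0.1:6170", "10.0.0.1:6171"],
#                          ["10.0.0.2:6170", "10.0.0.2:6171"]],
#       device_mode=DeviceMode.GPU,
#       devices_per_proc=[0, 1],
#   )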


def terminate_local_procs(procs):
    # try to terminate the processes by group; this happens in multiprocess
    # scenarios where the user process spawned children
    if os.name != 'nt':
        for p in procs:
            if p.proc.poll() is None:
                os.killpg(os.getpgid(p.proc.pid), signal.SIGTERM)
                if p.log_fn:
                    p.log_fn.close()
                logger.info("terminate process group gid:{}".format(p.proc.pid))

        time.sleep(1)

    for p in procs:
        if p.proc.poll() is None:
            p.proc.terminate()
            if p.log_fn:
                p.log_fn.close()
            logger.debug("terminate process id:{}".format(p.proc.pid))

    # wait for all processes to terminate
    time.sleep(3)
    for step in range(0, 50):
        alive = False
        for p in procs:
            if p.proc.poll() is None:  # not terminated
                os.kill(p.proc.pid, signal.SIGKILL)
                alive = True

        if not alive:
            logger.info("terminate all the procs")
            return

        time.sleep(3)

    logger.fatal("can't kill all process and exit")
    exit(1)


def get_host_name_ip():
    try:
        host_name = socket.gethostname()
        host_ip = socket.gethostbyname(host_name)
        return host_name, host_ip
    except:
        return None, None


def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Add argparse's argument.
    Usage:
    .. code-block:: python
        parser = argparse.ArgumentParser()
        add_argument("name", str, "Jonh", "User name.", parser)
        args = parser.parse_args()
    """
    type = strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs
    )


def find_free_ports(num):
    def __free_port():
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            # Note(wangxi): Close the connection with a TCP RST instead
            # of a TCP FIN, to avoid time_wait state.
            s.setsockopt(
                socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)
            )
            s.bind(('', 0))
            return s.getsockname()[1]

    port_set = set()
    step = 0
    while True:
        port = __free_port()
        if port not in port_set:
            port_set.add(port)

        if len(port_set) >= num:
            return port_set

        step += 1
        if step > 400:
            print(
                "can't find an available port, will use the specified static port now!"
            )
            return None

    return None
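
# A hypothetical example: reserve three distinct free ports (the actual
# numbers depend on the OS ephemeral port range).
#   ports = find_free_ports(3)   # e.g. {52001, 52017, 52033}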


def get_ports(num, offset):
    if os.environ.get('FLAGS_START_PORT') is None:
        ports = find_free_ports(num)
        if ports is not None:
            ports = list(ports)
    else:
        start_port = int(os.environ.get('FLAGS_START_PORT'))
        ports = range(start_port + offset, start_port + offset + num, 1)
    return ports
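
# get_ports behaviour, with hypothetical values:
#   FLAGS_START_PORT unset:  get_ports(2, 0) picks free ports, e.g. [52001, 52017]
#   FLAGS_START_PORT=6170:   get_ports(2, 1) -> range(6171, 6173)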


def pretty_print_envs(envs, header=None):
    spacing = 2
    max_k = 40
    max_v = 45

    for k, v in envs.items():
        max_k = max(max_k, len(k))

    h_format = "    " + "|{{:>{}s}}{}{{:^{}s}}|\n".format(
        max_k, " " * spacing, max_v
    )
    l_format = "    " + "|{{:>{}s}}{{}}{{:^{}s}}|\n".format(max_k, max_v)
    length = max_k + max_v + spacing

    border = "    +" + "".join(["="] * length) + "+"
    line = "    +" + "".join(["-"] * length) + "+"

    draws = ""
    draws += border + "\n"

    if header:
        draws += h_format.format(header[0], header[1])
    else:
        draws += h_format.format("fleetrun Distributed Envs", "Value")

    draws += line + "\n"

    for k, v in envs.items():
        if isinstance(v, str) and len(v) >= max_v:
            str_v = "... " + v[-41:]
        else:
            str_v = v

        draws += l_format.format(k, " " * spacing, str(str_v))

    draws += border

    _str = "\n{}\n".format(draws)
    return _str
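
# pretty_print_envs renders a bordered two-column table, roughly (illustrative):
#     +======================================+
#     |    fleetrun Distributed Envs   Value |
#     +--------------------------------------+
#     |          PADDLE_TRAINERS_NUM     2   |
#     +======================================+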


class TrainerProc(object):
    def __init__(self):
        self.proc = None
        self.log_fn = None
        self.log_offset = None
        self.rank = None
        self.local_rank = None
        self.cmd = None


_run_with_coverage = False


def run_with_coverage(*args):
    global _run_with_coverage
    assert len(args) <= 1, "len(args) {} should <= 1".format(len(args))
    if len(args) == 1:
        assert isinstance(args[0], bool)
        _run_with_coverage = args[0]
    return _run_with_coverage
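
# Example: run_with_coverage(True) makes start_local_trainers prepend
# "-m coverage run --branch -p" to the trainer command line; calling it
# with no argument just reads the current flag.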


def start_local_trainers(
    cluster, pod, training_script, training_script_args, log_dir=None, envs=None
):

    if envs is None:
        current_env = copy.copy(os.environ.copy())
    else:
        current_env = copy.copy(envs)

    # Paddle broadcasts ncclUniqueId over sockets, and a proxy may make
    # trainers unreachable, so the proxy variables are removed. Setting them
    # to "" is not enough: grpc would then log a "bad uri" error, so they
    # are deleted outright.
    current_env.pop("http_proxy", None)
    current_env.pop("https_proxy", None)

    ids = cluster.world_device_ids()
    # one entry per trainer; a trainer's device ids are joined with ':'
    res = [':'.join(ele) for ele in ids]
    procs = []
    for idx, t in enumerate(pod.trainers):
        proc_env = {
            "PADDLE_TRAINER_ID": "%d" % t.rank,
            "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint,
            "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(),
            "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
            "PADDLE_RANK_IN_NODE": str(idx),
            "PADDLE_LOCAL_DEVICE_IDS": ",".join(
                [str(acc) for acc in t.accelerators]
            ),
            "PADDLE_WORLD_DEVICE_IDS": ",".join(res),
        }

        # The following three environment variables are used for auto mapping
        if current_env.get("PADDLE_CLUSTER_TOPO_PATH", None) is not None:
            proc_env["PADDLE_CLUSTER_TOPO_PATH"] = current_env[
                "PADDLE_CLUSTER_TOPO_PATH"
            ]
        if current_env.get("PADDLE_RANK_MAPPING_PATH", None) is not None:
            proc_env["PADDLE_RANK_MAPPING_PATH"] = current_env[
                "PADDLE_RANK_MAPPING_PATH"
            ]
        if current_env.get("PADDLE_ENABLE_AUTO_MAPPING", None) is not None:
            proc_env["PADDLE_ENABLE_AUTO_MAPPING"] = current_env[
                "PADDLE_ENABLE_AUTO_MAPPING"
            ]

        if len(t.accelerators) > 0 and pod.device_mode == DeviceMode.GPU:
            proc_env["FLAGS_selected_gpus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators]
            )

        elif (
            len(t.accelerators) > 0 and pod.device_mode == DeviceMode.ASCEND_NPU
        ):
            proc_env["FLAGS_selected_npus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators]
            )
        elif len(t.accelerators) > 0 and pod.device_mode == DeviceMode.MLU:
            proc_env["FLAGS_selected_mlus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators]
            )

        if len(t.accelerators) > 0:
            proc_env["FLAGS_selected_accelerators"] = "%s" % ",".join(
                [str(g) for g in t.accelerators]
            )
        # TODO: unify the code style here in the future
        if fluid.core.is_compiled_with_xpu() and len(t.accelerators) > 0:
            proc_env["FLAGS_selected_xpus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators]
            )

        current_env.update(proc_env)

        coverage_args = []
        if (
            run_with_coverage()
            or os.environ.get("WITH_COVERAGE", "OFF") == "ON"
        ):
            coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
        cmd = (
            [sys.executable, "-u"]
            + coverage_args
            + [training_script]
            + training_script_args
        )

        logger.debug("start trainer proc:{} env:{}".format(cmd, current_env))

        if idx == 0:
            logger.info(
                "Local start {} processes. First process distributed "
                "environment info (Only For Debug): {}".format(
                    len(pod.trainers),
                    pretty_print_envs(proc_env, ("Distributed Envs", "Value")),
                )
            )
            logger.info(
                "details about PADDLE_TRAINER_ENDPOINTS can be found in "
                "{}/endpoints.log, and detailed running logs may be found in "
                "{}/workerlog.0".format(log_dir, log_dir)
            )
        fn = None
        pre_fn = None if os.name == 'nt' else os.setsid
        if log_dir is not None:
            os.system("mkdir -p {}".format(log_dir))
            if os.path.exists("%s/endpoints.log" % log_dir):
                os.system("rm -f {}/endpoints.log".format(log_dir))
            with open("%s/endpoints.log" % log_dir, "w") as f:
                f.write("PADDLE_TRAINER_ENDPOINTS: \n")
                f.write("\n".join(cluster.trainers_endpoints()))
            if (
                current_env.get("PADDLE_ENABLE_AUTO_MAPPING") is not None
                and current_env.get("PADDLE_NEED_RANK_MAPPING").lower()
                == "true"
            ):
                fn = open("%s/prelaunchlog.%d" % (log_dir, idx), "a")
            else:
                fn = open("%s/workerlog.%d" % (log_dir, idx), "a")
            proc = subprocess.Popen(
                cmd, env=current_env, stdout=fn, stderr=fn, preexec_fn=pre_fn
            )
        else:
            proc = subprocess.Popen(cmd, env=current_env, preexec_fn=pre_fn)

        tp = TrainerProc()
        tp.proc = proc
        tp.rank = t.rank
        tp.local_rank = idx
        tp.log_fn = fn
        tp.log_offset = fn.tell() if fn else None
        tp.cmd = cmd

        procs.append(tp)

    return procs


def pull_worker_log(tp):
    if tp.log_fn:
        with open(tp.log_fn.name, 'r') as fin:
            fin.seek(tp.log_offset, 0)
            for line in fin:
                try:
                    sys.stdout.write(line)
                except UnicodeEncodeError:
                    sys.stdout.write(
                        'UnicodeEncodeError occurs at this line. '
                        'Please refer to the original log file "%s"\n'
                        % tp.log_fn.name
                    )
            tp.log_offset = fin.tell()


def watch_local_trainers(procs, nranks):
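    # Poll the local trainer processes once: tail the rank-0 log, report
    # whether any process is still running, and terminate everything on the
    # first non-zero exit code.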
    try:
        error = False
        error_rank = []
        # wait all process finish or one error
        alive = False
        for p in procs:
            if p.log_fn and p.local_rank == 0:
                pull_worker_log(p)

            ret = p.proc.poll()
            if ret is None:
                alive = True
            elif ret != 0:
                error = True
                error_rank.append(p.rank)

        if error:
            terminate_local_procs(procs)
            exit(1)

    except KeyboardInterrupt:
        logger.warning("KeyboardInterrupt, exit")
        terminate_local_procs(procs)
        return
    except SystemExit:
        logger.error(
            "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".format(
                nranks, error_rank
            )
        )
        terminate_local_procs(procs)
        raise
    except:
        logger.error(
            "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".format(
                nranks, error_rank
            )
        )
        terminate_local_procs(procs)
        return

    return alive


def get_gpus(gpus):
    if gpus is None:
        gpus_num = fluid.core.get_cuda_device_count()
        res_gpus = [str(x) for x in range(0, gpus_num)]
    else:
        cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices is None or cuda_visible_devices == "":
            res_gpus = [x.strip() for x in gpus.split(',')]
        else:
            # change gpus into relative values
            # e.g. CUDA_VISIBLE_DEVICES=4,5,6,7; args.gpus=4,5,6,7;
            # therefore gpus=0,1,2,3
            cuda_visible_devices_list = cuda_visible_devices.split(',')
            for x in gpus.split(','):
                assert x in cuda_visible_devices_list, (
                    "Can't find "
                    "your gpus %s in CUDA_VISIBLE_DEVICES[%s]."
                    % (x, cuda_visible_devices)
                )
            res_gpus = [
                cuda_visible_devices_list.index(x.strip())
                for x in gpus.split(',')
            ]
            logger.info(
                "Change selected_gpus into relative values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "CUDA_VISIBLE_DEVICES:{}".format(
                    gpus, res_gpus, cuda_visible_devices_list
                )
            )

    return res_gpus


def get_xpus(xpus):
    if xpus is None:
        xpus_num = fluid.core.get_xpu_device_count()
        res_xpus = [str(x) for x in range(0, xpus_num)]
    else:
        xpu_visible_devices = os.getenv("XPU_VISIBLE_DEVICES")
        if xpu_visible_devices is None or xpu_visible_devices == "":
            res_xpus = [x.strip() for x in xpus.split(',')]
        else:
            # change xpus into relative values
            # e.g. XPU_VISIBLE_DEVICES=4,5,6,7; args.xpus=4,5,6,7;
            # therefore xpus=0,1,2,3
            xpu_visible_devices_list = xpu_visible_devices.split(',')
            for x in xpus.split(','):
                assert (
                    x in xpu_visible_devices_list
                ), "Can't find " "your xpus %s in XPU_VISIBLE_DEVICES[%s]." % (
                    x,
                    xpu_visible_devices,
                )
            res_xpus = [
                xpu_visible_devices_list.index(x.strip())
                for x in xpus.split(',')
            ]
            logger.info(
                "Change selected_xpus into relative values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "XPU_VISIBLE_DEVICES:{}".format(
                    xpus, res_xpus, xpu_visible_devices_list
                )
            )

    return res_xpus


def get_npus(npus):
    if npus is None:
        npus_num = fluid.core.get_npu_device_count()
        res_npus = [str(x) for x in range(0, npus_num)]
    else:
        npu_visible_devices = os.getenv("ASCEND_VISIBLE_DEVICES")
        if npu_visible_devices is None or npu_visible_devices == "":
            res_npus = [x.strip() for x in npus.split(',')]
        else:
            # change npus into relative values
            # e.g. ASCEND_VISIBLE_DEVICES=4,5,6,7; args.npus=4,5,6,7;
            # therefore npus=0,1,2,3
            npu_visible_devices_list = npu_visible_devices.split(',')
            for x in npus.split(','):
                assert x in npu_visible_devices_list, (
                    "Can't find "
                    "your npus %s in ASCEND_VISIBLE_DEVICES[%s]."
                    % (x, npu_visible_devices)
                )
            res_npus = [
                npu_visible_devices_list.index(x.strip())
                for x in npus.split(',')
            ]
            logger.info(
                "Change selected_npus into relative values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "ASCEND_VISIBLE_DEVICES:{}".format(
                    npus, res_npus, npu_visible_devices_list
                )
            )

    return res_npus


def get_mlus(mlus):
    if mlus is None:
        mlus_num = fluid.core.get_mlu_device_count()
        res_mlus = [str(x) for x in range(0, mlus_num)]
    else:
        mlu_visible_devices = os.getenv("MLU_VISIBLE_DEVICES")
        if mlu_visible_devices is None or mlu_visible_devices == "":
            res_mlus = [x.strip() for x in mlus.split(',')]
        else:
            # change mlus into relative values
            # e.g. MLU_VISIBLE_DEVICES=4,5,6,7; args.mlus=4,5,6,7;
            # therefore mlus=0,1,2,3
            mlu_visible_devices_list = mlu_visible_devices.split(',')
            for x in mlus.split(','):
                assert (
                    x in mlu_visible_devices_list
                ), "Can't find " "your mlus %s in MLU_VISIBLE_DEVICES[%s]." % (
                    x,
                    mlu_visible_devices,
                )
            res_mlus = [
                mlu_visible_devices_list.index(x.strip())
                for x in mlus.split(',')
            ]
            logger.info(
                "Change selected_mlus into relative values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "MLU_VISIBLE_DEVICES:{}".format(
                    mlus, res_mlus, mlu_visible_devices_list
                )
            )

    return res_mlus


def get_device_mode(backend):
    if backend == 'heter':
        if (
            fluid.core.is_compiled_with_cuda()
            and fluid.core.get_cuda_device_count() > 0
        ):
            print("launch train in heter mode with GPU device.")
            return DeviceMode.GPU
        if (
            fluid.core.is_compiled_with_xpu()
            and fluid.core.get_xpu_device_count() > 0
        ):
            print("launch train in heter mode with XPU device.")
            return DeviceMode.XPU
        if (
            fluid.core.is_compiled_with_npu()
            and fluid.core.get_npu_device_count() > 0
        ):
            print("launch train in heter mode with NPU device.")
            return DeviceMode.ASCEND_NPU

    if backend == 'hccl' and fluid.core.get_npu_device_count() > 0:
        print("launch train in ascend npu mode!")
        return DeviceMode.ASCEND_NPU

    if backend == 'nccl' and fluid.core.get_cuda_device_count() > 0:
        print("launch train in GPU mode!")
        return DeviceMode.GPU

    if backend == 'bkcl' and fluid.core.get_xpu_device_count() > 0:
        print("launch train in XPU mode")
        return DeviceMode.XPU

    if backend == 'cncl' and fluid.core.get_mlu_device_count() > 0:
        print("launch train in MLU mode")
        return DeviceMode.MLU

    if backend == 'gloo':
        print("launch train in CPU mode")
        return DeviceMode.CPU

    raise RuntimeError("Unsupported backend: {}.".format(backend))


def get_device_proc_info(args):
    # device_mode
    device_mode = get_device_mode(args.backend)

    # devices
    devices_per_proc = []
    if device_mode == DeviceMode.GPU:
        gpus = get_gpus(args.gpus)
        if args.nproc_per_node is not None:
            assert (
                len(gpus) % int(args.nproc_per_node)
            ) == 0, "gpus' number:{} mod args.nproc_per_node:{} must == 0".format(
                len(gpus), args.nproc_per_node
            )

            n = int(len(gpus) / int(args.nproc_per_node))
            devices_per_proc = [gpus[i : i + n] for i in range(0, len(gpus), n)]
        else:
            devices_per_proc = gpus
    elif device_mode == DeviceMode.ASCEND_NPU:
        npus = get_npus(args.npus)
        if args.nproc_per_node is not None:
            assert (
                len(npus) % int(args.nproc_per_node)
            ) == 0, "npus' number:{} mod args.nproc_per_node:{} must == 0".format(
                len(npus), args.nproc_per_node
            )

            n = int(len(npus) / int(args.nproc_per_node))
            devices_per_proc = [npus[i : i + n] for i in range(0, len(npus), n)]
        else:
            devices_per_proc = npus
    elif device_mode == DeviceMode.XPU:
        xpus = get_xpus(args.xpus)
        if args.nproc_per_node is not None:
            assert (
                len(xpus) % int(args.nproc_per_node)
            ) == 0, "xpus' number:{} mod args.nproc_per_node:{} must == 0".format(
                len(xpus), args.nproc_per_node
            )

            n = int(len(xpus) / int(args.nproc_per_node))
            devices_per_proc = [xpus[i : i + n] for i in range(0, len(xpus), n)]
        else:
            devices_per_proc = xpus
    elif device_mode == DeviceMode.MLU:
        mlus = get_mlus(args.mlus)
        if args.nproc_per_node is not None:
            assert (
                len(mlus) % int(args.nproc_per_node)
            ) == 0, "mlus' number:{} mod args.nproc_per_node:{} must == 0".format(
                len(mlus), args.nproc_per_node
            )

            n = int(len(mlus) / int(args.nproc_per_node))
            devices_per_proc = [mlus[i : i + n] for i in range(0, len(mlus), n)]
        else:
            devices_per_proc = mlus
    elif device_mode == DeviceMode.CPU:
        if hasattr(args, "paddle_cpuonly") and args.nproc_per_node is None:
            # NOTE (xiongkun03) set it to the cpu core number
            args.nproc_per_node = multiprocessing.cpu_count()
        if args.nproc_per_node is None:
            devices_per_proc = [0]
        else:
            devices_per_proc = [x for x in range(0, args.nproc_per_node)]
    else:
        assert (
            False
        ), "Can't support device_mode:{}, supported modes are cpu|gpu|xpu|npu|mlu.".format(
            device_mode
        )

    return (device_mode, devices_per_proc)
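
# A hypothetical example: with 4 visible GPUs and --nproc_per_node=2,
# get_device_proc_info returns (DeviceMode.GPU, [['0', '1'], ['2', '3']]).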


def direct_start(args):
    # run ps-cpu mode on paddlecloud, using given envs
    cmd = [
        sys.executable,
        "-u",
        args.training_script,
    ] + args.training_script_args
    proc = subprocess.Popen(cmd)
    proc.wait()
    return


def get_custom_endpoints(origin_endpoints, offset=0):
    """
    origin_endpoint: ip:port
    user_define_endpoint: ip:(port+offset)
    """
    assert origin_endpoints is not None
    paddle_user_define_endpoints_list = []
    for ip_port in origin_endpoints.split(","):
        ip = ip_port.split(":")[0]
        port = ip_port.split(":")[1]
        new_port = int(port) + offset
        paddle_user_define_endpoints_list.append(":".join((ip, str(new_port))))
    paddle_user_define_endpoints = ",".join(paddle_user_define_endpoints_list)
    return paddle_user_define_endpoints
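
# Example: get_custom_endpoints("10.0.0.1:6170,10.0.0.2:6170", offset=1)
#   -> "10.0.0.1:6171,10.0.0.2:6171"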


# def cloud_ps_heter_env_set(args):
#    environs = {}
#
#    paddle_trainer_endpoints = os.getenv("TRAINER_IP_PORT_LIST", "")
#    assert paddle_trainer_endpoints != None
#
#    paddle_pserver_endpoints = os.getenv("PSERVER_IP_PORT_LIST", "")
#    assert paddle_pserver_endpoints != None
#
#    # hard code for paddlecloud custom-framework
#    avilable_ports = os.getenv("TRAINER_PORTS", "").split(",")
#    assert len(
#        avilable_ports
#    ) >= 2, "set paddle_ports_num >= 2 in config.ini for paddlecloud job submit"
#
#    # hard code for paddlecloud custom-framework
#    trainers_num = len(paddle_pserver_endpoints.split(","))
#    assert trainers_num != 0
#    environs["PADDLE_TRAINERS_NUM"] = trainers_num
#    environs["TRAINERS_NUM"] = trainers_num
#
#    # hard code for paddlecloud custom-framework
#    environs["PADDLE_HETER_TRAINER_IP_PORT_LIST"] = paddle_trainer_endpoints
#    environs["PADDLE_PSERVERS_IP_PORT_LIST"] = paddle_pserver_endpoints
#    environs["PADDLE_TRAINER_ENDPOINTS"] = get_custom_endpoints(
#        paddle_pserver_endpoints, 1)
#    heter_worker_num = len(paddle_trainer_endpoints.split(","))
#    if (args.heter_worker_num != None) and (
#            heter_worker_num != args.heter_worker_num):
#        warnings.warn(
#            "Your fleetrun setting: heter_worker_num is {}, but we find {} device can be used, this setting has been changed.".
#            format(args.heter_worker_num, heter_worker_num))
#        args.heter_worker_num = heter_worker_num
#
#    for k, v in environs.items():
#        os.environ[k] = str(v)
#    logger.info("Set heter parameter server env: {}".format(
#        pretty_print_envs(environs)))


def get_mapped_cluster_without_rank_mapping(
    node_ips, node_ip, trainer_endpoints, device_mode, node_ranks
):
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    assert (
        device_mode == DeviceMode.GPU
    ), "Only support get mapped cluster for gpu now."
    cluster = Cluster(hdfs=None)
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode
        cur_node_endpoints = trainer_endpoints[node_rank]

        # choose rank from global mapped ranks and set it to the trainer.
        ranks_per_node = node_ranks[node_rank]
        assert len(ranks_per_node) == 1
        for i in range(len(ranks_per_node)):
            trainer = Trainer()
            trainer.endpoint = "%s" % (cur_node_endpoints[i])
            trainer.rank = ranks_per_node[i]
            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]


def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode):
    assert (
        device_mode == DeviceMode.GPU
    ), "Only support get mapped cluster for gpu now."
    gpus_num = fluid.core.get_cuda_device_count()

    # parse ip-ranks json file
    cluster_topo = None
    with open(args.cluster_topo_path, "r") as json_file:
        cluster_topo = json.load(json_file)
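    # cluster_topo is expected to look like (illustrative):
    #   {"machines": [{"addr": "10.0.0.1", ...}, {"addr": "10.0.0.2", ...}]}
    # only the "addr" field of each machine is used here.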

    node_ips = []
    node_ranks = []
    for idx, cur_cluster_topo in enumerate(cluster_topo["machines"]):
        node_ips.append(cur_cluster_topo['addr'])
        node_ranks.append([idx])

    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        if args.host:
            node_ip = args.host
        else:
            _, node_ip = get_host_name_ip()

    assert (
        node_ip in node_ips
    ), "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    assert len(node_ranks) == len(
        node_ips
    ), "ranks length should be equal to ips length."

    logger.debug(
        "parsed from args: node_ips:{} node_ip:{} "
        "node_rank:{} node_ranks:{}".format(
            node_ips, node_ip, node_rank, node_ranks[node_rank]
        )
    )

    # NOTE: there are different number of global mapped ranks on each node.
    free_ports = []
    trainer_endpoints = []
    for ip in node_ips:
        node_rank = node_ips.index(ip)
        if os.environ.get('PADDLE_PORT') is not None:
            start_port = int(os.getenv("PADDLE_PORT", ""))
            free_ports = [
                x
                for x in range(
                    start_port, start_port + len(node_ranks[node_rank])
                )
            ]
        elif os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))
            free_ports = [
                x
                for x in range(
                    start_port, start_port + len(node_ranks[node_rank])
                )
            ]
        else:
            free_ports = find_free_ports(len(node_ranks[node_rank]))
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])

    return get_mapped_cluster_without_rank_mapping(
        node_ips, node_ip, trainer_endpoints, device_mode, node_ranks
    )


def get_mapped_cluster_with_rank_mapping(
    node_ips,
    node_ip,
    trainer_endpoints,
    device_mode,
    node_ranks,
    node_rank_mappings,
):
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    assert (
        device_mode == DeviceMode.GPU
    ), "Only support get mapped cluster for gpu now."

    def get_relative_gpu_id(gpu_id):
        cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices is None or cuda_visible_devices == "":
            return gpu_id
        else:
            cuda_visible_devices_list = cuda_visible_devices.split(',')
            relative_id = cuda_visible_devices_list.index(str(gpu_id))
            logger.info(
                "Change gpu id from {} to {} based on CUDA_VISIBLE_DEVICES {}".format(
                    gpu_id, relative_id, cuda_visible_devices_list
                )
            )
            return relative_id

    cluster = Cluster(hdfs=None)
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode
        cur_node_endpoints = trainer_endpoints[node_rank]
        # choose rank from global mapped ranks and set it to the trainer.
        ranks_per_node = node_ranks[node_rank]
        cur_node_rank_mapping = node_rank_mappings[node_rank]
        for i in range(len(ranks_per_node)):
            trainer = Trainer()
            local_device_ids = cur_node_rank_mapping["ranks"][
                str(ranks_per_node[i])
            ]
            assert (
                len(local_device_ids) == 1
            ), "Only support one process to one device mapping"
            trainer.accelerators.append(
                get_relative_gpu_id(local_device_ids[0])
            )
            trainer.endpoint = "%s" % (cur_node_endpoints[i])
            trainer.rank = ranks_per_node[i]
            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]


def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
    assert (
        device_mode == DeviceMode.GPU
    ), "Only support get mapped cluster for gpu now."
    gpus_num = fluid.core.get_cuda_device_count()

    # parse ip-ranks json file
    rank_mapping_path = args.rank_mapping_path or os.getenv(
        "PADDLE_RANK_MAPPING_PATH"
    )
    rank_mapping = None
    with open(rank_mapping_path, "r") as json_file:
        rank_mapping = json.load(json_file)
    # reset PADDLE_RANK_MAPPING_PATH env
    os.environ["PADDLE_RANK_MAPPING_PATH"] = ""

    node_ips = []
    node_ranks = []
    node_rank_mappings = []
    for cur_rank_mapping in rank_mapping:
        node_ips.append(cur_rank_mapping['addr'])
        cur_node_rank_list = [
            int(i) for i in list(cur_rank_mapping['ranks'].keys())
        ]
        cur_node_rank_list.sort()
        node_ranks.append(cur_node_rank_list)
        node_rank_mappings.append(cur_rank_mapping)

    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        if args.host:
            node_ip = args.host
        else:
            _, node_ip = get_host_name_ip()

    assert (
        node_ip in node_ips
    ), "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    assert (
        len(node_ranks[node_rank]) <= gpus_num
    ), "number of ranks mapped to one node should not exceed the available ones."
    assert len(node_ranks) == len(
        node_ips
    ), "ranks length should be equal to ips length."

    logger.debug(
        "parsed from args: node_ips:{} node_ip:{} "
        "node_rank:{} node_ranks:{}".format(
            node_ips, node_ip, node_rank, node_ranks[node_rank]
        )
    )

    # NOTE: there are different number of global mapped ranks on each node.
    free_ports = []
    trainer_endpoints = []
    for ip in node_ips:
        node_rank = node_ips.index(ip)
        if os.environ.get('PADDLE_PORT') is not None:
            start_port = int(os.getenv("PADDLE_PORT", ""))
            free_ports = [
                x
                for x in range(
                    start_port, start_port + len(node_ranks[node_rank])
                )
            ]
        elif os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))
            free_ports = [
                x
                for x in range(
                    start_port, start_port + len(node_ranks[node_rank])
                )
            ]
        else:
            free_ports = find_free_ports(len(node_ranks[node_rank]))
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])

    return get_mapped_cluster_with_rank_mapping(
        node_ips,
        node_ip,
        trainer_endpoints,
        device_mode,
        node_ranks,
        node_rank_mappings,
    )


class ParameterServerLauncher(object):
    def __init__(self, args, distribute_mode):
        self.args = args
        self.distribute_mode = distribute_mode
        self.with_coordinator = False
        self.server_num = 0
        self.worker_num = 0
        self.heter_worker_num = 0
        self.coordinator_num = 0

        self.server_endpoints = ""
        self.server_endpoints_ips = []
        self.server_endpoints_port = []

        self.worker_endpoints = ""
        self.worker_endpoints_ips = []
        self.worker_endpoints_port = []

        self.heter_worker_endpoints = ""
        self.heter_worker_endpoints_ips = []
        self.heter_worker_endpoints_port = []

        self.coordinator_endpoints = ""
        self.coordinator_endpoints_ips = []
        self.coordinator_endpoints_port = []

        self.is_local = True
        self.current_node_ip = ""

        self.stage_trainer_num = []
        self.stage_heter_map = {}
        self.stage_list = []
        self.stage_device_map = {}
        self.stage_num = 0

        self.get_role_endpoints(args)

    def get_role_endpoints(self, args):
        if args.server_num:
            self.server_num = args.server_num
            if args.servers:
                assert (
                    len(args.servers.split(",")) == self.server_num
                ), "The server_num and servers don't match. Expect the number of server endpoints to equal server_num, but received server endpoint num: {} and server_num: {}".format(
                    len(args.servers.split(",")), self.server_num
                )
                self.server_endpoints = args.servers
            else:
                ports = get_ports(self.server_num, 0)
                self.server_endpoints = ",".join(
                    ["127.0.0.1:" + str(x) for x in ports]
                )
        else:
            assert (
                args.servers != ""
            ), "The setting of Parameter-Server must have server_num or servers."
            self.server_endpoints = args.servers
            self.server_num = len(self.server_endpoints.split(","))

        # get worker envs
        if args.worker_num:
            self.worker_num = args.worker_num
            if args.workers:
                assert (
                    len(args.workers.split(",")) == self.worker_num
                ), "The worker_num and workers don't match. Expect the number of worker endpoints to equal worker_num, but received worker endpoint num: {} and worker_num: {}".format(
                    len(args.workers.split(",")), self.worker_num
                )

                self.worker_endpoints = args.workers
            else:
                ports = get_ports(self.worker_num, self.server_num)
                self.worker_endpoints = ",".join(
                    ["127.0.0.1:" + str(x) for x in ports]
                )
        else:
            assert (
                args.workers != ""
            ), "The setting of Parameter-Server must have worker_num or workers."
            worker_endpoints_ips = [
                x.strip().split(":")[0] for x in args.workers.split(",")
            ]
            self.worker_num = len(worker_endpoints_ips)
            worker_endpoints_len = [
                len(x.strip().split(":")) for x in args.workers.split(",")
            ]

            if 1 in worker_endpoints_len:
                # if no port value in worker_endpoints, will set default port values.
                start_port = 6170
                worker_endpoints_port = range(
                    start_port + self.server_num,
                    start_port + self.server_num + self.worker_num,
                    1,
                )
                # create endpoints str
                worker_endpoints = []
                for i in range(self.worker_num):
                    worker_endpoints.append(
                        ":".join(
                            (
                                worker_endpoints_ips[i],
                                str(worker_endpoints_port[i]),
                            )
                        )
                    )
                self.worker_endpoints = ",".join(worker_endpoints)
            else:
                self.worker_endpoints = args.workers

        # get coordinator envs
        if args.coordinator_num:
            self.with_coordinator = True
            self.coordinator_num = args.coordinator_num
            if args.coordinators:
                assert (
                    len(args.coordinators.split(",")) == self.coordinator_num
                ), "The coordinator_num and coordinators don't match. Expect the number of coordinator endpoints to equal coordinator_num, but received coordinator endpoint num: {} and coordinator_num: {}".format(
                    len(args.coordinators.split(",")), self.coordinator_num
                )

                self.coordinator_endpoints = args.coordinators
            else:
                ports = get_ports(self.coordinator_num, 1)
                self.coordinator_endpoints = ",".join(
                    ["127.0.0.1:" + str(x) for x in ports]
                )
                print(">>> use default coordinator addr(only one process)")

        # get heter worker envs
        if self.distribute_mode == DistributeMode.PS_HETER:
            assert (
                args.heter_devices != ""
            ), "The setting of Parameter-Server heter mode must have heter_devices."
            self.stage_device_map[1] = "cpu"  # for cpu trainer
            heter_devices_list = args.heter_devices.split(";")
            for i in range(len(heter_devices_list)):
                self.stage_device_map[i + 2] = heter_devices_list[i]

            self.stage_heter_map[1] = self.worker_endpoints
            if args.heter_worker_num:
                self.stage_heter_trainer_num = args.heter_worker_num.split(";")
                self.stage_heter_trainer_num = [
                    int(trainer_num)
                    for trainer_num in self.stage_heter_trainer_num
                ]

                if args.heter_workers:
                    assert len(args.heter_workers.split(";")) == len(
                        self.stage_heter_trainer_num
                    ), "The stage_num and heter_workers doesn't match. Expect heter_workers endpoints stage num epual to heter_worker_num stage, but received heter_workers enpoint stage num: {} and heter_worker_num stage {}".format(
                        len(args.heter_workers.split(";")),
1433 1434
                        len(self.stage_heter_trainer_num),
                    )
1435 1436 1437 1438 1439 1440
                    heter_worker_endpoints_list = args.heter_workers.split(";")
                    self.heter_worker_endpoints = ""
                    for i in range(len(self.stage_heter_trainer_num)):
                        if self.heter_worker_endpoints != "":
                            self.heter_worker_endpoints += ","
                        heter_worker_endpoints = heter_worker_endpoints_list[
1441 1442 1443 1444 1445 1446 1447 1448
                            i
                        ].split(",")
                        assert (
                            len(heter_worker_endpoints)
                            == self.stage_heter_trainer_num[i]
                        ), "The heter trainer num in stage {} is not equal in args.heter_worker_num and args.heter_workers".format(
                            i
                        )
1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461

                        heter_worker_endpoints_ips = [
                            x.strip().split(":")[0]
                            for x in heter_worker_endpoints
                        ]
                        heter_worker_endpoints_len = [
                            len(x.strip().split(":"))
                            for x in heter_worker_endpoints
                        ]

                        if 1 in heter_worker_endpoints_len:
                            # if no port value in heter_worker_endpoint, will set default port values.
                            heter_worker_endpoints_port = get_ports(
                                len(heter_worker_endpoints_ips),
                                self.worker_num
                                + self.server_num
                                + self.heter_worker_num,
                            )
                            new_heter_worker_endpoints = []
                            for j in range(len(heter_worker_endpoints_ips)):
                                new_heter_worker_endpoints.append(
                                    ":".join(
                                        (
                                            heter_worker_endpoints_ips[j],
                                            str(heter_worker_endpoints_port[j]),
                                        )
                                    )
                                )
1477 1478 1479 1480 1481
                            ip_port_list = ",".join(new_heter_worker_endpoints)
                        else:
                            ip_port_list = ",".join(heter_worker_endpoints)

                        self.stage_heter_map[i + 2] = ip_port_list
1482 1483 1484
                        self.stage_list.extend(
                            [i + 2] * len(ip_port_list.split(','))
                        )
1485 1486 1487

                        self.heter_worker_num += self.stage_heter_trainer_num[i]
                        self.heter_worker_endpoints += ip_port_list
                else:
                    for i in range(len(self.stage_heter_trainer_num)):
                        heter_trainer_num = self.stage_heter_trainer_num[i]
                        ports = get_ports(
                            heter_trainer_num,
                            self.server_num
                            + self.worker_num
                            + self.heter_worker_num,
                        )
                        ip_port_list = ",".join(
                            ["127.0.0.1:" + str(x) for x in ports]
                        )
                        self.stage_heter_map[i + 2] = ip_port_list
                        self.stage_list.extend(
                            [i + 2] * len(ip_port_list.split(','))
                        )
                        self.heter_worker_num += heter_trainer_num
                        if self.heter_worker_endpoints != "":
                            self.heter_worker_endpoints += ","
                        self.heter_worker_endpoints += ip_port_list
            else:
                assert (
                    args.heter_workers != ""
                ), "Parameter-Server heter mode requires heter_worker_num or heter_workers to be set."
                self.stage_heter_trainer_num = []
                heter_worker_endpoints_list = args.heter_workers.split(";")
                self.heter_worker_endpoints = ""
                for i in range(len(heter_worker_endpoints_list)):
                    heter_worker_endpoints = heter_worker_endpoints_list[
                        i
                    ].split(",")
                    self.stage_heter_trainer_num.append(
                        len(heter_worker_endpoints)
                    )
                    heter_worker_endpoints_ips = [
                        x.strip().split(":")[0] for x in heter_worker_endpoints
                    ]
                    heter_worker_endpoints_len = [
                        len(x.strip().split(":"))
                        for x in heter_worker_endpoints
                    ]
                    if 1 in heter_worker_endpoints_len:
                        # if no port is given in a heter_worker endpoint, assign default port values.
                        heter_worker_endpoints_port = get_ports(
                            len(heter_worker_endpoints_ips),
                            self.worker_num
                            + self.server_num
                            + self.heter_worker_num,
                        )

                        new_heter_worker_endpoints = []
                        for j in range(len(heter_worker_endpoints_ips)):
                            new_heter_worker_endpoints.append(
                                ":".join(
                                    (
                                        heter_worker_endpoints_ips[j],
                                        str(heter_worker_endpoints_port[j]),
                                    )
                                )
                            )
                        ip_port_list = ",".join(new_heter_worker_endpoints)
                    else:
                        ip_port_list = ",".join(heter_worker_endpoints)

                    self.stage_heter_map[i + 2] = ip_port_list
                    self.stage_list.extend(
                        [i + 2] * len(ip_port_list.split(','))
                    )

                    self.heter_worker_num += self.stage_heter_trainer_num[-1]
                    if self.heter_worker_endpoints != "":
                        self.heter_worker_endpoints += ","
                    self.heter_worker_endpoints += ip_port_list

            self.stage_trainer_num = [
                self.worker_num
            ] + self.stage_heter_trainer_num
            self.stage_num = len(self.stage_trainer_num)
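            # e.g. (illustrative) with 2 cpu workers and --heter_worker_num "2;4",
            # stage_trainer_num == [2, 2, 4] and stage_num == 3.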

        # get the gloo http endpoint (ip:port)
        if args.http_port:
            http_port = [args.http_port]
        else:
            http_port = get_ports(
                1, self.server_num + self.worker_num + self.heter_worker_num
            )
        http_ip = self.server_endpoints.split(",")[0].split(":")[0]
        self.http_port = http_ip + ":" + str(http_port[0])
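        # note: despite its name, self.http_port ends up as a full "ip:port"
        # endpoint (the first server's ip plus the chosen port) used for the
        # gloo http rendezvous.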

        # check whether endpoints are local or user-defined
        self.server_endpoints_ips = [
            x.strip().split(":")[0] for x in self.server_endpoints.split(",")
        ]
        self.worker_endpoints_ips = [
            x.strip().split(":")[0] for x in self.worker_endpoints.split(",")
        ]

        if self.with_coordinator:
            self.coordinator_endpoints_ips = [
                x.strip().split(":")[0]
                for x in self.coordinator_endpoints.split(",")
            ]
            self.coordinator_endpoints_port = [
                x.strip().split(":")[1]
                for x in self.coordinator_endpoints.split(",")
            ]

        self.server_endpoints_port = [
            x.strip().split(":")[1] for x in self.server_endpoints.split(",")
        ]
        self.worker_endpoints_port = [
            x.strip().split(":")[1] for x in self.worker_endpoints.split(",")
        ]
        self.node_ips = []
        for ip in self.server_endpoints_ips:
            if ip not in self.node_ips:
                self.node_ips.append(ip)
        for ip in self.worker_endpoints_ips:
            if ip not in self.node_ips:
                self.node_ips.append(ip)

        if self.distribute_mode == DistributeMode.PS_HETER:
            self.heter_worker_endpoints_ips = [
                x.strip().split(":")[0]
                for x in self.heter_worker_endpoints.split(",")
            ]
            self.heter_worker_endpoints_port = [
                x.strip().split(":")[1]
                for x in self.heter_worker_endpoints.split(",")
            ]
            for ip in self.heter_worker_endpoints_ips:
                if ip not in self.node_ips:
                    self.node_ips.append(ip)

        if len(set(self.node_ips)) == 1:
            self.is_local = True
            self.current_node_ip = self.node_ips[0]
        else:
            self.is_local = False
            pod_ip = os.getenv("POD_IP", None)
            if pod_ip is None:
                _, self.current_node_ip = get_host_name_ip()
            else:
                self.current_node_ip = pod_ip
            if self.distribute_mode != DistributeMode.PS_HETER:
                assert self.current_node_ip in self.node_ips, (
                    "Can't find your local ip %s in args.servers and args.workers ips: %s"
                    % (self.current_node_ip, self.node_ips)
                )
        if self.current_node_ip in self.node_ips:
            self.node_rank = self.node_ips.index(self.current_node_ip)
            logger.debug(
                "parsed from args: node_ips:{} current_node_ip:{} node_rank:{}".format(
                    self.node_ips, self.current_node_ip, self.node_rank
                )
            )

    def start_ps(self):
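        """Launch the local part of the parameter-server job.

        Builds a Cluster of Pods from the parsed server/worker/coordinator/
        heter_worker endpoints, spawns the subprocesses that belong to this
        node's pod, waits for the workers (or, on worker-less nodes, the
        servers and heter_workers) to finish, and removes the gloo
        rendezvous directory afterwards.
        """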
        if self.current_node_ip not in self.node_ips:
            return
        cluster = Cluster(hdfs=None)
        server_rank = 0
        worker_rank = 0
        heter_worker_rank = 0
        coordinator_rank = 0
        for node_rank, ip in enumerate(self.node_ips):
            pod = Pod()
            pod.rank = node_rank
            pod.addr = ip
            for i in range(len(self.server_endpoints_ips)):
                if ip == self.server_endpoints_ips[i]:
                    server = Trainer()
                    server.endpoint = "%s:%s" % (
                        ip,
                        self.server_endpoints_port[i],
                    )
                    server.rank = server_rank
                    server_rank += 1
                    pod.servers.append(server)
            for j in range(len(self.worker_endpoints_ips)):
                if ip == self.worker_endpoints_ips[j]:
                    worker = Trainer()
                    worker.endpoint = "%s:%s" % (
                        ip,
                        self.worker_endpoints_port[j],
                    )
                    worker.rank = worker_rank
                    worker.stage = 1
                    worker_rank += 1
                    pod.workers.append(worker)
            for m in range(len(self.coordinator_endpoints_ips)):
                if ip == self.coordinator_endpoints_ips[m]:
                    coordinator = Trainer()
                    coordinator.endpoint = "%s:%s" % (
                        ip,
                        self.coordinator_endpoints_port[m],
                    )
                    coordinator.rank = coordinator_rank
                    coordinator.stage = 1
                    coordinator_rank += 1
                    pod.coordinators.append(coordinator)

            for k in range(len(self.heter_worker_endpoints_ips)):
                if ip == self.heter_worker_endpoints_ips[k]:
                    heter_worker = Trainer()
                    heter_worker.endpoint = "%s:%s" % (
                        ip,
                        self.heter_worker_endpoints_port[k],
                    )
                    heter_worker.rank = heter_worker_rank
                    heter_worker.stage = self.stage_list[k]
                    heter_worker_rank += 1
                    pod.heter_workers.append(heter_worker)

            cluster.pods.append(pod)

        pod = cluster.pods[self.node_rank]
        self.gloo_rendezvous_dir = tempfile.mkdtemp()

        # 3. subprocess start
        self.procs = {
            "worker": [],
            "coordinator": [],
            "server": [],
            "heter_worker": [],
        }
        self.cmds = {
            "worker": [],
            "coordinator": [],
            "server": [],
            "heter_worker": [],
        }
        self.log_fns = {
            "worker": [],
            "coordinator": [],
            "server": [],
            "heter_worker": [],
        }

        self.start_pod_server(self.args, pod)
        self.start_pod_worker(self.args, pod)
        if self.with_coordinator:
            self.start_pod_coordinator(self.args, pod)
        if self.distribute_mode == DistributeMode.PS_HETER:
            self.start_pod_heter_worker(self.args, pod)

        logger.info(
            "Please check server, worker, coordinator and heter_worker logs in {}/serverlog.*, {}/workerlog.*, {}/coordinator.*, and {}/heterlog.*".format(
                self.args.log_dir,
                self.args.log_dir,
                self.args.log_dir,
                self.args.log_dir,
            )
        )

        # 4. wait for training to finish
        if len(self.procs["worker"]) > 0:
            # if the node has worker procs,
            # only wait for the workers to finish here
            for i, proc in enumerate(self.procs["worker"]):
                self.procs["worker"][i].proc.wait()
                if len(self.log_fns["worker"]) > 0:
                    self.log_fns["worker"][i].close()
            logger.info(
                "all workers exited, going to shut down parameter servers and heter_workers."
            )
            if len(self.procs["heter_worker"]) > 0:
                for i, proc in enumerate(self.procs["heter_worker"]):
                    self.log_fns["heter_worker"][i].close()
                    self.procs["heter_worker"][i].proc.terminate()
                logger.info("all heter_worker are killed")

            if len(self.procs["server"]) > 0:
                for i, proc in enumerate(self.procs["server"]):
                    self.log_fns["server"][i].close()
                    self.procs["server"][i].proc.terminate()
                logger.info("all parameter server are killed")

            if len(self.procs["coordinator"]) > 0:
                for i, proc in enumerate(self.procs["coordinator"]):
                    self.log_fns["coordinator"][i].close()
                    self.procs["coordinator"][i].proc.terminate()
                logger.info("all coordinators are killed")

        else:
            # if the node has no worker procs,
            # block on the server/heter_worker processes instead
            if len(self.procs["server"]) > 0:
                for i, proc in enumerate(self.procs["server"]):
                    self.procs["server"][i].proc.wait()

            if len(self.procs["heter_worker"]) > 0:
                for i, proc in enumerate(self.procs["heter_worker"]):
                    self.procs["heter_worker"][i].proc.wait()

        if os.path.exists(self.gloo_rendezvous_dir):
            shutil.rmtree(self.gloo_rendezvous_dir)

    def start_pod_server(self, args, pod):
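        """Spawn one training-script subprocess per server assigned to this
        pod, handing it the PSERVER role and the cluster layout through
        environment variables."""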
        default_env = os.environ.copy()
        current_env = copy.copy(default_env)
        current_env.pop("http_proxy", None)
        current_env.pop("https_proxy", None)
        for idx, cur_server in enumerate(pod.servers):
            if self.distribute_mode == DistributeMode.PS_HETER:
                proc_env = {
                    "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
                    "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
                    "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints,
                    "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints,
                    "PADDLE_PORT": cur_server.endpoint.split(":")[1],
                    "TRAINING_ROLE": "PSERVER",
                    "PADDLE_TRAINERS_NUM": str(self.worker_num),
                    "POD_IP": cur_server.endpoint.split(":")[0],
                    "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
                    "PADDLE_GLOO_RENDEZVOUS": "3",
                    "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
                    "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port,
                }
            else:
                proc_env = {
                    "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
                    "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
                    "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints,
                    "PADDLE_PORT": cur_server.endpoint.split(":")[1],
                    "TRAINING_ROLE": "PSERVER",
                    "PADDLE_TRAINERS_NUM": str(self.worker_num),
                    "POD_IP": cur_server.endpoint.split(":")[0],
                    "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
                    "PADDLE_GLOO_RENDEZVOUS": "3",
                    "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
                    "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port,
                }
            current_env.update(proc_env)

            cmd = [
                sys.executable,
                "-u",
                args.training_script,
            ] + args.training_script_args
            self.cmds["server"].append(cmd)

            if idx == 0:
                logger.info(
                    "Local server start {} processes. First process distributed "
                    "environment info (Only For Debug): {}".format(
                        len(pod.servers),
                        pretty_print_envs(
                            proc_env, ("Distributed Envs", "Value")
                        ),
                    )
                )

            fn = None
            if args.log_dir is not None:
                os.system("mkdir -p {}".format(args.log_dir))
                fn = open("%s/serverlog.%d" % (args.log_dir, idx), "w")
                self.log_fns["server"].append(fn)
                proc = subprocess.Popen(
                    cmd, env=current_env, stdout=fn, stderr=fn
                )
            else:
                proc = subprocess.Popen(cmd, env=current_env)

            tp = TrainerProc()
            tp.proc = proc
            tp.rank = cur_server.rank
            tp.local_rank = idx
            tp.log_fn = fn
            tp.log_offset = fn.tell() if fn else None
            tp.cmd = cmd

            self.procs["server"].append(tp)

    def start_pod_worker(self, args, pod):
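        """Spawn one training-script subprocess per worker assigned to this
        pod with the TRAINER role; in PS_HETER mode the stage layout and the
        stage-2 heter endpoints are passed along as well."""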
        default_env = os.environ.copy()
        current_env = copy.copy(default_env)
        current_env.pop("http_proxy", None)
        current_env.pop("https_proxy", None)

        heter_device_num = 0
        device_list = []
        if fluid.core.is_compiled_with_cuda():
            device_list = get_gpus(args.gpus)
            heter_device_num = len(device_list)
        elif fluid.core.is_compiled_with_xpu():
            heter_device_num = fluid.core.get_xpu_device_count()
            device_list = [str(x) for x in range(0, heter_device_num)]
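        # Accelerators are assigned to workers round-robin below; on CPU-only
        # builds heter_device_num stays 0 and device_id falls back to "0".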

        for idx, cur_worker in enumerate(pod.workers):
            device_id = (
                "0"
                if heter_device_num == 0
                else str(device_list[idx % heter_device_num])
            )
            if self.distribute_mode == DistributeMode.PS_HETER:
                proc_env = {
                    "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
                    "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
                    "PADDLE_TRAINERS_NUM": str(self.worker_num),
                    "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints,
                    "PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num),
                    "STAGE_ID": "1",
                    "STAGE_NUM": str(self.stage_num),
                    "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": "",
                    "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST": self.stage_heter_map[
                        2
                    ],
                    "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints,
                    "HETER_DEVICE_TYPE": self.stage_device_map[1],
                    "TRAINING_ROLE": "TRAINER",
                    "POD_IP": cur_worker.endpoint.split(":")[0],
                    "PADDLE_PORT": cur_worker.endpoint.split(":")[1],
                    "PADDLE_TRAINER_ID": str(cur_worker.rank),
                    "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
                    "PADDLE_GLOO_RENDEZVOUS": "3",
                    "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
                    "FLAGS_selected_gpus": "0",
                    "FLAGS_selected_xpus": "0",
                    "CUDA_VISIBLE_DEVICES": device_id,
                    "XPU_VISIBLE_DEVICES": device_id,
                    "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port,
                }
            else:
                proc_env = {
                    "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
                    "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
                    "PADDLE_TRAINERS_NUM": str(self.worker_num),
                    "TRAINING_ROLE": "TRAINER",
                    "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints,
                    "POD_IP": cur_worker.endpoint.split(":")[0],
                    "PADDLE_PORT": cur_worker.endpoint.split(":")[1],
                    "PADDLE_TRAINER_ID": str(cur_worker.rank),
                    "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
                    "PADDLE_GLOO_RENDEZVOUS": "3",
                    "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
                    "FLAGS_selected_gpus": "0",
                    "FLAGS_selected_xpus": "0",
                    "CUDA_VISIBLE_DEVICES": device_id,
                    "XPU_VISIBLE_DEVICES": device_id,
                    "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port,
                }

            current_env.update(proc_env)
            cmd = [
                sys.executable,
                "-u",
                args.training_script,
            ] + args.training_script_args
            self.cmds["worker"].append(cmd)

            if idx == 0:
                logger.info(
                    "Local worker start {} processes. First process distributed "
                    "environment info (Only For Debug): {}".format(
                        len(pod.workers),
                        pretty_print_envs(
                            proc_env, ("Distributed Envs", "Value")
                        ),
                    )
                )

            fn = None
            if args.log_dir is not None:
                os.system("mkdir -p {}".format(args.log_dir))
                fn = open("%s/workerlog.%d" % (args.log_dir, idx), "w")
                self.log_fns["worker"].append(fn)
                proc = subprocess.Popen(
                    cmd, env=current_env, stdout=fn, stderr=fn
                )
            else:
                proc = subprocess.Popen(cmd, env=current_env)

            tp = TrainerProc()
            tp.proc = proc
            tp.rank = cur_worker.rank
            tp.local_rank = idx
            tp.log_fn = fn
            tp.log_offset = fn.tell() if fn else None
            tp.cmd = cmd

            self.procs["worker"].append(tp)

    def start_pod_coordinator(self, args, pod):
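        """Spawn one training-script subprocess per coordinator assigned to
        this pod with the COORDINATOR role."""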
        print(">>> entering start_pod_coordinator")
        default_env = os.environ.copy()
        current_env = copy.copy(default_env)
        current_env.pop("http_proxy", None)
        current_env.pop("https_proxy", None)

        for idx, cur_coordinator in enumerate(pod.coordinators):
            device_id = "0"
            proc_env = {
                "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
                "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
                "PADDLE_TRAINERS_NUM": str(self.worker_num),
                "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints,
                "PADDLE_COORDINATOR_NUM": str(self.coordinator_num),
                "TRAINING_ROLE": "COORDINATOR",
                "POD_IP": cur_coordinator.endpoint.split(":")[0],
                "PADDLE_PORT": cur_coordinator.endpoint.split(":")[1],
                "PADDLE_TRAINER_ID": str(cur_coordinator.rank),
                "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
                "PADDLE_GLOO_RENDEZVOUS": "3",
                "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
                "FLAGS_selected_gpus": "0",
                "FLAGS_selected_xpus": "0",
                "CUDA_VISIBLE_DEVICES": device_id,
                "XPU_VISIBLE_DEVICES": device_id,
                "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port,
            }

            current_env.update(proc_env)
            cmd = [
                sys.executable,
                "-u",
                args.training_script,
            ] + args.training_script_args
            self.cmds["coordinator"].append(cmd)

            if idx == 0:
                logger.info(
                    "Local coordinator start {} processes. First process distributed "
                    "environment info (Only For Debug): {}".format(
                        len(pod.coordinators),
                        pretty_print_envs(
                            proc_env, ("Distributed Envs", "Value")
                        ),
                    )
                )

            fn = None
            if args.log_dir is not None:
                os.system("mkdir -p {}".format(args.log_dir))
                fn = open("%s/coordinator.%d" % (args.log_dir, idx), "w")
                self.log_fns["coordinator"].append(fn)
                proc = subprocess.Popen(
                    cmd, env=current_env, stdout=fn, stderr=fn
                )
            else:
                proc = subprocess.Popen(cmd, env=current_env)

            tp = TrainerProc()
            tp.proc = proc
            tp.rank = cur_coordinator.rank
            tp.local_rank = idx
            tp.log_fn = fn
            tp.log_offset = fn.tell() if fn else None
            tp.cmd = cmd

            self.procs["coordinator"].append(tp)

    def start_pod_heter_worker(self, args, pod):
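        """Spawn one training-script subprocess per heter worker assigned to
        this pod with the HETER_TRAINER role, wiring in its stage id and the
        endpoints of the previous and next pipeline stages."""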
        default_env = os.environ.copy()
        current_env = copy.copy(default_env)
        current_env.pop("http_proxy", None)
        current_env.pop("https_proxy", None)

        heter_device_num = 0
        device_list = []
        if fluid.core.is_compiled_with_cuda():
            device_list = get_gpus(args.gpus)
            heter_device_num = len(device_list)
        elif fluid.core.is_compiled_with_xpu():
            heter_device_num = fluid.core.get_xpu_device_count()
            device_list = [str(x) for x in range(0, heter_device_num)]

        for idx, cur_heter_worker in enumerate(pod.heter_workers):
            device_id = (
                "0"
                if heter_device_num == 0
                else str(device_list[idx % heter_device_num])
            )
            stage_id = cur_heter_worker.stage
            proc_env = {
                "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
                "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
                "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST": self.stage_heter_map[
                    stage_id + 1
                ]
                if stage_id <= self.stage_num - 1
                else "",
                "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": self.stage_heter_map[
                    stage_id - 1
                ],
                "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints,
                "HETER_DEVICE_TYPE": self.stage_device_map[stage_id],
                "STAGE_ID": str(stage_id),
                "STAGE_NUM": str(self.stage_num),
                "PADDLE_PORT": cur_heter_worker.endpoint.split(":")[1],
                "TRAINING_ROLE": "HETER_TRAINER",
                "PADDLE_TRAINERS_NUM": str(self.worker_num),
                "PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num),
                "POD_IP": cur_heter_worker.endpoint.split(":")[0],
                "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
                "PADDLE_GLOO_RENDEZVOUS": "3",
                "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
                "FLAGS_selected_gpus": "0",
                "FLAGS_selected_xpus": "0",
                "CUDA_VISIBLE_DEVICES": device_id,
                "XPU_VISIBLE_DEVICES": device_id,
                "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port,
            }
            current_env.update(proc_env)

            cmd = [
                sys.executable,
                "-u",
                args.training_script,
            ] + args.training_script_args
            self.cmds["heter_worker"].append(cmd)

            if idx == 0:
                logger.info(
                    "Local heter_worker start {} processes. First process distributed "
                    "environment info (Only For Debug): {}".format(
                        len(pod.heter_workers),
                        pretty_print_envs(
                            proc_env, ("Distributed Envs", "Value")
                        ),
                    )
                )

            fn = None
            if args.log_dir is not None:
                os.system("mkdir -p {}".format(args.log_dir))
                fn = open("%s/heterlog.%d" % (args.log_dir, idx), "w")
                self.log_fns["heter_worker"].append(fn)
                proc = subprocess.Popen(
                    cmd, env=current_env, stdout=fn, stderr=fn
                )
            else:
                proc = subprocess.Popen(cmd, env=current_env)

            tp = TrainerProc()
            tp.proc = proc
            tp.rank = cur_heter_worker.rank
            tp.local_rank = idx
            tp.log_fn = fn
            tp.log_offset = fn.tell() if fn else None
            tp.cmd = cmd

            self.procs["heter_worker"].append(tp)


def check_backend(backend):
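    """Validate a distributed backend name.

    Raises ValueError if ``backend`` is not a supported name, or if it needs
    device support this Paddle build was not compiled with.
    """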
    if backend not in [
        'nccl',
        'gloo',
        'bkcl',
        'cncl',
        'auto',
        'hccl',
        'heter',
        'xccl',
    ]:
        raise ValueError(
            "paddle.distributed initialize error, "
            "backend argument can only be one of "
            "'nccl', 'gloo', 'bkcl', 'cncl', 'auto', 'hccl', 'heter', 'xccl', "
            "but got %s" % backend
        )

    if backend == 'nccl' and not fluid.core.is_compiled_with_cuda():
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with cuda but you assign 'nccl' as backend."
        )

    if backend == 'bkcl' and not fluid.core.is_compiled_with_xpu():
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with xpu but you assign 'bkcl' as backend."
        )

    if backend == 'hccl' and not fluid.core.is_compiled_with_npu():
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with npu but you assign 'hccl' as backend."
        )

    if backend == 'cncl' and not fluid.core.is_compiled_with_mlu():
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with mlu but you assign 'cncl' as backend."
        )


def block_windows_and_macos(backend):
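    """Raise if the 'gloo' backend is requested on macOS or Windows, where it
    is currently unsupported; any other backend passes through silently."""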
    if backend != 'gloo':
        return
    if utils.OS_NAME.startswith('darwin'):  # macOS, block
        raise ValueError(
            "You are trying to use gloo on macOS, but it is currently not supported"
        )
    if utils.IS_WINDOWS:  # Windows, block
        raise ValueError(
            "You are trying to use gloo on Windows, but it is currently not supported"
        )


def get_backend_by_compile_flag():
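    """Pick a default backend from compile-time device support, checking in
    order: nccl (CUDA), bkcl (XPU), hccl (NPU), cncl (MLU), then gloo."""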
    if fluid.core.is_compiled_with_cuda():
        return 'nccl'

    if fluid.core.is_compiled_with_xpu():
        return 'bkcl'

    if fluid.core.is_compiled_with_npu():
        return 'hccl'

    if fluid.core.is_compiled_with_mlu():
        return 'cncl'

    return 'gloo'
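

# Illustrative usage of the helpers above (a sketch, not part of the module's
# public API; the result depends on how this Paddle build was compiled):
#
#     backend = get_backend_by_compile_flag()
#     check_backend(backend)
#     block_windows_and_macos(backend)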