# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
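
"""Test worker for multi-process launching (summary inferred from the code
below): each trainer reads the FLAGS_selected_gpus, PADDLE_TRAINER_ID,
PADDLE_TRAINER_ENDPOINTS, and PADDLE_CURRENT_ENDPOINT environment variables
set by the launcher, prints them, and writes them to
multi_process_<prefix>.check_<trainer_id>.log. When invoked with "abort",
trainer 0 exits with status 1 to simulate a failed run.
"""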

import os
import sys
import time


def train(prefix):
    # Distributed-training settings injected into the environment by the launcher.
    selected_gpus = os.getenv("FLAGS_selected_gpus")
    trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
    worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
    current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
    trainers_num = len(worker_endpoints.split(','))

    name = "selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format(
        selected_gpus,
        worker_endpoints,
        trainers_num,
        current_endpoint,
        trainer_id,
    )

    print(name)
    with open(
        "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w"
    ) as f:
        f.write(name)


def train_abort(prefix):
    # Same launcher-provided settings as in train().
    selected_gpus = os.getenv("FLAGS_selected_gpus")
    trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
    worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
    current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
    trainers_num = len(worker_endpoints.split(','))

    if trainer_id == 0:
        try:
            # Simulate an aborted training run on trainer 0.
            sys.exit(1)
        except SystemExit:
            name = "abort>>> selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format(
                selected_gpus,
                worker_endpoints,
                trainers_num,
                current_endpoint,
                trainer_id,
            )
            print(name)
            with open(
            "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w"
                "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w"
            ) as f:
                f.write(name)
            raise
    else:
        # Sleep for 30s to give paddle.distributed.launch time to terminate
        # this process after trainer 0 aborts.
        time.sleep(30)
        name = "selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format(
            selected_gpus,
            worker_endpoints,
            trainers_num,
            current_endpoint,
            trainer_id,
        )

        print(name)
        with open(
            "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w"
        ) as f:
            f.write(name)


if __name__ == '__main__':
    prefix = sys.argv[1]
    if len(sys.argv) == 3 and sys.argv[2] == "abort":
        train_abort(prefix)
    else:
        train(prefix)
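
# Example invocation (an illustration based on the argv handling above; in
# practice this script is presumably started by paddle.distributed.launch,
# which sets the PADDLE_* environment variables it reads):
#   python multi_process.py <prefix>
#   python multi_process.py <prefix> abort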