#!/bin/bash

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e
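
# This script exercises `python -m paddle.distributed.launch` with the
# --nproc_per_node option for GPU, CPU and XPU setups.  Every case launches
# nproc_process.py, which is assumed to dump the launch-time environment
# (selected_devices, worker_endpoints, trainers_num, current_endpoint and
# trainer_id) into per-trainer check logs that are grepped below.
#
# PADDLE_START_PORT is expected to be picked up by the launcher as the first
# worker port, so the endpoints checked below are 127.0.0.1:35789 (and
# 127.0.0.1:35790 for the two-process cases).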
export PADDLE_START_PORT=35789

#local_ip=`ip route get 1 | awk '{print $NF;exit}'`
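# nproc_process.py is assumed to write its check output to
# <prefix>.check_<trainer_id>.log, where <prefix> is the argument passed to
# nproc_process.py on the launch command lines below.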
file_0="fleet_nproc_0.check_0.log"

function test_nproc_0(){
    gpus=$1
    rm -f ${file_0}
    distributed_args="--log_dir=testlog --nproc_per_node=1 --ips=127.0.0.1"
    # nproc_per_node=1: a single trainer process that owns all visible devices
    # (two GPUs/XPUs here, or none in the CPU case)
    python -m paddle.distributed.launch ${distributed_args} nproc_process.py  fleet_nproc_0

    str0="selected_devices:${gpus} worker_endpoints:127.0.0.1:35789 trainers_num:1 current_endpoint:127.0.0.1:35789 trainer_id:0"
    if grep -q "$str0" "$file_0"; then
        echo "find trainer 0"
    else
        echo "not find trainer 0"
        exit 1
    fi
}

# unittest1:gpu
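# detected_gpu.py (and detected_xpu.py below) is assumed to exit with 0 only
# when the corresponding device type is available, so on any given machine
# only the applicable cases actually run.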
if python detected_gpu.py ; then
    echo "begin ut 1:"
    export CUDA_VISIBLE_DEVICES=0,1
    test_nproc_0 "0,1"
fi

# unittest2:cpu
if ! python detected_gpu.py ; then
    echo "begin ut 2:"
    export CUDA_VISIBLE_DEVICES=""
    test_nproc_0 ""
fi

# unittest3:xpu
if python detected_xpu.py ; then
    echo "begin ut 3:"
    export XPU_VISIBLE_DEVICES=0,1
    test_nproc_0 "0,1"
fi

function test_nproc_1_gpu(){
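    # Two trainer processes on one node: the launcher is expected to split the
    # visible devices so trainer 0 gets device 0 and trainer 1 gets device 1,
    # with consecutive ports starting at PADDLE_START_PORT.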
    file_0="fleet_nproc_1.check_0.log"
    file_1="fleet_nproc_1.check_1.log"
    rm -f ${file_0} ${file_1}

    distributed_args="--log_dir=testlog --nproc_per_node=2 --ips=127.0.0.1"
    python -m paddle.distributed.launch ${distributed_args} nproc_process.py  fleet_nproc_1

    str0="selected_devices:0 worker_endpoints:127.0.0.1:35789,127.0.0.1:35790 trainers_num:2 current_endpoint:127.0.0.1:35789 trainer_id:0"
    if grep -q "$str0" "$file_0"; then
        echo "find trainer 0"
    else
        echo "not find trainer 0"
        exit 1
    fi

    str1="selected_devices:1 worker_endpoints:127.0.0.1:35789,127.0.0.1:35790 trainers_num:2 current_endpoint:127.0.0.1:35790 trainer_id:1"
    if grep -q "$str1" "$file_1"; then
        echo "find trainer 1"
    else
        echo "not find trainer 1"
        exit 1
    fi
}

# unittest4: nproc_per_node=2, each with 1 gpu
if python detected_gpu.py ; then
    echo "begin ut 4:"
    export CUDA_VISIBLE_DEVICES=0,1
    test_nproc_1_gpu
fi

function test_nproc_1_cpu(){
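    # CPU variant of the two-process case: with CUDA_VISIBLE_DEVICES empty,
    # selected_devices is expected to be empty in both check logs.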
    file_0="fleet_nproc_1.check_0.log"
    file_1="fleet_nproc_1.check_1.log"
    rm -f ${file_0} ${file_1}

    distributed_args="--log_dir=testlog --nproc_per_node=2 --ips=127.0.0.1"
    python -m paddle.distributed.launch ${distributed_args} nproc_process.py  fleet_nproc_1

    str0="selected_devices: worker_endpoints:127.0.0.1:35789,127.0.0.1:35790 trainers_num:2 current_endpoint:127.0.0.1:35789 trainer_id:0"
    if grep -q "$str0" "$file_0"; then
        echo "find trainer 0"
    else
        echo "not find trainer 0"
        exit 1
    fi

    str1="selected_devices: worker_endpoints:127.0.0.1:35789,127.0.0.1:35790 trainers_num:2 current_endpoint:127.0.0.1:35790 trainer_id:1"
    if grep -q "$str1" "$file_1"; then
        echo "find trainer 1"
    else
        echo "not find trainer 1"
        exit 1
    fi
}

# unittest5: nproc_per_node=2, cpu
if ! python detected_gpu.py ; then
    echo "begin ut 5:"
    export CUDA_VISIBLE_DEVICES=""
    test_nproc_1_cpu
fi


function test_nproc_1_xpu(){
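    # XPU variant of the two-process case: same expectations as the GPU case,
    # but the visible devices are controlled through XPU_VISIBLE_DEVICES.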
    file_0="fleet_nproc_1.check_0.log"
    file_1="fleet_nproc_1.check_1.log"
    rm -f ${file_0} ${file_1}

    distributed_args="--log_dir=testlog --nproc_per_node=2 --ips=127.0.0.1"
    python -m paddle.distributed.launch ${distributed_args} nproc_process.py  fleet_nproc_1

    str0="selected_devices:0 worker_endpoints:127.0.0.1:35789,127.0.0.1:35790 trainers_num:2 current_endpoint:127.0.0.1:35789 trainer_id:0"
    if grep -q "$str0" "$file_0"; then
        echo "find trainer 0"
    else
        echo "not find trainer 0"
        exit 1
    fi

    str1="selected_devices:1 worker_endpoints:127.0.0.1:35789,127.0.0.1:35790 trainers_num:2 current_endpoint:127.0.0.1:35790 trainer_id:1"
    if grep -q "$str1" "$file_1"; then
        echo "find trainer 1"
    else
        echo "not find trainer 1"
        exit 1
    fi
}

# unittest6: nproc_per_node=2, each with 1 xpu
if python detected_xpu.py ; then
    echo "begin ut 6:"
    export XPU_VISIBLE_DEVICES=0,1
    test_nproc_1_xpu
fi