#!/bin/bash
set -e
# use default values
# FIXME: randomly fails with "Unknown command lines -c (or -m)".
launch_py=${PADDLE_BINARY_DIR}/python/paddle/distributed/launch.py
python ${launch_py} multi_process.py
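# With no distributed flags, launch.py is assumed to fall back to a
# single-node setup and spawn one multi_process.py worker per visible GPU;
# under set -e this run only checks that the command exits cleanly.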

# use paddlecloud
echo "begin test use paddlecloud"
cluster_node_ips="10.0.0.1"
node_ip="10.0.0.1"
export PADDLE_TRAINERS_NUM=2
export POD_IP=127.0.0.1
export PADDLE_TRAINERS=127.0.0.1,127.0.0.2
export PADDLE_TRAINER_ID=0
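# The exports above mimic the environment variables paddlecloud would inject:
# a two-node job (127.0.0.1 and 127.0.0.2) in which this script plays the
# role of node 127.0.0.1 with trainer id 0.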

export PADDLE_PORT=35019
export TRAINER_PORTS_NUM=2
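# With PADDLE_PORT=35019 and TRAINER_PORTS_NUM=2, launch.py should assign two
# consecutive ports per node, giving the four worker endpoints
# 127.0.0.1:35019,127.0.0.1:35020,127.0.0.2:35019,127.0.0.2:35020 that
# str1/str2 below expect (inferred from those checks, not from launch.py docs).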

distributed_args="--use_paddlecloud --cluster_node_ips=${cluster_node_ips} --node_ip=${node_ip} --selected_gpus=0,1 --log_dir=testlog"
CUDA_VISIBLE_DEVICES=0,1 python ${launch_py} ${distributed_args} multi_process.py
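# multi_process.py is assumed to write the settings each local trainer
# receives (selected gpu, worker endpoints, trainer id) into
# multi_process.check_<local_rank>.log; the greps below verify that rank 0
# got GPU 0 / port 35019 and rank 1 got GPU 1 / port 35020.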

str1="selected_gpus:0 worker_endpoints:127.0.0.1:35019,127.0.0.1:35020,127.0.0.2:35019,127.0.0.2:35020 trainers_num:4 current_endpoint:127.0.0.1:35019 trainer_id:0"
str2="selected_gpus:1 worker_endpoints:127.0.0.1:35019,127.0.0.1:35020,127.0.0.2:35019,127.0.0.2:35020 trainers_num:4 current_endpoint:127.0.0.1:35020 trainer_id:1"
file_0="multi_process.check_0.log"
file_1="multi_process.check_1.log"

echo "paddlecloud params test"
if grep -q "$str1" "$file_0"; then
    echo "find trainer 0"
else
    echo "not find trainer 0"
    exit -1
fi

if grep -q "$str2" "$file_1"; then
    echo "find trainer 1"
else
    echo "not find trainer 1"
    exit -1
fi

# test async poll process
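# Remove stale check logs from the previous run so the abort test below
# greps freshly written output.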
if [ -f $file_0 ]; then
    rm $file_0
fi
if [ -f $file_1 ]; then
    rm $file_1
fi

# test use DISTRIBUTED_TRAINER_ENDPOINTS env in paddlecloud
unset PADDLE_PORT
export DISTRIBUTED_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171,127.0.0.2:6170,127.0.0.2:6171
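# With PADDLE_PORT unset, launch.py is expected to take the endpoint list
# verbatim from DISTRIBUTED_TRAINER_ENDPOINTS instead of deriving it from a
# base port (inferred from abort_str1 below, which checks ports 6170/6171).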

echo ""
echo "paddle.distributed.launch async poll process test"
if ! CUDA_VISIBLE_DEVICES=0,1 python ${launch_py} ${distributed_args} multi_process.py abort; then
    echo "train abort as planned"
fi
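# The extra "abort" argument presumably makes multi_process.py fail after
# logging its settings; the "if !" guard keeps set -e from killing this
# script, and the checks below confirm that trainer 0 logged the abort while
# trainer 1 was terminated before producing a log file.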

abort_str1="abort>>> selected_gpus:0 worker_endpoints:127.0.0.1:6170,127.0.0.1:6171,127.0.0.2:6170,127.0.0.2:6171 trainers_num:4 current_endpoint:127.0.0.1:6170 trainer_id:0"

if grep -q "$abort_str1" "$file_0"; then
    echo "trainer 0 abort as planned"
else
    echo "trainer 0 not abort as planned"
    exit -1
fi

if [ ! -f $file_1 ]; then
    echo "trainer 1 terminate as planned"
else
    echo "trainer 1 not terminate as planned"
    exit -1
fi

# test for random ports
file_0_0="test_launch_filelock_0_0.log"
file_1_0="test_launch_filelock_1_0.log"
rm -rf $file_0_0 $file_1_0

distributed_args="--selected_gpus=0,1 --log_dir=testlog"
export PADDLE_LAUNCH_LOG="test_launch_filelock_0"
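# find_ports.py is assumed to log the worker endpoints it ends up with;
# str_0 below encodes the expected free consecutive ports 6070/6071 picked by
# launch.py when no ports are specified.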
CUDA_VISIBLE_DEVICES=0,1 python ${launch_py} ${distributed_args} find_ports.py
str_0="worker_endpoints:127.0.0.1:6070,127.0.0.1:6071"