#!/bin/bash
set -e
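# fail fast: abort the whole test on the first failing command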
# use default values
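# (no flags: the launcher should fall back to its built-in single-node defaults)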
python -m paddle.distributed.launch multi_process.py

# use paddlecloud
cluster_node_ips="10.0.0.1"
node_ip="10.0.0.1"
export PADDLE_TRAINERS_NUM=2
export POD_IP=127.0.0.1
export PADDLE_TRAINERS=127.0.0.1,127.0.0.2
export PADDLE_TRAINER_ID=0

export PADDLE_PORT=35019
export PADDLE_PORTS_NUM=2
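# the exports above mimic the environment variables paddlecloud injects
# (trainer count, pod IP, node list, trainer id, base port, ports per node);
# --use_paddlecloud below makes the launcher read the cluster info from them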

distributed_args="--use_paddlecloud --cluster_node_ips=${cluster_node_ips} --node_ip=${node_ip} --selected_gpus=0,1 --log_dir=testlog"
CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch ${distributed_args} multi_process.py
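# 2 nodes x 2 ports per node should yield the 4 trainers listed in the
# expected strings below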

str1="selected_gpus:0 worker_endpoints:127.0.0.1:35019,127.0.0.1:35020,127.0.0.2:35019,127.0.0.2:35020 trainers_num:4 current_endpoint:127.0.0.1:35019 trainer_id:0"
str2="selected_gpus:1 worker_endpoints:127.0.0.1:35019,127.0.0.1:35020,127.0.0.2:35019,127.0.0.2:35020 trainers_num:4 current_endpoint:127.0.0.1:35020 trainer_id:1"
file_0="multi_process.check_0.log"
file_1="multi_process.check_1.log"
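# multi_process.py is expected to dump each trainer's view of the cluster
# into multi_process.check_<trainer_id>.log, which the greps below verify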

echo "paddlecloud params test"
if grep -q "$str1" "$file_0"; then
    echo "found trainer 0"
else
    echo "did not find trainer 0"
    exit 1
fi

if grep -q "$str2" "$file_1"; then
    echo "found trainer 1"
else
    echo "did not find trainer 1"
    exit 1
fi

# test async poll process
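# clean up the logs from the previous test so the checks below cannot
# match stale output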
if [ -f "$file_0" ]; then
    rm "$file_0"
fi
if [ -f "$file_1" ]; then
    rm "$file_1"
fi

unset PADDLE_PORT
unset PADDLE_PORTS_NUM
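# with the port variables unset, the launcher should fall back to its
# default ports (6170, 6171, ... as expected by abort_str1 below)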

echo ""
echo "paddle.distributed.launch async poll process test"
if ! CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch ${distributed_args} multi_process.py abort; then
    echo "training aborted as planned"
fi
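# trainer 0 should have logged the abort marker, and trainer 1's log should
# never appear: the launcher is expected to terminate the surviving workers
# once one of them fails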

abort_str1="abort>>> selected_gpus:0 worker_endpoints:127.0.0.1:6170,127.0.0.1:6171,127.0.0.2:6170,127.0.0.2:6171 trainers_num:4 current_endpoint:127.0.0.1:6170 trainer_id:0"

if grep -q "$abort_str1" "$file_0"; then
    echo "trainer 0 abort as planned"
else
    echo "trainer 0 not abort as planned"
    exit -1
fi

if [ ! -f "$file_1" ]; then
    echo "trainer 1 terminated as planned"
else
    echo "trainer 1 did not terminate as planned"
    exit 1
fi

# test for random ports
file_0_0="test_launch_filelock_0_0.log"
file_1_0="test_launch_filelock_1_0.log"
rm -rf "$file_0_0" "$file_1_0"

distributed_args="--selected_gpus=0,1 --log_dir=testlog"
export PADDLE_LAUNCH_LOG="test_launch_filelock_0"
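# assumption: PADDLE_LAUNCH_LOG names the filelock/log prefix used while the
# launcher picks random ports (hence the test_launch_filelock_* names above)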
CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch ${distributed_args} find_ports.py
str_0="worker_endpoints:127.0.0.1:6070,127.0.0.1:6071"