diff --git a/python/paddle/distributed/launch.py b/python/paddle/distributed/launch.py
index 73e91abbd4a93b230d029d776c7d80bdadeafd66..3e2f2e59f9142c36c9914abf8eabab81d3e8edbb 100644
--- a/python/paddle/distributed/launch.py
+++ b/python/paddle/distributed/launch.py
@@ -95,9 +95,9 @@ POD_IP (current node ip address, not needed for local training)
         help="The current node ip. ")
     parser.add_argument(
         "--use_paddlecloud",
-        type=bool,
-        default="False",
-        help="wheter to use paddlecloud platform to run your multi-process job.")
+        action='store_true',
+        help="whether to use paddlecloud platform to run your multi-process job. If this flag is not set, paddlecloud is not used."
+    )
     parser.add_argument(
         "--started_port",
         type=int,
diff --git a/python/paddle/fluid/tests/unittests/test_launch.sh b/python/paddle/fluid/tests/unittests/test_launch.sh
index 1419ba7335b247031edfc1c67eaf490db646a57b..d3b8d34e49c3848fd227cb630cedbb8e6938fead
--- a/python/paddle/fluid/tests/unittests/test_launch.sh
+++ b/python/paddle/fluid/tests/unittests/test_launch.sh
@@ -11,7 +11,8 @@
 export POD_IP=127.0.0.1
 export PADDLE_TRAINERS=127.0.0.1,127.0.0.2
 export PADDLE_TRAINER_ID=0
 
-distributed_args="--use_paddlecloud True --cluster_node_ips ${cluster_node_ips} --node_ip ${node_ip} --selected_gpus=0,1 --log_dir testlog"
+distributed_args="--use_paddlecloud --cluster_node_ips=${cluster_node_ips} --node_ip=${node_ip}
+--selected_gpus=0,1 --log_dir=testlog"
 python -m paddle.distributed.launch ${distributed_args} multi_process.py
 str1="selected_gpus:0 worker_endpoints:127.0.0.1:6170,127.0.0.1:6171,127.0.0.2:6170,127.0.0.2:6171 trainers_num:4 current_endpoint:127.0.0.1:6170 trainer_id:0"
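
For context (not part of the patch): argparse's type=bool applies bool() to the raw command-line string, so --use_paddlecloud False still parses as True, and the old default="False" is itself a non-empty (truthy) string. Switching to action='store_true' gives the intended behavior: the flag defaults to False and becomes True only when it is present. A minimal standalone sketch of the difference:

# Sketch only, not part of the patch: before/after argparse behavior.
import argparse

# Old style: type=bool converts the raw string, and bool("False") is True,
# so the option can never actually be turned off from the command line.
broken = argparse.ArgumentParser()
broken.add_argument("--use_paddlecloud", type=bool, default=False)
print(broken.parse_args(["--use_paddlecloud", "False"]).use_paddlecloud)   # True

# New style: store_true defaults to False and flips to True only when the
# bare flag is given on the command line.
fixed = argparse.ArgumentParser()
fixed.add_argument("--use_paddlecloud", action='store_true')
print(fixed.parse_args([]).use_paddlecloud)                                # False
print(fixed.parse_args(["--use_paddlecloud"]).use_paddlecloud)             # True

This is also why the test script above switches from passing "--use_paddlecloud True" to passing the bare flag "--use_paddlecloud".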