Commit 3fe63d67 authored by danleifeng, committed by gongweibao

add store_true to use_paddlecloud argument in launch.py (#21168)

Parent 9cbe7bcc
@@ -95,9 +95,9 @@ POD_IP (current node ip address, not needed for local training)
         help="The current node ip. ")
     parser.add_argument(
         "--use_paddlecloud",
-        type=bool,
-        default="False",
-        help="wheter to use paddlecloud platform to run your multi-process job.")
+        action='store_true',
+        help="wheter to use paddlecloud platform to run your multi-process job. If false, no need to set this argument."
+    )
     parser.add_argument(
         "--started_port",
         type=int,
...
@@ -11,7 +11,8 @@ export POD_IP=127.0.0.1
 export PADDLE_TRAINERS=127.0.0.1,127.0.0.2
 export PADDLE_TRAINER_ID=0
-distributed_args="--use_paddlecloud True --cluster_node_ips ${cluster_node_ips} --node_ip ${node_ip} --selected_gpus=0,1 --log_dir testlog"
+distributed_args="--use_paddlecloud --cluster_node_ips=${cluster_node_ips} --node_ip=${node_ip}
+                --selected_gpus=0,1 --log_dir=testlog"
 python -m paddle.distributed.launch ${distributed_args} multi_process.py
 str1="selected_gpus:0 worker_endpoints:127.0.0.1:6170,127.0.0.1:6171,127.0.0.2:6170,127.0.0.2:6171 trainers_num:4 current_endpoint:127.0.0.1:6170 trainer_id:0"
...
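The core of this commit is the argparse fix: with type=bool, argparse calls bool() on the raw command-line string, and since any non-empty string (including "False") is truthy, passing "--use_paddlecloud False" still enabled the option. With action='store_true' the option becomes a real presence switch, which is why the test script now passes a bare "--use_paddlecloud". Below is a minimal standalone sketch of the behaviour difference; the flag names are illustrative only and not from the commit.

import argparse

parser = argparse.ArgumentParser()
# Old style: bool("False") is True, so the string value is effectively ignored.
parser.add_argument("--old_flag", type=bool, default=False)
# New style: the flag is False unless it appears on the command line.
parser.add_argument("--new_flag", action='store_true')

print(parser.parse_args(["--old_flag", "False"]).old_flag)  # True (surprising)
print(parser.parse_args([]).new_flag)                       # False
print(parser.parse_args(["--new_flag"]).new_flag)           # True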