BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit a24d1868 (unverified)
Authored on Nov 16, 2020 by danleifeng; committed via GitHub on Nov 16, 2020
fix nccl init failed in parallel dygraph mode (#28497)
Parent: 93c39779
Showing 2 changed files with 44 additions and 31 deletions (+44, −31)
paddle/fluid/imperative/nccl_context.cc  (+21, −13)
python/paddle/distributed/parallel.py  (+23, −18)
paddle/fluid/imperative/nccl_context.cc

@@ -49,16 +49,20 @@ void NCCLParallelContext::RecvNCCLID(const std::string &ep,
   address.sin_port = htons(port);
 
   int try_times = 0;
+  int retry_time = 0;
   while (true) {
     if (bind(server_fd, (struct sockaddr *)&address, sizeof(address)) < 0) {
+      retry_time = 3 * (try_times + 1);
       LOG(WARNING) << "Socket bind worker " << ep
-                   << (try_times < 5 ? " failed, try again after 3 seconds."
-                                     : " failed, try again after 3 seconds. "
-                                       "Bind on endpoint %s failed. "
-                                       "Please confirm whether the "
-                                       "communication port or GPU card is "
-                                       "occupied.");
-      std::this_thread::sleep_for(std::chrono::seconds(3));
+                   << (try_times < 9
+                           ? " failed, try again after " +
+                                 std::to_string(retry_time) + " seconds."
+                           : " failed, try again after " +
+                                 std::to_string(retry_time) +
+                                 " seconds. Bind on endpoint " + ep +
+                                 " failed. Please confirm whether the "
+                                 "communication port or GPU card is occupied.");
+      std::this_thread::sleep_for(std::chrono::seconds(retry_time));
       ++try_times;
       continue;
     }

@@ -129,16 +133,20 @@ void NCCLParallelContext::SendNCCLID(const std::string &ep,
   }
 
   int try_times = 0;
+  int retry_time = 0;
   while (true) {
     if (connect(sock, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0) {
+      retry_time = 3 * (try_times + 1);
       LOG(WARNING) << "Socket connect worker " << ep
-                   << (try_times < 5
-                           ? " failed, try again after 3 seconds."
-                           : " failed, try again after 3 seconds. Maybe that "
-                             "some process is occupied the GPUs of this node "
-                             "now, and you should kill those process manually.");
-      std::this_thread::sleep_for(std::chrono::seconds(3));
+                   << (try_times < 9
+                           ? " failed, try again after " +
+                                 std::to_string(retry_time) + " seconds."
+                           : " failed, try again after " +
+                                 std::to_string(retry_time) +
+                                 " seconds. Maybe that some process is occupied the "
+                                 "GPUs of this node now, and you should kill those "
+                                 "process manually.");
+      std::this_thread::sleep_for(std::chrono::seconds(retry_time));
       ++try_times;
       continue;
     }
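Both hunks above make the same change in RecvNCCLID and SendNCCLID: instead of always waiting a fixed 3 seconds between bind/connect attempts, the wait now grows linearly with the attempt count (retry_time = 3 * (try_times + 1)), and the longer diagnostic message is only printed once try_times reaches 9. A minimal sketch of the resulting schedule, in plain Python rather than Paddle code (the 9-attempt threshold comes from the patch; everything else here is illustrative):

# Sketch of the retry schedule introduced by this patch (illustrative, not Paddle code).
# The patched loop retries indefinitely; this helper only tabulates the first few waits.
def retry_waits(attempts):
    return [3 * (try_times + 1) for try_times in range(attempts)]

print(retry_waits(5))        # [3, 6, 9, 12, 15] seconds, vs. a constant 3 before the patch
print(sum(retry_waits(9)))   # 135 seconds elapse before the more detailed warning appears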
python/paddle/distributed/parallel.py

@@ -125,7 +125,7 @@ def init_parallel_env():
     if ParallelEnv().world_size < 2:
         return
 
-    # 3: init gloo context
+    # 3: init gloo context (step 1: httpsever start)
     ep_rank_0 = ParallelEnv().trainer_endpoints[0].split(":")
     ep_rank = ParallelEnv().trainer_endpoints[ParallelEnv().rank].split(":")
     manager = Manager()

@@ -138,22 +138,6 @@ def init_parallel_env():
     http_server.daemon = True
     http_server_d["running"] = True
     http_server.start()
 
-    wait_server_ready([ParallelEnv().trainer_endpoints[0]])
-    gloo_strategy = core.GlooParallelStrategy()
-    gloo_strategy.rank = ParallelEnv().rank
-    gloo_strategy.rank_num = ParallelEnv().world_size
-    gloo_strategy.ip_address = ep_rank_0[0]
-    gloo_strategy.ip_port = int(ep_rank_0[1])
-    default_init_timeout_seconds = 3600
-    default_run_timeout_seconds = 9999999
-    gloo_strategy.init_seconds = default_init_timeout_seconds
-    gloo_strategy.run_seconds = default_run_timeout_seconds
-    gloo = core.GlooParallelContext(gloo_strategy)
-    gloo.init()
-    if ParallelEnv().rank == 0:
-        http_server_d["running"] = False
-        http_server.join()
-
     # 4. init NCCL ParallelStrategy
     strategy = ParallelStrategy()

@@ -165,7 +149,7 @@ def init_parallel_env():
     strategy.current_endpoint = ParallelEnv().current_endpoint
 
     # NOTE(chenweihang): [ why config global place here? ]
-    # the dygraph mode will be set to default mode, 
+    # the dygraph mode will be set to default mode,
     # users will not call `dygraph.guard` or `enable_dygraph`
     # directly, if they want to switch default place,
     # they need to call a function to change default place,

@@ -177,6 +161,27 @@ def init_parallel_env():
     parallel_helper._set_parallel_ctx(core.NCCLParallelContext(strategy, place))
     parallel_helper._init_parallel_ctx()
 
+    # 5: init gloo context (step 2: gloo init)
+    # dividing init_gloo into two part beacause nccl and gloo
+    # are separately looking for free ports which sometimes
+    # leads to port-conflict.
+    wait_server_ready([ParallelEnv().trainer_endpoints[0]])
+    gloo_strategy = core.GlooParallelStrategy()
+    gloo_strategy.rank = ParallelEnv().rank
+    gloo_strategy.rank_num = ParallelEnv().world_size
+    gloo_strategy.ip_address = ep_rank_0[0]
+    gloo_strategy.ip_port = int(ep_rank_0[1])
+    default_init_timeout_seconds = 3600
+    default_run_timeout_seconds = 9999999
+    gloo_strategy.init_seconds = default_init_timeout_seconds
+    gloo_strategy.run_seconds = default_run_timeout_seconds
+    gloo = core.GlooParallelContext(gloo_strategy)
+    gloo.init()
+    if ParallelEnv().rank == 0:
+        http_server_d["running"] = False
+        http_server.join()
+
 
 def get_rank():
     """
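For context on where this code path runs: init_parallel_env is what a user calls (directly or via paddle.distributed.spawn / paddle.distributed.launch) before building a DataParallel model in dygraph mode, so the NCCL bind/connect retries above happen during this call. A minimal, illustrative launcher sketch against the Paddle 2.0 dygraph API (the toy model, optimizer, and nprocs value are placeholders, not part of this commit):

# Hypothetical minimal launcher; only dist.init_parallel_env() is the code path patched here.
import paddle
import paddle.distributed as dist

def train():
    dist.init_parallel_env()                      # sets up NCCL (and gloo) across trainers
    model = paddle.DataParallel(paddle.nn.Linear(10, 10))
    opt = paddle.optimizer.SGD(learning_rate=0.01,
                               parameters=model.parameters())
    x = paddle.randn([4, 10], 'float32')
    loss = model(x).mean()
    loss.backward()
    opt.step()
    opt.clear_grad()

if __name__ == '__main__':
    dist.spawn(train, nprocs=2)                   # one worker process per GPU

One design choice visible in the diff: gloo initialization is now split into two steps, with only the HTTP store started before NCCL init and the actual gloo init moved after it, because letting NCCL and gloo search for free ports at the same time sometimes produced port conflicts (per the added comment).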