Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 8c0529fd (unverified)
Authored by Haohongxiang on Oct 08, 2022; committed via GitHub on Oct 08, 2022
Parent: ff37e48e

[Dygraph] Fix performance of pp+mp by using send/recv_calc_stream instead of send/recv (#46116)
Showing 8 changed files with 153 additions and 15 deletions (+153 -15):

paddle/fluid/distributed/collective/ProcessGroup.h                             +10   -0
paddle/fluid/distributed/collective/ProcessGroupNCCL.cc                        +35   -0
paddle/fluid/distributed/collective/ProcessGroupNCCL.h                          +8   -0
paddle/fluid/distributed/collective/ProcessGroupStream.cc                      +25   -0
paddle/fluid/distributed/collective/ProcessGroupStream.h                       +15   -0
paddle/fluid/pybind/distributed_py.cc                                          +31   -0
python/paddle/distributed/fleet/layers/mpu/mp_ops.py                           +20   -1
python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py     +9  -14
paddle/fluid/distributed/collective/ProcessGroup.h

@@ -214,6 +214,16 @@ class ProcessGroup {
        "ProcessGroup%s does not support AllGather_Partial", GetBackendName()));
  }

  virtual std::shared_ptr<ProcessGroup::Task> AllGather_Partial(
      std::vector<phi::DenseTensor>& in_tensors,   // NOLINT
      std::vector<phi::DenseTensor>& out_tensors,  // NOLINT
      int offset,
      int length,
      bool) {  // NOLINT
    PADDLE_THROW(platform::errors::InvalidArgument(
        "ProcessGroup%s does not support AllGather_Partial", GetBackendName()));
  }

  virtual std::shared_ptr<ProcessGroup::Task> AllToAll(
      std::vector<phi::DenseTensor>&,    // NOLINT
      std::vector<phi::DenseTensor>&) {  // NOLINT
paddle/fluid/distributed/collective/ProcessGroupNCCL.cc

@@ -1034,6 +1034,41 @@ std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::AllGather_Partial(
      CommType::ALLGATHER);
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::AllGather_Partial(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    int offset,
    int length,
    bool sync_op,
    bool use_calc_stream) {
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(in_tensors),
      true,
      platform::errors::InvalidArgument("All inputs should be in CudaPlace."));
  PADDLE_ENFORCE_EQ(
      CheckTensorsInCudaPlace(out_tensors),
      true,
      platform::errors::InvalidArgument("All outputs should be in CudaPlace."));
  return Collective(
      in_tensors,
      out_tensors,
      [&](phi::DenseTensor& input,
          phi::DenseTensor& output,
          ncclComm_t comm,
          const gpuStream_t& stream) {
        return platform::dynload::ncclAllGather(
            GetPointerByOffset(input.data(), offset, input.dtype()),
            output.data(),
            length,
            platform::ToNCCLDataType(input.dtype()),
            comm,
            stream);
      },
      CommType::ALLGATHER,
      sync_op,
      use_calc_stream);
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::AllToAll(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors) {
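For intuition, here is a toy single-process sketch (not part of the commit) of the data movement this partial all-gather performs when offset = rank * length, which is how the pybind wrapper below sets it up: each rank contributes the `length` elements of its input starting at its own offset, and every rank receives the rank-ordered concatenation of those slices.

# Toy illustration (not from the commit) of the partial all-gather semantics,
# assuming offset = rank * length as in the Python binding below.
import numpy as np

def partial_all_gather(inputs, length):
    # Rank r contributes inputs[r][r*length:(r+1)*length].
    slices = [inp[r * length:(r + 1) * length] for r, inp in enumerate(inputs)]
    gathered = np.concatenate(slices)
    # Every rank receives the same rank-ordered concatenation.
    return [gathered.copy() for _ in inputs]

length = 3
# Two "ranks", each holding only its own slice of a length-6 tensor.
inputs = [np.array([1, 2, 3, 0, 0, 0]), np.array([0, 0, 0, 4, 5, 6])]
outputs = partial_all_gather(inputs, length)
# Both entries of outputs are now [1 2 3 4 5 6]; this mirrors how
# p2p_communication.py reassembles a tensor after partial send/recv.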
paddle/fluid/distributed/collective/ProcessGroupNCCL.h

@@ -182,6 +182,14 @@ class ProcessGroupNCCL : public ProcessGroupStream {
      int offset,
      int length) override;

  std::shared_ptr<ProcessGroup::Task> AllGather_Partial(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      int offset,
      int length,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllToAll(
      std::vector<phi::DenseTensor>& in,
      std::vector<phi::DenseTensor>& out) override;
paddle/fluid/distributed/collective/ProcessGroupStream.cc

@@ -154,5 +154,30 @@ std::shared_ptr<ProcessGroup::Task> ProcessGroupStream::Recv_Partial(
      "ProcessGroup%s does not support do recv_partial", GetBackendName()));
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupStream::AllGather_Partial(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    int offset,
    int length,
    bool sync_op) {
  return AllGather_Partial(in_tensors,
                           out_tensors,
                           offset,
                           length,
                           sync_op,
                           /*use_calc_stream*/ false);
}

std::shared_ptr<ProcessGroup::Task> ProcessGroupStream::AllGather_Partial(
    std::vector<phi::DenseTensor>& in_tensors,
    std::vector<phi::DenseTensor>& out_tensors,
    int offset,
    int length,
    bool sync_op,
    bool use_calc_stream) {
  PADDLE_THROW(platform::errors::InvalidArgument(
      "ProcessGroup%s does not support do recv_partial", GetBackendName()));
}

}  // namespace distributed
}  // namespace paddle
paddle/fluid/distributed/collective/ProcessGroupStream.h

@@ -132,6 +132,21 @@ class ProcessGroupStream : public ProcessGroup {
      int length,
      bool sync_op,
      bool use_calc_stream);

  std::shared_ptr<ProcessGroup::Task> AllGather_Partial(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      int offset,
      int length,
      bool sync_op) override;

  virtual std::shared_ptr<ProcessGroup::Task> AllGather_Partial(
      std::vector<phi::DenseTensor>& in_tensors,   // NOLINT
      std::vector<phi::DenseTensor>& out_tensors,  // NOLINT
      int offset,
      int length,
      bool sync_op,
      bool use_calc_stream);
};

}  // namespace distributed
paddle/fluid/pybind/distributed_py.cc

@@ -621,6 +621,37 @@ void BindDistributed(py::module *m) {
              py::arg("op"),
              py::call_guard<py::gil_scoped_release>())

          .def(
              "all_gather_partial_on_calc_stream",
              [](distributed::ProcessGroupStream &self,
                 py::handle py_in_tensor,
                 py::handle py_out_tensor,
                 int nranks,
                 int rank_id) {
                auto in_tensor = CastPyArg2Tensor(py_in_tensor.ptr(), 0);
                auto out_tensor = CastPyArg2Tensor(py_out_tensor.ptr(), 0);
                auto in_dense = std::dynamic_pointer_cast<phi::DenseTensor>(
                    in_tensor.impl());
                auto out_dense = std::dynamic_pointer_cast<phi::DenseTensor>(
                    out_tensor.impl());
                std::vector<phi::DenseTensor> in_tensors = {*in_dense};
                std::vector<phi::DenseTensor> out_tensors = {*out_dense};
                int numel = (*in_dense).numel();
                int send_numel = numel / nranks;
                int offset = send_numel * rank_id;
                return self.AllGather_Partial(in_tensors,
                                              out_tensors,
                                              offset,
                                              send_numel,
                                              /*sync_op*/ true,
                                              /*use_calc_stream*/ true);
              },
              py::arg("in"),
              py::arg("out"),
              py::arg("num"),
              py::arg("id"),
              py::call_guard<py::gil_scoped_release>())

          .def(
              "send_on_calc_stream",
              [](distributed::ProcessGroupStream &self,
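A minimal usage sketch of the new binding (not part of the commit; it assumes a dygraph parallel environment launched with paddle.distributed.launch on multiple GPUs, and uses Group.nranks/Group.rank, which this commit does not touch). The binding derives send_numel = numel / nranks and offset = send_numel * rank_id internally and always passes sync_op=true and use_calc_stream=true, so the caller does not wait on a returned task:

# Hedged sketch, not from the commit: calling the binding the same way
# _partial_allgather_op in p2p_communication.py (below) does.
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
group = paddle.distributed.collective._get_default_group()
nranks, rank_id = group.nranks, group.rank

# A tensor whose flattened length is divisible by nranks.
tensor = paddle.ones([nranks * 4], dtype='float32') * (rank_id + 1)

# In-place partial all-gather on the calculation stream; the binding computes
# send_numel and offset itself, and no explicit task.wait() is needed after.
group.process_group.all_gather_partial_on_calc_stream(tensor, tensor, nranks,
                                                      rank_id)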
python/paddle/distributed/fleet/layers/mpu/mp_ops.py

@@ -43,7 +43,26 @@ def _c_identity(tensor, group=None):
         return
     ring_id = 0 if group is None else group.id
 
-    if _non_static_mode():
+    if in_dygraph_mode():
+        from paddle.autograd import PyLayer
+
+        class c_identity_eager(PyLayer):
+
+            @staticmethod
+            def forward(ctx, tensor):
+                return _legacy_C_ops.c_identity(tensor, 'use_calc_stream',
+                                                True, 'ring_id', group.id,
+                                                'use_model_parallel', True)
+
+            @staticmethod
+            def backward(ctx, dy):
+                op_type = collective._get_reduce_op(ReduceOp.SUM, "_c_identity")
+                group.process_group.allreduce_on_calc_stream(dy, op_type)
+                return dy
+
+        return c_identity_eager.apply(tensor)
+
+    elif _in_legacy_dygraph():
         return _legacy_C_ops.c_identity(tensor, 'use_calc_stream', True,
                                         'ring_id', ring_id,
                                         'use_model_parallel', True)
python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py

@@ -173,7 +173,9 @@ def _partial_send_op(tensor, group, use_calc_stream, ring_id, dst, nranks,
     elif in_dygraph_mode():
         group = paddle.distributed.collective._get_default_group(
         ) if group is None else group
-        return group.process_group.send_partial(tensor, dst, nranks, rank_id)
+        comm_op = group.process_group.send_partial_on_calc_stream \
+            if use_calc_stream else group.process_group.send_partial
+        return comm_op(tensor, dst, nranks, rank_id)
 
 
 def send_partial(tensor,

@@ -212,12 +214,9 @@ def _partial_recv_op(tensor, group, use_calc_stream, ring_id, src, nranks,
     elif in_dygraph_mode():
         group = paddle.distributed.collective._get_default_group(
         ) if group is None else group
-        task = group.process_group.recv_partial(tensor, src, nranks, rank_id)
-        if use_calc_stream:
-            task.wait()
-            return None
-        else:
-            return task
+        comm_op = group.process_group.recv_partial_on_calc_stream \
+            if use_calc_stream else group.process_group.recv_partial
+        return comm_op(tensor, src, nranks, rank_id)
 
 
 def recv_partial(tensor,

@@ -255,13 +254,9 @@ def _partial_allgather_op(tensor, group, use_calc_stream, ring_id, nranks,
     elif in_dygraph_mode():
         group = paddle.distributed.collective._get_default_group(
         ) if group is None else group
-        task = group.process_group.all_gather_partial(tensor, tensor, nranks,
-                                                      rank_id)
-        if use_calc_stream:
-            task.wait()
-            return None
-        else:
-            return task
+        comm_op = group.process_group.all_gather_partial_on_calc_stream \
+            if use_calc_stream else group.process_group.all_gather_partial
+        return comm_op(tensor, tensor, nranks, rank_id)
 
 
 def allgather_partial(tensor,
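The three dygraph branches above now share one pattern: when use_calc_stream is set, dispatch to the *_on_calc_stream binding, which enqueues the NCCL call directly on the calculation stream with sync_op=true, instead of launching on the communication stream and then blocking on task.wait(); this appears to be the send/recv overhead the commit title targets. A hypothetical helper (not in the commit) capturing the same selection logic:

# Hypothetical helper, not in the commit: pick the *_on_calc_stream binding
# when the communication should run on the calculation stream.
def _select_partial_comm_op(process_group, base_name, use_calc_stream):
    name = base_name + "_on_calc_stream" if use_calc_stream else base_name
    return getattr(process_group, name)

# e.g., inside _partial_recv_op:
#   comm_op = _select_partial_comm_op(group.process_group, "recv_partial",
#                                     use_calc_stream)
#   return comm_op(tensor, src, nranks, rank_id)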