Commit 8f0590e7
Authored March 16, 2018 by Yu Yang
Parent commit: c15d2c9e

Add ncclAllReduce
Showing 1 changed file with 41 additions and 9 deletions
paddle/fluid/framework/parallel_executor.cc
@@ -138,14 +138,6 @@ struct ScaleLossGradOpHandle : public OpHandle {
   }
 };
 
-struct NCCLAllReduceOpHandle : public OpHandle {
-  void Run() override {
-    if (this->inputs_.size() == 1) {
-      return;  // No need to all reduce when GPU count = 1;
-    }
-  }
-};
-
 class ParallelExecutorPrivate {
  public:
  explicit ParallelExecutorPrivate(size_t num_threads = 12)
@@ -243,6 +235,46 @@ ncclDataType_t ToNCCLDataType(std::type_index type) {
   }
 }
 
+struct NCCLAllReduceOpHandle : public OpHandle {
+  ParallelExecutorPrivate *member_;
+
+  explicit NCCLAllReduceOpHandle(ParallelExecutorPrivate *member)
+      : member_(member) {}
+
+  void Run() override {
+    if (this->inputs_.size() == 1) {
+      return;  // No need to all reduce when GPU count = 1;
+    } else {
+      auto &var_name = static_cast<VarHandle *>(this->inputs_[0])->name_;
+      int dtype = -1;
+      size_t numel = 0;
+
+      for (auto &p : member_->places_) {
+        int dev_id = boost::get<platform::CUDAPlace>(p).device;
+
+        Scope *s = member_->local_scopes_[p];
+        auto &lod_tensor = s->FindVar(var_name)->Get<framework::LoDTensor>();
+        void *buffer = const_cast<void *>(lod_tensor.data<void>());
+
+        if (dtype == -1) {
+          dtype = ToNCCLDataType(lod_tensor.type());
+        }
+
+        if (numel == 0) {
+          numel = static_cast<size_t>(lod_tensor.numel());
+        }
+
+        auto &nccl_ctx = member_->communication_streams_.at(dev_id);
+
+        ncclAllReduce(buffer, buffer, numel,
+                      static_cast<ncclDataType_t>(dtype), ncclSum,
+                      nccl_ctx.comm, nccl_ctx.stream());
+      }
+
+      ncclGroupEnd();
+    }
+  }
+};
+
 ParallelExecutor::ParallelExecutor(
     const std::vector<platform::Place> &places,
     const std::unordered_set<std::string> &params,
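The added Run() walks member_->places_, looks up the gradient tensor by name in each device-local scope, and issues an in-place ncclAllReduce on that device's communicator and stream, finishing with ncclGroupEnd(). For reference, the sketch below shows the same single-process, multi-GPU all-reduce pattern written against the stock NCCL and CUDA runtime APIs; it is not Paddle code, the buffer names and sizes are illustrative, and error checking is omitted. NCCL's documented usage brackets the per-device calls with ncclGroupStart()/ncclGroupEnd() so a single host thread can drive all devices without deadlocking:

// Standalone sketch (illustrative, not from this commit): in-place sum
// across all visible GPUs from one host thread.
#include <cuda_runtime.h>
#include <nccl.h>

#include <vector>

int main() {
  int ndev = 0;
  cudaGetDeviceCount(&ndev);

  // One communicator, stream and device buffer per GPU.
  std::vector<ncclComm_t> comms(ndev);
  ncclCommInitAll(comms.data(), ndev, nullptr);  // uses devices 0..ndev-1

  const size_t numel = 1 << 20;
  std::vector<cudaStream_t> streams(ndev);
  std::vector<float *> bufs(ndev);
  for (int i = 0; i < ndev; ++i) {
    cudaSetDevice(i);
    cudaStreamCreate(&streams[i]);
    cudaMalloc(&bufs[i], numel * sizeof(float));
  }

  // Group the per-device calls so one thread can issue them all;
  // NCCL launches the collective when the group is closed.
  ncclGroupStart();
  for (int i = 0; i < ndev; ++i) {
    ncclAllReduce(bufs[i], bufs[i], numel, ncclFloat, ncclSum, comms[i],
                  streams[i]);
  }
  ncclGroupEnd();

  // Wait for completion and release resources.
  for (int i = 0; i < ndev; ++i) {
    cudaSetDevice(i);
    cudaStreamSynchronize(streams[i]);
    cudaFree(bufs[i]);
    cudaStreamDestroy(streams[i]);
  }
  for (auto &c : comms) ncclCommDestroy(c);
  return 0;
}

Note that the hunk above closes the device loop with ncclGroupEnd(), but no matching ncclGroupStart() appears in this diff; whether the group is opened elsewhere cannot be determined from this change alone.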
@@ -361,7 +393,7 @@ void ParallelExecutor::ConstructDependencyGraph(
     for (auto &og : var_names) {
       if (grads.count(og) != 0) {  // is param grad
         // Insert NCCL AllReduce Op
-        member_->ops_.emplace_back(new NCCLAllReduceOpHandle());
+        member_->ops_.emplace_back(new NCCLAllReduceOpHandle(member_));
         auto *op_handle = member_->ops_.back().get();
 
         for (auto &pair : member_->local_scopes_) {