Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
0ed3f359
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
0ed3f359
编写于
1月 23, 2018
作者:
Y
Yang Yang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
pass parallel_do cbow
上级
af8cb820
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
44 additions
and
11 deletions
+44
-11
paddle/operators/parallel_do_op.cc
paddle/operators/parallel_do_op.cc
+43
-11
python/paddle/v2/fluid/layers/control_flow.py
python/paddle/v2/fluid/layers/control_flow.py
+1
-0
未找到文件。
paddle/operators/parallel_do_op.cc
浏览文件 @
0ed3f359
...
...
@@ -31,6 +31,7 @@ static constexpr char kParallelScopes[] = "parallel_scopes";
// Attribute name under which the ParallelDo operator stores the sub-block
// (the program block executed once per device/place).
static constexpr char kParallelBlock[] = "sub_block";

// Short aliases for the two variable payload types this operator moves
// between scopes: dense tensors with LoD metadata, and sparse rows.
using LoDTensor = framework::LoDTensor;
using SelectedRows = framework::SelectedRows;
static
void
SplitTensorAndMoveTensorToScopes
(
const
framework
::
Scope
&
scope
,
std
::
vector
<
framework
::
Scope
*>
*
sub_scopes
,
...
...
@@ -64,6 +65,30 @@ static void SplitTensorAndMoveTensorToScopes(
}
}
// Copy `src` into `dst`, or share its underlying buffer when no device
// transfer is needed.
//
// Supported payloads:
//  - LoDTensor: if `src` already lives on `dst_place`, share the data buffer
//    (zero-copy); otherwise perform a device-to-device Copy.
//  - SelectedRows: rows/height are copied by value, the value tensor is
//    copied to `dst_place`.
//
// Throws (PADDLE_THROW) for any other variable type.
//
// NOTE(review): ShareDataWith/Copy move only the data buffer — the LoD
// (sequence offset) metadata is not part of the Tensor base, so it must be
// propagated explicitly or `dst` silently loses sequence information.
inline void CopyOrShare(const framework::Variable &src,
                        const platform::Place &dst_place,
                        framework::Variable *dst) {
  if (src.IsType<LoDTensor>()) {
    auto &src_tensor = src.Get<LoDTensor>();
    auto *dst_tensor = dst->GetMutable<LoDTensor>();
    if (src_tensor.place() == dst_place) {
      // Same place: share the buffer instead of copying.
      dst_tensor->ShareDataWith(src_tensor);
    } else {
      Copy(src_tensor, dst_place, dst_tensor);
    }
    // Propagate LoD metadata; neither ShareDataWith nor Copy carries it.
    dst_tensor->set_lod(src_tensor.lod());
  } else if (src.IsType<SelectedRows>()) {
    auto &src_sr = src.Get<SelectedRows>();
    auto *dst_sr = dst->GetMutable<SelectedRows>();
    dst_sr->set_rows(src_sr.rows());
    dst_sr->set_height(src_sr.height());
    Copy(src_sr.value(), dst_place, dst_sr->mutable_value());
  } else {
    PADDLE_THROW("Expect LoDTensor/SelectedRows, get %s", src.Type().name());
  }
}
void
WaitOnPlace
(
const
platform
::
Place
place
)
{
platform
::
DeviceContextPool
&
pool
=
platform
::
DeviceContextPool
::
Instance
();
auto
&
dev_ctx
=
*
pool
.
Get
(
place
);
...
...
@@ -149,6 +174,7 @@ class ParallelDoOp : public framework::OperatorBase {
lod_tensor_to_be_merged
->
MergeLoDTensor
(
lod_tensors
,
dev_ctx
.
GetPlace
());
}
WaitOnPlaces
(
places
);
LOG
(
INFO
)
<<
"End of ParallelGradDo"
;
}
};
...
...
@@ -210,21 +236,27 @@ class ParallelDoGradOp : public framework::OperatorBase {
}
WaitOnPlaces
(
places
);
// merge grad
AccumulateGrad
(
scope
,
place
,
sub_scopes
,
places
);
LOG
(
INFO
)
<<
"End of ParallelDoGrad"
;
}
void
AccumulateGrad
(
const
framework
::
Scope
&
scope
,
const
platform
::
Place
&
place
,
const
std
::
vector
<
framework
::
Scope
*>
&
sub_scopes
,
const
platform
::
PlaceList
&
places
)
const
{
for
(
auto
&
s
:
Outputs
(
framework
::
GradVarName
(
kParameters
)))
{
auto
&
result
=
sub_scopes
[
0
]
->
FindVar
(
s
)
->
Get
<
LoDTensor
>
();
std
::
string
tmp_name
;
auto
*
tmp
=
sub_scopes
[
0
]
->
Var
(
&
tmp_name
)
->
GetMutable
<
LoDTensor
>
();
std
::
__cxx11
::
string
tmp_name
;
auto
*
tmp
=
sub_scopes
[
0
]
->
Var
(
&
tmp_name
);
LOG
(
INFO
)
<<
"---"
<<
s
;
for
(
size_t
i
=
1
;
i
<
sub_scopes
.
size
();
++
i
)
{
auto
&
tensor_to_merge
=
sub_scopes
[
i
]
->
FindVar
(
s
)
->
Get
<
LoDTensor
>
();
if
(
!
(
places
[
i
]
==
places
[
0
]))
{
framework
::
Copy
(
tensor_to_merge
,
places
[
0
],
tmp
);
LOG
(
INFO
)
<<
"---"
;
CopyOrShare
(
*
sub_scopes
[
i
]
->
FindVar
(
s
),
places
[
0
],
tmp
);
WaitOnPlace
(
places
[
0
]);
}
else
{
tmp
->
ShareDataWith
(
tensor_to_merge
);
}
LOG
(
INFO
)
<<
"---"
;
auto
sum_op
=
framework
::
OpRegistry
::
CreateOp
(
"sum"
,
{{
"X"
,
{
s
,
tmp_name
}}},
{{
"Out"
,
{
s
}}},
framework
::
AttributeMap
{});
...
...
@@ -232,8 +264,8 @@ class ParallelDoGradOp : public framework::OperatorBase {
WaitOnPlace
(
places
[
0
]);
}
VLOG
(
3
)
<<
result
;
framework
::
Copy
(
result
,
place
,
scope
.
FindVar
(
s
)
->
GetMutable
<
LoDTensor
>
(
));
LOG
(
INFO
)
<<
"---"
;
CopyOrShare
(
*
sub_scopes
[
0
]
->
FindVar
(
s
),
place
,
scope
.
FindVar
(
s
));
}
WaitOnPlaces
(
places
);
}
...
...
@@ -289,7 +321,7 @@ class ParallelDoGradOpShapeInference : public framework::InferShapeBase {
PADDLE_ENFORCE
(
ctx
->
HasInputs
(
kParameters
));
PADDLE_ENFORCE
(
ctx
->
HasOutputs
(
framework
::
GradVarName
(
kParameters
)));
PADDLE_ENFORCE
(
ctx
->
HasInput
(
kInputs
));
PADDLE_ENFORCE
(
ctx
->
HasInput
s
(
kInputs
));
for
(
auto
&
s
:
output
)
{
PADDLE_ENFORCE
(
ctx
->
HasInputs
(
s
));
...
...
python/paddle/v2/fluid/layers/control_flow.py
浏览文件 @
0ed3f359
...
...
@@ -270,6 +270,7 @@ class ParallelDo(object):
for
in_var_name
in
op
.
input
(
iname
):
if
in_var_name
not
in
local_inputs
:
params
.
append
(
in_var_name
)
params
=
list
(
set
(
params
))
return
[
parent_block
.
var
(
name
)
for
name
in
params
]
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录