BaiXuePrincess / Paddle, forked from PaddlePaddle / Paddle (in sync with the fork source)
Commit 9deb1756 (unverified)
Authored Jan 15, 2018 by Qiao Longfei; committed via GitHub on Jan 15, 2018
Parent: 59bc4c46

fix while_grad_op first step loss lod problem (#7490)

* fix while_grad_op first step loss lod problem
* optimize code
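Read together with the two diffs below, the fix is about LoD (level of detail, the sequence-offset metadata a Paddle LoDTensor carries alongside its data): ShrinkRNNMemoryGradOp now copies the forward input's LoD onto the gradient it produces, and WhileGradOp copies the inside tensor's LoD onto parameter gradients that it has to create by zero-filling at the first step. The sketch below illustrates why an explicit copy is needed; the ToyLoDTensor type and FillZeroLike helper are illustrative stand-ins, not Paddle's real framework classes.

```cpp
// Minimal, self-contained sketch (toy types, not Paddle's framework):
// a freshly zero-filled "gradient" tensor has an empty LoD, so the
// sequence boundaries of the forward tensor are lost unless they are
// copied explicitly; that is the set_lod(...) pattern this commit adds.
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyLoDTensor {
  std::vector<float> data;
  std::vector<std::vector<std::size_t>> lod;  // sequence offsets per level

  void set_lod(const std::vector<std::vector<std::size_t>> &l) { lod = l; }
  const std::vector<std::vector<std::size_t>> &get_lod() const { return lod; }
};

// Roughly analogous to running a fill_constant op: values are produced,
// but no LoD comes with them.
ToyLoDTensor FillZeroLike(const ToyLoDTensor &ref) {
  ToyLoDTensor out;
  out.data.assign(ref.data.size(), 0.0f);
  return out;  // out.lod is empty here: the "lost LoD" problem
}

int main() {
  ToyLoDTensor forward;
  forward.data = {1, 2, 3, 4, 5};
  forward.lod = {{0, 2, 5}};  // two sequences: rows [0,2) and [2,5)

  ToyLoDTensor grad = FillZeroLike(forward);
  assert(grad.get_lod().empty());    // without the fix: LoD is gone

  grad.set_lod(forward.get_lod());   // the pattern applied by this commit
  assert(grad.get_lod() == forward.get_lod());
  return 0;
}
```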
Showing 2 changed files with 11 additions and 6 deletions (+11 / -6):

paddle/operators/shrink_rnn_memory_op.cc    +1 / -0
paddle/operators/while_op.cc                +10 / -6
paddle/operators/shrink_rnn_memory_op.cc

@@ -138,6 +138,7 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
         math::set_constant(dev_ctx, &rest_tensor, 0.0f);
       }
     }
+    dx_tensor.set_lod(x_tensor.lod());
   }
 };
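In the hunk above, the one added line gives dx_tensor (the gradient with respect to X) the same LoD as the forward input x_tensor, so later backward ops still see X's sequence boundaries. Below is a self-contained toy model of that backward rule as it reads after the commit; ToyLoDTensor and ShrinkMemoryGrad are illustrative names, not the real shrink_rnn_memory kernel.

```cpp
// Toy model of ShrinkRNNMemoryGradOp's backward rule after this commit
// (illustrative types only): dX has X's full height, rows covered by dOut
// are copied, the rest are zero-filled, and dX inherits X's LoD.
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyLoDTensor {
  std::vector<float> rows;                    // one value per row, for brevity
  std::vector<std::vector<std::size_t>> lod;  // sequence offsets
};

ToyLoDTensor ShrinkMemoryGrad(const ToyLoDTensor &x, const ToyLoDTensor &dout) {
  ToyLoDTensor dx;
  dx.rows.assign(x.rows.size(), 0.0f);        // zero-fill, like set_constant
  // dout covers only the first rows (the sequences still alive at this step)
  for (std::size_t i = 0; i < dout.rows.size() && i < dx.rows.size(); ++i) {
    dx.rows[i] = dout.rows[i];
  }
  dx.lod = x.lod;  // the added dx_tensor.set_lod(x_tensor.lod()) step
  return dx;
}

int main() {
  ToyLoDTensor x{{1, 2, 3, 4}, {{0, 1, 4}}};  // 4 rows, two sequences
  ToyLoDTensor dout{{10, 20}, {}};            // gradient for the first 2 rows only
  ToyLoDTensor dx = ShrinkMemoryGrad(x, dout);
  assert(dx.rows.size() == x.rows.size());
  assert(dx.lod == x.lod);                    // LoD is no longer dropped
  return 0;
}
```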
paddle/operators/while_op.cc

@@ -121,8 +121,8 @@ class WhileGradOp : public framework::OperatorBase {
     for (size_t i = 0; i < outside_og_names.size(); ++i) {
       auto outside_og_name = outside_og_names[i];
       auto inside_og_name = inside_og_names[i];
-      VLOG(10) << "Linking outside " << outside_og_name << " --> inside "
-               << inside_og_name;
+      VLOG(8) << "Linking outside " << outside_og_name << " --> inside "
+              << inside_og_name;
       auto &og_outside = detail::Ref(scope.FindVar(outside_og_name),
                                      "Cannot find Outside Gradient %s",
                                      outside_og_name);
@@ -141,11 +141,11 @@ class WhileGradOp : public framework::OperatorBase {
         auto &outside_array = og_outside.Get<framework::LoDTensorArray>();
         auto &inside_array =
             detail::Ref(og_inside.GetMutable<framework::LoDTensorArray>());
-        VLOG(10) << outside_og_name << " size = " << outside_array.size();
+        VLOG(8) << outside_og_name << " size = " << outside_array.size();
         inside_array.resize(outside_array.size());

         for (size_t j = 0; j < inside_array.size(); ++j) {
-          VLOG(10) << j << " " << outside_array[j].numel();
+          VLOG(8) << j << " " << outside_array[j].numel();
           if (outside_array[j].numel() != 0) {
             inside_array[j].set_lod(outside_array[j].lod());
             inside_array[j].ShareDataWith(outside_array[j]);
@@ -187,10 +187,14 @@ class WhileGradOp : public framework::OperatorBase {
             attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
             attrs["value"] = 0.0f;
+            auto var_name = pg_names[param_id];
             auto zero_op = framework::OpRegistry::CreateOp(
                 "fill_constant", framework::VariableNameMap{},
-                {{"Out", {pg_names[param_id]}}}, attrs);
+                {{"Out", {var_name}}}, attrs);
             zero_op->Run(scope, dev_place);
+            scope.FindVar(var_name)
+                ->GetMutable<framework::LoDTensor>()
+                ->set_lod(inside_tensor.lod());
           }
         }
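This hunk is the core of the while_grad fix. When a parameter receives no gradient at the first step, WhileGradOp creates one with a fill_constant op, which fills in shape and value but, as the added set_lod call implies, not the LoD; the new lines hoist pg_names[param_id] into var_name, look the variable up again after the zero op has run, and copy the inside tensor's LoD onto it. A minimal sketch of that "create zero gradient, then copy LoD" behaviour follows; ToyScope, EnsureZeroGrad and the "w@GRAD" name are hypothetical, not Paddle's Scope or OpRegistry API.

```cpp
// Hedged sketch of the fixed behaviour (hypothetical names, toy types):
// if a parameter gradient is missing, create it zero-filled from the
// inside tensor and also copy that tensor's LoD onto it.
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct ToyLoDTensor {
  std::vector<float> data;
  std::vector<std::vector<std::size_t>> lod;
};

using ToyScope = std::map<std::string, ToyLoDTensor>;

void EnsureZeroGrad(ToyScope &scope, const std::string &var_name,
                    const ToyLoDTensor &inside_tensor) {
  if (scope.count(var_name) != 0) return;  // gradient already exists
  ToyLoDTensor zero;                       // stands in for the fill_constant op
  zero.data.assign(inside_tensor.data.size(), 0.0f);
  zero.lod = inside_tensor.lod;            // the added set_lod(...) step
  scope[var_name] = zero;
}

int main() {
  ToyScope scope;
  ToyLoDTensor inside{{1, 2, 3}, {{0, 1, 3}}};
  EnsureZeroGrad(scope, "w@GRAD", inside);  // "w@GRAD" is an illustrative name
  assert(scope.at("w@GRAD").lod == inside.lod);
  return 0;
}
```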
@@ -231,7 +235,7 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker {
     auto igs = InputGrad(kX, /*do not drop empty gradient*/ false);
     for (auto &each_ig : igs) {
       if (inner_op_outputs.find(each_ig) == inner_op_outputs.end()) {
-        VLOG(10) << "Ignore " << each_ig;
+        VLOG(8) << "Ignore " << each_ig;
         each_ig = framework::kEmptyVarName;
       }
     }