s920243400 / PaddleDetection
Forked from PaddlePaddle / PaddleDetection
Commit cedd9805 (unverified)
Authored Jan 10, 2018 by fengjiayi; committed via GitHub on Jan 10, 2018

Merge pull request #7361 from JiayiFeng/refine_and_enhence_WhileGradOp
Refine while grad op
Parents: a158a3bd, fbc30215

Showing 1 changed file with 34 additions and 39 deletions (+34 −39)

paddle/operators/while_op.cc
@@ -211,59 +211,54 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker {
 protected:
  std::unique_ptr<framework::OpDesc> Apply() const override {
-    auto *grad = new framework::OpDesc();
-    grad->SetType("while_grad");
-    grad->SetInput(kX, Input(kX));
+    auto *while_grad = new framework::OpDesc();
+    while_grad->SetType("while_grad");
+    while_grad->SetInput(kX, Input(kX));
+    while_grad->SetInput(kOutputs, Output(kOutputs));
+    while_grad->SetInput(kStepScopes, Output(kStepScopes));
+
+    auto *grad_block = this->grad_block_[0];
+    auto *fwd_block = grad_block->ParentBlock();
+
     // Not all of IGs will be generated by inner gradient operators of while op.
     // Ignore IGs that is not generated by the inside block.
-    auto igs = InputGrad(kX, /*do not drop empty gradient*/ false);
-    std::unordered_set<std::string> all_outs;
-    for (size_t i = 0; i < grad_block_[0]->OpSize(); ++i) {
-      for (auto &oname : grad_block_[0]->Op(i)->OutputArgumentNames()) {
-        all_outs.insert(oname);
+    std::unordered_set<std::string> inner_op_outputs;
+    for (const auto *op : grad_block->AllOps()) {
+      for (auto &oname : op->OutputArgumentNames()) {
+        inner_op_outputs.insert(oname);
       }
     }
+    auto igs = InputGrad(kX, /*do not drop empty gradient*/ false);
     for (auto &each_ig : igs) {
-      if (all_outs.find(each_ig) == all_outs.end()) {
+      if (inner_op_outputs.find(each_ig) == inner_op_outputs.end()) {
         VLOG(10) << "Ignore " << each_ig;
         each_ig = framework::kEmptyVarName;
       }
     }
-    grad->SetOutput(framework::GradVarName(kX), igs);
-
-    grad->SetInput(kOutputs, Output(kOutputs));
+    while_grad->SetOutput(framework::GradVarName(kX), igs);
 
     // OG should be re-calculated by step blocks, since many outputs of while op
     // do not need to calculate gradients.
     std::unordered_set<std::string> block_ins;
-    auto *fwd_block = this->grad_block_[0]->ParentBlock();
-    {
-      for (auto &p : Input(kX)) {
-        block_ins.insert(p);
-      }
-      for (auto &o : Output(kOutputs)) {
-        block_ins.insert(o);
-      }
+    block_ins.reserve(Input(kX).size() + Output(kOutputs).size());
+    for (auto &p : Input(kX)) {
+      block_ins.insert(p);
     }
+    for (auto &o : Output(kOutputs)) {
+      block_ins.insert(o);
+    }
     std::unordered_set<std::string> extra_inputs;
-    for (size_t i = 0; i < grad_block_[0]->OpSize(); ++i) {
-      for (auto &input_name : grad_block_[0]->Op(i)->InputArgumentNames()) {
-        if (block_ins.find(input_name) != block_ins.end()) {
-          continue;
-        }
-
-        // If the input of Op is generated by the forward block, do not make it
-        // as input again.
-        if (fwd_block->FindVar(input_name) != nullptr) {
+    for (const auto *op : grad_block->AllOps()) {
+      for (auto &input_name : op->InputArgumentNames()) {
+        // If the input of Op has been recorded or is generated by the forward
+        // block, do not make it as input again.
+        if (block_ins.find(input_name) != block_ins.end() ||
+            fwd_block->FindVar(input_name) != nullptr) {
           continue;
         }
         extra_inputs.insert(input_name);
       }
-      for (auto &output_name : grad_block_[0]->Op(i)->OutputArgumentNames()) {
+      for (auto &output_name : op->OutputArgumentNames()) {
         block_ins.insert(output_name);
       }
     }
@@ -272,15 +267,15 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker {
     extra_inputs_list.resize(extra_inputs.size());
     std::copy(extra_inputs.begin(), extra_inputs.end(),
               extra_inputs_list.begin());
-    grad->SetInput(framework::GradVarName(kOutputs), extra_inputs_list);
-    grad->SetInput(kStepScopes, Output(kStepScopes));
-    grad->SetAttrMap(this->Attrs());
-    grad->SetBlockAttr(kStepBlock, *grad_block_[0]);
+    while_grad->SetInput(framework::GradVarName(kOutputs), extra_inputs_list);
+
+    while_grad->SetAttrMap(this->Attrs());
+    while_grad->SetBlockAttr(kStepBlock, *grad_block);
+
     // record the original output gradient names, since the gradient name of
     // while operator could be renamed.
-    grad->SetAttr("original_output_grad", extra_inputs_list);
+    while_grad->SetAttr("original_output_grad", extra_inputs_list);
 
-    return std::unique_ptr<framework::OpDesc>(grad);
+    return std::unique_ptr<framework::OpDesc>(while_grad);
   }
 };
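
The core of the first hunk is easier to see outside the framework: any requested input gradient (IG) that no operator inside the gradient block actually produces is replaced with the empty-variable sentinel, so downstream passes skip it. Below is a minimal standalone sketch of that filtering pattern; FilterInputGrads and the sample gradient names are invented for illustration, and the sentinel is hard-coded here instead of including Paddle's framework headers.

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Stand-in for framework::kEmptyVarName; hard-coded to keep the
// sketch self-contained.
static const char kEmptyVarName[] = "@EMPTY@";

// Replace every requested input gradient that no op inside the gradient
// block produces with the empty-variable sentinel, mirroring the
// inner_op_outputs filtering step in the diff above.
std::vector<std::string> FilterInputGrads(
    std::vector<std::string> igs,
    const std::unordered_set<std::string> &inner_op_outputs) {
  for (auto &each_ig : igs) {
    if (inner_op_outputs.find(each_ig) == inner_op_outputs.end()) {
      each_ig = kEmptyVarName;  // nothing in the block computes this gradient
    }
  }
  return igs;
}

int main() {
  // Suppose the gradient block only produces gradients for x and h.
  std::unordered_set<std::string> produced = {"x@GRAD", "h@GRAD"};
  for (const auto &name :
       FilterInputGrads({"x@GRAD", "mask@GRAD", "h@GRAD"}, produced)) {
    std::cout << name << "\n";  // x@GRAD, @EMPTY@, h@GRAD
  }
  return 0;
}

Building the set of produced names once up front makes each membership test O(1), which is why the refined code hoists the inner_op_outputs construction above the loop over igs.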
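The extra-input bookkeeping in the same hunk follows a similar shape: an op input counts as an extra input of while_grad only if it is neither already known to the block nor defined in the forward (parent) block, and each op's outputs are recorded so that later consumers of those outputs are not flagged. A hedged, self-contained rendering of that logic follows; OpNames, CollectExtraInputs, and the example names are hypothetical stand-ins for grad_block->AllOps(), InputArgumentNames(), OutputArgumentNames(), and fwd_block->FindVar().

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// One gradient-block op, reduced to its input/output argument names.
struct OpNames {
  std::vector<std::string> inputs;
  std::vector<std::string> outputs;
};

// Walk the ops in order; an input is "extra" only if it is neither already
// known to the block (block_ins) nor defined by the forward block. Each
// op's outputs are then recorded so later ops that consume them are skipped.
std::unordered_set<std::string> CollectExtraInputs(
    const std::vector<OpNames> &ops,
    std::unordered_set<std::string> block_ins,
    const std::unordered_set<std::string> &fwd_block_vars) {
  std::unordered_set<std::string> extra_inputs;
  for (const auto &op : ops) {
    for (const auto &input_name : op.inputs) {
      if (block_ins.count(input_name) || fwd_block_vars.count(input_name)) {
        continue;
      }
      extra_inputs.insert(input_name);
    }
    for (const auto &output_name : op.outputs) {
      block_ins.insert(output_name);
    }
  }
  return extra_inputs;
}

int main() {
  // "out@GRAD" comes from outside the block, "tmp" is produced by the first
  // op, and "W" already exists in the forward block.
  std::vector<OpNames> ops = {{{"out@GRAD", "W"}, {"tmp"}},
                              {{"tmp"}, {"x@GRAD"}}};
  for (const auto &name :
       CollectExtraInputs(ops, {"x", "out"}, {"W", "x", "out"})) {
    std::cout << name << "\n";  // out@GRAD
  }
  return 0;
}

The names collected this way become the extra_inputs_list that the second hunk wires into while_grad under framework::GradVarName(kOutputs) and records as the "original_output_grad" attribute.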