BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit e0d8c6ac (unverified)
Authored on Jul 02, 2019 by chengduo; committed via GitHub on Jul 02, 2019
Parent: 449c7a9f

Add find_no_grad_vars in backward.py (#17942)

* add not_been_used_vars to no_grad_set test=develop
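In short, the change teaches append_backward to treat forward-pass outputs that nothing ever reads (and that are not the loss itself) as no-grad variables, so no gradient is requested for them. Below is a minimal standalone sketch of the reverse scan that the new _find_no_grad_vars helper performs; the Op record and variable names are hypothetical stand-ins for Paddle's op descriptors, and the real helper additionally skips ops that own a sub_block and variables marked stop_gradient:

    from collections import namedtuple

    # Hypothetical stand-in for an op descriptor: just input/output names.
    Op = namedtuple("Op", ["inputs", "outputs"])

    def find_unused_outputs(op_path, targets, no_grad_set):
        # Walk the op path backwards. An output that is never read by a
        # later op and is not a target needs no gradient.
        used = set(targets)
        unused = []
        for op in reversed(op_path):
            for out in op.outputs:
                if out not in used and out not in op.inputs:
                    unused.append(out)
            for name in op.inputs:
                if name not in no_grad_set:
                    used.add(name)
        return set(unused)

    # 'part2' is produced by split but never consumed afterwards.
    ops = [
        Op(inputs=["image"], outputs=["feature"]),
        Op(inputs=["feature"], outputs=["part1", "part2"]),
        Op(inputs=["part1", "label"], outputs=["loss"]),
    ]
    print(find_unused_outputs(ops, targets=["loss"], no_grad_set=set()))
    # -> {'part2'}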
Showing 4 changed files with 122 additions and 9 deletions (+122 −9)
paddle/fluid/op_use_default_grad_op_maker.spec (+0 −1)
paddle/fluid/operators/hierarchical_sigmoid_op.cc (+42 −7)
python/paddle/fluid/backward.py (+23 −1)
python/paddle/fluid/tests/unittests/test_backward_find_no_grad_vars.py (+57 −0)
paddle/fluid/op_use_default_grad_op_maker.spec

@@ -15,7 +15,6 @@ fusion_seqexpand_concat_fc
 fusion_seqpool_concat
 fusion_squared_mat_sub
 gru
-hierarchical_sigmoid
 lrn
 lstm_unit
 max_pool2d_with_index
paddle/fluid/operators/hierarchical_sigmoid_op.cc

@@ -86,6 +86,10 @@ class HierarchicalSigmoidOp : public framework::OperatorWithKernel {
   }
 };

+/*
+ * Inputs: X, W, Label, PathTable, PathCode, Bias
+ * Outputs: Out, PreOut, W_out
+ */
 template <typename AttrType>
 class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  public:

@@ -162,6 +166,37 @@ Hierarchical Probabilistic Neural Network Language Model."
   }
 };

+/*
+ * Inputs: X, W, Label, PathTable, PathCode, PreOut, Out@GRAD
+ * Outputs: X@GRAD, W@GRAD, Bias@GRAD
+ */
+class HierarchicalSigmoidGradMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    auto* op = new framework::OpDesc();
+    op->SetType(this->ForwardOpType() + "_grad");
+    // Inputs: X, W, Label, PathTable, PathCode, PreOut, Out@GRAD
+    op->SetInput("X", Input("X"));
+    op->SetInput("W", Input("W"));
+    op->SetInput("Bias", Input("Bias"));
+    op->SetInput("Label", Input("Label"));
+    op->SetInput("PathTable", Input("PathTable"));
+    op->SetInput("PathCode", Input("PathCode"));
+    op->SetInput("PreOut", Output("PreOut"));
+    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+
+    // Outputs: X@GRAD, W@GRAD, Bias@GRAD
+    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    op->SetOutput(framework::GradVarName("W"), InputGrad("W"));
+    op->SetOutput(framework::GradVarName("Bias"), InputGrad("Bias"));
+    op->SetAttrMap(Attrs());
+    return std::unique_ptr<framework::OpDesc>(op);
+  }
+};
+
 class HierarchicalSigmoidGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

@@ -209,17 +244,17 @@ class HierarchicalSigmoidGradOpGradVarTypeInference
     auto attr = ctx->GetAttr("is_sparse");
     bool is_sparse = boost::get<bool>(attr);
     if (is_sparse) {
-      VLOG(30) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
-               << " is set to SelectedRows";
+      VLOG(3) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
+              << " is set to SelectedRows";
       ctx->SetType(w_grad_var_name, framework::proto::VarType::SELECTED_ROWS);
     } else {
-      VLOG(30) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
-               << " is set to LoDTensor";
+      VLOG(3) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
+              << " is set to LoDTensor";
       ctx->SetType(w_grad_var_name, framework::proto::VarType::LOD_TENSOR);
     }
     if (hasBias) {
-      VLOG(30) << "hierarchical_sigmoid_grad op "
-               << framework::GradVarName("Bias") << " is set to LoDTensor";
+      VLOG(3) << "hierarchical_sigmoid_grad op "
+              << framework::GradVarName("Bias") << " is set to LoDTensor";
       ctx->SetType(bias_grad_var_name, framework::proto::VarType::LOD_TENSOR);
     }
     ctx->SetDataType(w_grad_var_name, ctx->GetDataType(ctx->Input("W")[0]));

@@ -232,7 +267,7 @@ class HierarchicalSigmoidGradOpGradVarTypeInference
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(hierarchical_sigmoid, ops::HierarchicalSigmoidOp,
                   ops::HierarchicalSigmoidOpMaker<int>,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::HierarchicalSigmoidGradMaker);
 REGISTER_OPERATOR(hierarchical_sigmoid_grad, ops::HierarchicalSigmoidGradOp,
                   ops::HierarchicalSigmoidGradOpGradVarTypeInference);
 REGISTER_OP_CPU_KERNEL(
...
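The registration change above replaces the generic paddle::framework::DefaultGradOpDescMaker<true>, which forwards every forward-op input and output to the grad op, with a hand-written maker that wires only the slots the gradient kernel actually reads; this is also why hierarchical_sigmoid is dropped from op_use_default_grad_op_maker.spec. One way to see the effect is to build a small program and inspect the generated grad op. This is a hedged sketch against the fluid 1.x API this commit targets, not verified here:

    import paddle.fluid as fluid

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        x = fluid.layers.data(name='x', shape=[8], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='int64')
        out = fluid.layers.hsigmoid(input=x, label=y, num_classes=6)
        loss = fluid.layers.mean(out)
        fluid.backward.append_backward(loss)

    for op in main.global_block().ops:
        if op.type == 'hierarchical_sigmoid_grad':
            # Expect the slots wired by HierarchicalSigmoidGradMaker:
            # X, W, Bias, Label, PathTable, PathCode, PreOut, Out@GRAD
            print(op.input_names)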
python/paddle/fluid/backward.py

@@ -552,7 +552,9 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
     block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
     op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set)
+    no_grad_vars = _find_no_grad_vars(root_block, op_path, [loss], block_no_grad_set)
+    block_no_grad_set.update(no_grad_vars)
     no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))

     input_grad_names_set = None

@@ -630,6 +632,26 @@ def _as_list(x):
     return list(x) if isinstance(x, collections.Sequence) else [x]


+def _find_no_grad_vars(block, op_path, targets, no_grad_set):
+    """
+    Find the variables that are not used anywhere in the program and
+    should therefore be added to no_grad_var.
+    """
+    output_names = set([out.name for out in targets])
+    no_grad_var = []
+    for i, op in reversed(list(enumerate(op_path))):
+        # If the op has a sub_block, it is too complicated to find the
+        # correct no_grad_var.
+        if not op.has_attr("sub_block"):
+            for out_var in op.desc.output_arg_names():
+                if out_var not in output_names and out_var not in op.desc.input_arg_names(
+                ) and not block.vars[out_var].stop_gradient:
+                    no_grad_var.append(out_var)
+        for name in op.desc.input_arg_names():
+            if name not in no_grad_set:
+                output_names.add(name)
+    return set(no_grad_var)
+
+
 def _find_op_path_(block, outputs, inputs, no_grad_set):
     """
     no_grad_set will also be changed
...
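Two conservative guards are worth noting in the new helper: ops that carry a sub_block attribute (control-flow ops such as while or conditional_block) are skipped because tracing variable usage across nested blocks is not attempted, and variables already marked stop_gradient are left to the existing no-grad machinery. The names it returns are merged into block_no_grad_set, so append_backward suffixes them with @GRAD into no_grad_dict exactly as user-supplied no_grad_set entries are handled.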
python/paddle/fluid/tests/unittests/test_backward_find_no_grad_vars.py (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
from simple_nets import init_data


def simple_net1():
    x = fluid.layers.data(name='image', shape=[784], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    feature = fluid.layers.fc(input=x, size=20, act=None)
    part1, part2 = fluid.layers.split(feature, num_or_sections=[10, 10], dim=1)
    # Note: part2 is not used.
    loss = fluid.layers.cross_entropy(input=part1, label=label)
    loss = fluid.layers.mean(loss)
    return loss


class TestBackward(unittest.TestCase):
    def check_backward(self, model):
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        main = fluid.Program()
        startup = fluid.Program()
        batch_size = 2

        with fluid.program_guard(main, startup):
            loss = model()
            optimizer = fluid.optimizer.SGD(learning_rate=0.1)
            optimizer.minimize(loss)

            exe.run(fluid.default_startup_program())
            img, label = init_data(batch_size, img_shape=[784], label_range=9)
            exe.run(feed={'image': img, 'label': label})

    def test_backward(self):
        self.check_backward(simple_net1)


if __name__ == '__main__':
    unittest.main()
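The test exercises exactly the scenario the new helper targets: part2 comes out of split but is never consumed, so it is routed into no_grad_set, and the minimize/run calls complete where the backward pass would otherwise have to materialize a gradient for an output that nothing downstream produces.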