Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle, in sync with the fork source)
Commit 56b5d147

Fix the error of init variable in StaticRNN when stop_gradient=ON (#21118)

Authored by guofei on Nov 18, 2019; committed by Huihuang Zheng on Nov 18, 2019.
Parent: 3c98ec90
Showing 2 changed files with 89 additions and 15 deletions (+89 -15):

- paddle/fluid/operators/recurrent_op.cc (+3 -5)
- python/paddle/fluid/tests/unittests/test_recurrent_op.py (+86 -10)
未找到文件。
paddle/fluid/operators/recurrent_op.cc

--- a/paddle/fluid/operators/recurrent_op.cc
+++ b/paddle/fluid/operators/recurrent_op.cc
@@ -635,11 +635,9 @@ class RecurrentGradOpShapeInference : public framework::InferShapeBase {
                    RecurrentBase::kOutputs);
-    if (ctx->HasInputs(RecurrentBase::kInitialStates)) {
-      PADDLE_ENFORCE_EQ(
-          ctx->HasOutputs(framework::GradVarName(RecurrentBase::kInitialStates)),
-          true, "The output of(%s) should not be empty.",
-          framework::GradVarName(RecurrentBase::kInitialStates));
+    // In some case the kInitialStates is empty.
+    if (ctx->HasInputs(RecurrentBase::kInitialStates) &&
+        ctx->HasOutputs(framework::GradVarName(RecurrentBase::kInitialStates))) {
       ctx->SetOutputsDim(framework::GradVarName(RecurrentBase::kInitialStates),
                          ctx->GetInputsDim(RecurrentBase::kInitialStates));
     }
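
Why this works: before the change, shape inference for the recurrent grad op required a gradient output for kInitialStates whenever the kInitialStates input was present. With a StaticRNN memory whose init variable has stop_gradient=True, no such gradient variable is ever created, so the PADDLE_ENFORCE_EQ fired on a legal program; the new condition simply skips setting the gradient dims in that case. A minimal sketch of the previously failing setup, assuming the 1.6-era fluid API (shapes and names are illustrative and mirror the new test below):

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        x = layers.data(name="x", shape=[2, 10, 2], dtype="float32",
                        append_batch_size=False)
        x.stop_gradient = False
        # The initial state never needs a gradient.
        h_boot = layers.data(name="h_boot", shape=[2], dtype="float32")
        h_boot.stop_gradient = True

        rnn = layers.StaticRNN()
        with rnn.step():
            h_pre = rnn.memory(init=h_boot)  # init has no gradient var
            x_t = rnn.step_input(x)
            h = layers.sigmoid(layers.elementwise_add(h_pre, x_t))
            rnn.update_memory(h_pre, h)
            rnn.output(h)
        loss = layers.mean(rnn())

        # Before the fix, building the backward pass tripped the
        # PADDLE_ENFORCE_EQ in RecurrentGradOpShapeInference.
        fluid.backward.append_backward(loss)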
python/paddle/fluid/tests/unittests/test_recurrent_op.py

--- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py
+++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
@@ -123,7 +123,8 @@ class RecurrentOpTest1(unittest.TestCase):
     def setUp(self):
         self.setup_program()
-        self.data_field = {"x", "h_boot"}
+        self.feed_data_field = {"x", "h_boot"}
+        self.grad_data_field = self.feed_data_field
 
         self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
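
The harness change in this and the next four hunks: the single data_field set served two purposes, naming the variables to feed and the variables whose gradients get fetched and checked. Splitting it into feed_data_field and grad_data_field lets a test feed a variable without asserting that a gradient exists for it, which is what the new stop_gradient test needs. Schematically (a hypothetical standalone sketch, not part of the diff):

    # Variables fed to the program vs. variables whose gradients are checked.
    feed_data_field = {"x", "h_boot", "W", "U"}  # everything the program consumes
    grad_data_field = {"x", "W", "U"}            # h_boot is stop_gradient=True, so
                                                 # no h_boot@GRAD variable exists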
@@ -161,7 +162,7 @@ class RecurrentOpTest1(unittest.TestCase):
     def forward(self):
         self.feed_map = {
             x: create_tensor(getattr(self.py_rnn, x), self.place)
-            for x in self.data_field
+            for x in self.feed_data_field
         }
         exe = Executor(self.place)
         out = exe.run(self.main_program,
@@ -173,11 +174,11 @@ class RecurrentOpTest1(unittest.TestCase):
     def backward(self):
         self.feed_map = {
             x: create_tensor(getattr(self.py_rnn, x), self.place)
-            for x in self.data_field
+            for x in self.feed_data_field
         }
         fetch_list = [
             self.main_program.global_block().var(grad_var_name(x))
-            for x in self.data_field
+            for x in self.grad_data_field
         ]
         exe = Executor(self.place)
@@ -195,7 +196,7 @@ class RecurrentOpTest1(unittest.TestCase):
         ana_grad = [np.array(x) for x in self.backward()]
         num_grad = self.get_numerical_gradient()
-        for idx, name in enumerate(self.data_field):
+        for idx, name in enumerate(self.grad_data_field):
             self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape)
             self.assertTrue(
                 np.isclose(
@@ -212,7 +213,7 @@ class RecurrentOpTest1(unittest.TestCase):
     def get_numerical_gradient(self, delta=0.005):
         dloss_dout = 1.0
-        feed_list = [getattr(self.py_rnn, x) for x in self.data_field]
+        feed_list = [getattr(self.py_rnn, x) for x in self.grad_data_field]
         grad_list = [np.zeros_like(x) for x in feed_list]
         for feed, grad in zip(feed_list, grad_list):
             for f, g in np.nditer([feed, grad], op_flags=['readwrite']):
@@ -253,7 +254,8 @@ class RecurrentOpTest2(RecurrentOpTest1):
     def setUp(self):
         self.setup_program()
-        self.data_field = {"x", "h_boot", "W", "U"}
+        self.feed_data_field = {"x", "h_boot", "W", "U"}
+        self.grad_data_field = self.feed_data_field
 
         self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
@@ -352,7 +354,8 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
     def setUp(self):
         self.setup_program()
-        self.data_field = {"x", "h_boot1", "h_boot2"}
+        self.feed_data_field = {"x", "h_boot1", "h_boot2"}
+        self.grad_data_field = self.feed_data_field
 
         self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
@@ -435,7 +438,8 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
     def setUp(self):
         self.setup_program()
-        self.data_field = {"x"}
+        self.feed_data_field = {"x"}
+        self.grad_data_field = self.feed_data_field
 
         self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
@@ -535,7 +539,8 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1):
     def setUp(self):
         self.setup_program()
-        self.data_field = {"x", "emb", "w1", "w2"}
+        self.feed_data_field = {"x", "emb", "w1", "w2"}
+        self.grad_data_field = self.feed_data_field
 
         self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
@@ -602,5 +607,76 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1):
         return rnn()
 
 
+class RecurrentOpStopGradientTest(RecurrentOpTest1):
+    """
+    Test RNNOp with stop_gradient = True
+    equation:
+        h_t = \sigma (W x_t + U h_{t-1})
+    weights:
+        - W
+        - U
+    vars:
+        - x
+    memories:
+        - h
+    output:
+        - h
+    """
+
+    input_dim = 2
+    batch_size = 10
+    sent_len = 2
+
+    def setUp(self):
+        self.setup_program()
+        self.feed_data_field = {"x", "h_boot", "W", "U"}
+        self.grad_data_field = {"x", "W", "U"}
+
+        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
+        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
+        self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape)
+
+        with fluid.program_guard(self.main_program, self.startup_program):
+            self.output = layers.mean(self.create_rnn_op())
+
+    def create_rnn_op(self):
+        x = layers.data(
+            shape=[self.sent_len, self.batch_size, self.input_dim],
+            dtype="float32",
+            name="x",
+            append_batch_size=False)
+        x.stop_gradient = False
+        h_boot = layers.data(
+            shape=[self.input_dim], dtype="float32", name="h_boot")
+        h_boot.stop_gradient = True
+
+        rnn = layers.StaticRNN()
+        with rnn.step():
+            h_pre = rnn.memory(init=h_boot)  # init doesn't have gradient
+            x_t = rnn.step_input(x)
+
+            temp_l = layers.fc(
+                input=x_t,
+                size=self.input_dim,
+                param_attr=ParamAttr(
+                    name="W",
+                    initializer=fluid.initializer.ConstantInitializer(1.0)),
+                bias_attr=False)
+            temp_r = layers.fc(
+                input=h_pre,
+                size=self.input_dim,
+                param_attr=ParamAttr(
+                    name="U",
+                    initializer=fluid.initializer.ConstantInitializer(0.0)),
+                bias_attr=False)
+
+            h = layers.sigmoid(x=layers.elementwise_add(temp_l, temp_r))
+
+            rnn.update_memory(h_pre, h)
+            rnn.output(h)
+
+        return rnn()
+
+
 if __name__ == '__main__':
     unittest.main()
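
To exercise the new case locally, the test module can be run on its own, assuming an importable Paddle build (the invocation is illustrative; unittest also accepts a single test class name):

    python python/paddle/fluid/tests/unittests/test_recurrent_op.py RecurrentOpStopGradientTest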