BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 4450a312
Authored Dec 26, 2017 by Yang Yu
Polish Unittest
Parent: 01d20c44

Showing 2 changed files with 67 additions and 40 deletions:

python/paddle/v2/fluid/tests/decorators.py (+27, -0)
python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py (+40, -40)
python/paddle/v2/fluid/tests/decorators.py (new file, mode 100644)
import paddle.v2.fluid as fluid

__all__ = ['many_times', 'prog_scope']


def many_times(times):
    def __impl__(fn):
        def __fn__(*args, **kwargs):
            for _ in range(times):
                fn(*args, **kwargs)

        return __fn__

    return __impl__


def prog_scope():
    def __impl__(fn):
        def __fn__(*args, **kwargs):
            prog = fluid.Program()
            startup_prog = fluid.Program()
            with fluid.program_guard(prog, startup_prog):
                fn(*args, **kwargs)

        return __fn__

    return __impl__
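Together these give Fluid tests two composable decorators: many_times(n) reruns a test body n times (useful for shaking out seed-dependent flakiness), and prog_scope() builds each run inside fresh Program objects so layers defined in one test cannot leak into another. A minimal usage sketch follows; the test class and body are hypothetical, not part of the commit, and assume the same paddle.v2.fluid environment:

import unittest

from decorators import many_times, prog_scope


class ExampleTest(unittest.TestCase):  # hypothetical test, for illustration only
    @many_times(10)    # outer: repeat the already-guarded body 10 times
    @prog_scope()      # inner: each repetition gets fresh main/startup Programs
    def test_something(self):
        # Any fluid.layers.* calls here attach to the guarded Program,
        # so no operators accumulate across the 10 repetitions.
        pass


if __name__ == '__main__':
    unittest.main()

Note the stacking order: prog_scope wraps the function first, so when many_times re-invokes it, every repetition re-enters program_guard with brand-new Programs.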
python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
@@ -3,7 +3,7 @@ import random
 import collections
 import paddle.v2.fluid as fluid
 import unittest
-import copy
+from decorators import *
 
 
 class Memory(object):
@@ -78,7 +78,7 @@ class BaseRNN(object):
             self.outputs[oname] = Output()
 
     def step(self, **kwargs):
-        pass
+        raise NotImplementedError()
 
     def exe(self):
        retv = dict()
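Swapping pass for raise NotImplementedError() makes step an explicit template-method hook: BaseRNN drives the computation, each concrete test RNN must supply its own step, and a forgotten override now fails loudly at the first call instead of silently doing nothing. In miniature (a standalone illustration, not code from the commit):

class Base(object):
    def step(self, **kwargs):
        raise NotImplementedError()  # subclasses must supply per-step logic

class Good(Base):
    def step(self, **kwargs):
        return kwargs['x'] + 1

print(Good().step(x=1))  # prints 2
# Base().step(x=1) would raise NotImplementedError with a clear traceback.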
@@ -141,18 +141,22 @@ class BaseRNN(object):
                 feed_dict[pname] = self.params[pname]
         return feed_dict
 
-    def get_numeric_gradient_of_param(self, param_name, delta=0.01):
+    def get_numeric_gradient_of_param(self, param_name, delta=0.001):
         p = self.params[param_name]
+        if len(p.shape) != 2:
+            raise ValueError("Not support get numeric gradient of an parameter,"
+                             " which is not matrix")
         g = numpy.zeros(shape=p.shape, dtype=p.dtype)
-        for p_it, g_it in numpy.nditer([p, g], op_flags=['readwrite']):
-            o = float(p_it)
-            p_it[...] = o + delta
-            pos = self._exe_mean_out_()
-            p_it[...] = o - delta
-            neg = self._exe_mean_out_()
-            p_it[...] = o
-            g[:] = (pos - neg) / (delta * 2)
+
+        for i in xrange(p.shape[0]):
+            for j in xrange(p.shape[1]):
+                o = p[i][j]
+                p[i][j] += delta
+                pos = self._exe_mean_out_()
+                p[i][j] -= 2 * delta
+                neg = self._exe_mean_out_()
+                p[i][j] = o
+                g[i][j] = (pos - neg) / (delta * 2)
         return g
 
     def _exe_mean_out_(self):
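The rewritten loop is a textbook central-difference gradient check: each element of the parameter matrix is perturbed by +delta and -delta, the mean network output is re-evaluated on each side, and (pos - neg) / (2 * delta) is stored as the numeric derivative; shrinking delta from 0.01 to 0.001 also tightens the O(delta^2) truncation error. The old nditer version appears to have been buggy: g[:] = ... broadcast each estimate over the entire gradient array, so only the last element's estimate survived. A standalone sketch of the same estimator against a function with a known gradient (illustrative only, not code from the commit; the commit itself uses Python 2 xrange):

import numpy

def numeric_gradient(f, p, delta=0.001):
    # Central differences: g[i][j] ~ (f(p + delta*e_ij) - f(p - delta*e_ij)) / (2*delta)
    g = numpy.zeros(shape=p.shape, dtype=p.dtype)
    for i in range(p.shape[0]):
        for j in range(p.shape[1]):
            o = p[i][j]
            p[i][j] = o + delta
            pos = f(p)
            p[i][j] = o - delta
            neg = f(p)
            p[i][j] = o                    # restore before the next element
            g[i][j] = (pos - neg) / (delta * 2)
    return g

w = numpy.random.random((3, 4))
f = lambda x: (x ** 2).sum()               # analytic gradient: 2 * x
assert numpy.allclose(numeric_gradient(f, w), 2 * w, rtol=1e-4)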
@@ -175,40 +179,36 @@ class SimpleMul(BaseRNN):
 
 
 class TestSimpleMul(unittest.TestCase):
-    def setUp(self):
-        self.python_impl = SimpleMul()
-
-    def test_forward(self):
-        program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(program, startup_program):
-            dat = fluid.layers.data(name='X', shape=[32], lod_level=1)
-
-            rnn = fluid.layers.DynamicRNN()
-            with rnn.block():
-                d = rnn.step_input(dat)
-                o = fluid.layers.fc(input=d,
-                                    param_attr='W',
-                                    bias_attr=False,
-                                    size=10,
-                                    act=None)
-                rnn.output(o)
-
-            out = rnn()
-            out = fluid.layers.sequence_pool(out, pool_type='last')
-            loss = fluid.layers.mean(x=out)
-            fluid.backward.append_backward_ops(loss)
+    # Test many times in local to ensure the random seed cannot breaks CI
+    # @many_times(10)
+    @prog_scope()
+    def test_forward_backward(self):
+        python_impl = SimpleMul()
+        dat = fluid.layers.data(name='X', shape=[32], lod_level=1)
+
+        rnn = fluid.layers.DynamicRNN()
+        with rnn.block():
+            d = rnn.step_input(dat)
+            o = fluid.layers.fc(input=d,
+                                param_attr='W',
+                                bias_attr=False,
+                                size=10,
+                                act=None)
+            rnn.output(o)
+
+        out = rnn()
+        out = fluid.layers.sequence_pool(out, pool_type='last')
+        loss = fluid.layers.mean(x=out)
+        fluid.backward.append_backward_ops(loss)
 
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
-        out, w_g = exe.run(program,
-                           feed=self.python_impl.to_feed(cpu),
-                           fetch_list=[out, "W@GRAD"])
-        out_by_python = self.python_impl.exe()['Out']
+        out, w_g = exe.run(feed=python_impl.to_feed(cpu),
+                           fetch_list=[out, "W@GRAD"])
+        out_by_python = python_impl.exe()['Out']
         self.assertTrue(numpy.allclose(out, out_by_python))
-        w_g_num = self.python_impl.get_numeric_gradient_of_param("W")
-        print w_g_num[0][0]
-        print w_g_num - w_g
+        w_g_num = python_impl.get_numeric_gradient_of_param("W")
+        self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05))
 
 
 if __name__ == '__main__':
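Two things change shape in this hunk beyond the renamed test. First, the manual Program/startup_program/program_guard scaffolding moves into the new @prog_scope() decorator, which is why exe.run is now called without an explicit program argument: under fluid's program_guard, Executor.run falls back to the current default main program, which is exactly the fresh Program that prog_scope installed for this test. Second, the debugging print statements become a real check, asserting that the framework-computed gradient fetched as "W@GRAD" matches the numeric estimate within a 5% relative tolerance (rtol=0.05), a loose enough bound to absorb finite-difference noise while still catching a broken backward pass.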