Commit 4450a312 authored by Yang Yu

Polish Unittest

Parent 01d20c44
import paddle.v2.fluid as fluid

__all__ = ['many_times', 'prog_scope']


def many_times(times):
    # Run the decorated test body `times` times in a row.
    def __impl__(fn):
        def __fn__(*args, **kwargs):
            for _ in range(times):
                fn(*args, **kwargs)

        return __fn__

    return __impl__


def prog_scope():
    # Run the decorated test body inside a fresh pair of main/startup
    # Programs, so graph state never leaks between tests.
    def __impl__(fn):
        def __fn__(*args, **kwargs):
            prog = fluid.Program()
            startup_prog = fluid.Program()
            with fluid.program_guard(prog, startup_prog):
                fn(*args, **kwargs)

        return __fn__

    return __impl__
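For context, a minimal usage sketch of the two decorators (a hypothetical test, not part of this commit; it assumes the module above is saved as decorators.py beside the tests, as the import in the diff below suggests). Stacking @many_times on top of @prog_scope repeats the body, and each repetition builds into fresh Programs:

# Hypothetical test exercising both decorators.
import unittest

import paddle.v2.fluid as fluid
from decorators import many_times, prog_scope


class TestDecoratorsSketch(unittest.TestCase):
    @many_times(3)  # repeat to shake out random-seed flakiness
    @prog_scope()   # each repetition gets its own Programs
    def test_build(self):
        x = fluid.layers.data(name='x', shape=[8], lod_level=0)
        avg = fluid.layers.mean(x=x)
        self.assertIsNotNone(avg)


if __name__ == '__main__':
    unittest.main()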
@@ -3,7 +3,7 @@ import random
 import collections
 import paddle.v2.fluid as fluid
 import unittest
-import copy
+from decorators import *


 class Memory(object):
@@ -78,7 +78,7 @@ class BaseRNN(object):
         self.outputs[oname] = Output()

     def step(self, **kwargs):
-        pass
+        raise NotImplementedError()

     def exe(self):
         retv = dict()
@@ -141,18 +141,22 @@ class BaseRNN(object):
             feed_dict[pname] = self.params[pname]
         return feed_dict

-    def get_numeric_gradient_of_param(self, param_name, delta=0.01):
+    def get_numeric_gradient_of_param(self, param_name, delta=0.001):
         p = self.params[param_name]
+        if len(p.shape) != 2:
+            raise ValueError("Only support getting the numeric gradient of"
+                             " a matrix (2-D) parameter")
         g = numpy.zeros(shape=p.shape, dtype=p.dtype)
-        for p_it, g_it in numpy.nditer([p, g], op_flags=['readwrite']):
-            o = float(p_it)
-            p_it[...] = o + delta
-            pos = self._exe_mean_out_()
-            p_it[...] = o - delta
-            neg = self._exe_mean_out_()
-            p_it[...] = o
-            g[:] = (pos - neg) / (delta * 2)
+        for i in xrange(p.shape[0]):
+            for j in xrange(p.shape[1]):
+                o = p[i][j]
+                p[i][j] += delta
+                pos = self._exe_mean_out_()
+                p[i][j] -= 2 * delta
+                neg = self._exe_mean_out_()
+                p[i][j] = o
+                g[i][j] = (pos - neg) / (delta * 2)
         return g

     def _exe_mean_out_(self):
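The rewritten loop is plain central differencing: each entry of the parameter matrix is nudged by ±delta and the mean network output re-evaluated, so g[i][j] ≈ (f(p + δ) − f(p − δ)) / (2δ). A self-contained sketch of the same scheme against a function whose gradient is known in closed form (all names here are illustrative, not from the commit):

import numpy


def numeric_gradient(p, f, delta=0.001):
    # Central difference: error shrinks as O(delta^2) for smooth f.
    g = numpy.zeros(shape=p.shape, dtype=p.dtype)
    for i in range(p.shape[0]):
        for j in range(p.shape[1]):
            o = p[i][j]
            p[i][j] = o + delta
            pos = f()  # f reads p in place, like _exe_mean_out_ does
            p[i][j] = o - delta
            neg = f()
            p[i][j] = o  # restore before moving on
            g[i][j] = (pos - neg) / (delta * 2)
    return g


w = numpy.random.random((3, 4))
g = numeric_gradient(w, lambda: (w * w).sum() / 2.0)
print(numpy.allclose(g, w))  # gradient of sum(w**2)/2 is w; prints True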
@@ -175,40 +179,36 @@ class SimpleMul(BaseRNN):


 class TestSimpleMul(unittest.TestCase):
-    def setUp(self):
-        self.python_impl = SimpleMul()
-
-    def test_forward(self):
-        program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(program, startup_program):
-            dat = fluid.layers.data(name='X', shape=[32], lod_level=1)
-
-            rnn = fluid.layers.DynamicRNN()
-            with rnn.block():
-                d = rnn.step_input(dat)
-                o = fluid.layers.fc(input=d,
-                                    param_attr='W',
-                                    bias_attr=False,
-                                    size=10,
-                                    act=None)
-                rnn.output(o)
-
-            out = rnn()
-            out = fluid.layers.sequence_pool(out, pool_type='last')
-            loss = fluid.layers.mean(x=out)
-            fluid.backward.append_backward_ops(loss)
+    # Run this many times locally to ensure the random seed cannot break CI
+    # @many_times(10)
+    @prog_scope()
+    def test_forward_backward(self):
+        python_impl = SimpleMul()
+        dat = fluid.layers.data(name='X', shape=[32], lod_level=1)
+
+        rnn = fluid.layers.DynamicRNN()
+        with rnn.block():
+            d = rnn.step_input(dat)
+            o = fluid.layers.fc(input=d,
+                                param_attr='W',
+                                bias_attr=False,
+                                size=10,
+                                act=None)
+            rnn.output(o)
+
+        out = rnn()
+        out = fluid.layers.sequence_pool(out, pool_type='last')
+        loss = fluid.layers.mean(x=out)
+        fluid.backward.append_backward_ops(loss)

         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
-        out, w_g = exe.run(program,
-                           feed=self.python_impl.to_feed(cpu),
+        out, w_g = exe.run(feed=python_impl.to_feed(cpu),
                            fetch_list=[out, "W@GRAD"])
-        out_by_python = self.python_impl.exe()['Out']
+        out_by_python = python_impl.exe()['Out']
         self.assertTrue(numpy.allclose(out, out_by_python))
-        w_g_num = self.python_impl.get_numeric_gradient_of_param("W")
-        print w_g_num[0][0]
-        print w_g_num - w_g
+        w_g_num = python_impl.get_numeric_gradient_of_param("W")
+        self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05))


 if __name__ == '__main__':
...
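Restated in plain numpy, the graph under test applies one fc (param 'W', no bias) per time step, keeps the last step of each sequence, and averages the result. This is a sketch under the assumption that SimpleMul's step is a single matmul against W, which the fc layer above suggests; the data shapes are illustrative:

import numpy

# Three variable-length sequences of width 32 (one LoD batch); W is 32x10.
seqs = [numpy.random.random((n, 32)) for n in (2, 3, 1)]
W = numpy.random.random((32, 10))

# fc per step == matmul; sequence_pool('last') keeps each final step;
# mean averages over every element of the pooled 3x10 result.
last_steps = numpy.array([numpy.dot(s, W)[-1] for s in seqs])
loss = last_steps.mean()
print(loss)

The test then checks the framework's analytic W@GRAD against the numeric estimate with rtol=0.05, a deliberately loose bound that absorbs finite-difference noise.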