Commit e9a92e3e, authored by: S superjom

add fc test

Parent commit: 001b62a4
import paddle.v2.framework.core as core
import unittest
import numpy
import numpy as np
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
class TestFc(unittest.TestCase):
    """Checks the "fc" (fully-connected) operator against a NumPy matmul.

    The diff residue in the original interleaved the pre- and post-commit
    versions of this test (the "X" tensor was created twice with conflicting
    setup, "W" was filled via two different code paths, and both a dead
    `tensor` alias and `Y_tensor` pointed at the output).  This is the
    cleaned post-commit version.
    """

    def setUp(self):
        # Random inputs shared by the op run and the NumPy reference.
        # Shapes: X is (1000, 784), W is (784, 100).
        # NOTE(review): np.random.random yields float64; the removed code
        # explicitly cast to "float32" — confirm the tensor dtype expected
        # by the fc op.
        self.x_np_data = np.random.random((1000, 784))
        self.W_np_data = np.random.random((784, 100))

    def test_fc(self):
        scope = core.Scope()
        place = core.CPUPlace()

        # Input tensor X, filled from the NumPy data.
        x_tensor = scope.new_var("X").get_tensor()
        x_tensor.set_dims(self.x_np_data.shape)
        x_tensor.set(self.x_np_data, place)

        # Weight tensor W, filled from the NumPy data.
        W_tensor = scope.new_var("W").get_tensor()
        W_tensor.set_dims(self.W_np_data.shape)
        W_tensor.set(self.W_np_data, place)

        op = Operator("fc", X="X", Y="Y", W="W")

        # Make sure every output variable exists before running the op.
        # NOTE(review): the loop header was elided by the diff; reconstructed
        # from the surrounding context — verify against the full file.
        for out in op.outputs():
            if scope.find_var(out) is None:
                scope.new_var(out).get_tensor()

        Y_tensor = scope.find_var("Y").get_tensor()
        op.infer_shape(scope)
        self.assertEqual([1000, 100], Y_tensor.shape())

        ctx = core.DeviceContext.create(place)
        op.run(scope, ctx)

        # After running the op, check that Y matches the NumPy reference.
        py_data = np.matmul(self.x_np_data, self.W_np_data)
        op_data = np.array(Y_tensor)
        print(py_data - op_data)
        self.assertTrue(np.allclose(py_data, op_data))
if __name__ == '__main__':
......
......@@ -6,8 +6,7 @@ from paddle.v2.framework.op import Operator
def py_sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x), as a NumPy reference implementation.

    The diff residue kept both the old and the new `return` line, leaving a
    dead duplicate statement; only the post-commit form is retained here.
    Works elementwise on scalars and NumPy arrays alike.
    """
    return 1. / (1. + np.exp(-x))
class PySimpleRNN(object):
'''
......@@ -62,10 +61,10 @@ class PySimpleRNNTest(unittest.TestCase):
print 'output', output
def create_tensor(scope, name, shape, np_data):
    """Create a tensor variable `name` in `scope` and fill it with `np_data`.

    The diff residue interleaved the old 3-argument signature (which filled
    the tensor with fresh random values) with the new 4-argument one; only
    the post-commit version — taking the data explicitly so the Python
    reference model and the op share identical inputs — is retained.

    Args:
        scope: a core.Scope to create the variable in.
        name: variable name to create.
        shape: dims to set on the tensor (list of ints).
        np_data: NumPy array used to fill the tensor (on CPU).

    Returns:
        The filled tensor object.
    """
    tensor = scope.new_var(name).get_tensor()
    tensor.set_dims(shape)
    tensor.set(np_data, core.CPUPlace())
    return tensor
......@@ -91,25 +90,36 @@ class TestRecurrentOp(unittest.TestCase):
weight_dim = 15
sent_len = 11
def setUp(self):
    """Build the NumPy reference RNN once; each test method reuses it.

    The diff residue kept the removed `def forward(self):` header and a
    duplicated `self.scope = core.Scope()` line here; in the post-commit
    version the scope is created inside forward(), not in setUp().
    """
    self.py_rnn = PySimpleRNN(self.input_dim,
                              self.batch_size,
                              self.weight_dim,
                              self.sent_len)
def forward(self):
    """Run the recurrent op end-to-end and return the output "h" as a NumPy array.

    Creates a fresh scope each call, populates the global variables and the
    step net, then infers shapes and runs the op on CPU.
    """
    self.scope = core.Scope()
    self.create_global_variables()
    self.create_step_net()
    rnn_op = self.create_rnn_op()
    ctx = core.DeviceContext.create(core.CPUPlace())
    print('infer_shape')
    rnn_op.infer_shape(self.scope)
    rnn_op.run(self.scope, ctx)
    return np.array(self.scope.find_var("h").get_tensor())
def create_global_variables(self):
    """Create and fill the op's input tensors from the Python reference RNN.

    The diff residue interleaved the old 3-argument `create_tensor` calls
    (random fill) with the new 4-argument ones (explicit data); only the
    post-commit calls are retained, so the op and `self.py_rnn` compute on
    identical inputs.
    """
    # create inlink
    x_np_data = self.py_rnn.x
    create_tensor(self.scope, "x",
                  [self.sent_len, self.batch_size, self.input_dim],
                  x_np_data)
    W_np_data = self.py_rnn.W
    create_tensor(self.scope, "W",
                  [self.input_dim, self.input_dim], W_np_data)
    U_np_data = self.py_rnn.U
    create_tensor(self.scope, "U",
                  [self.input_dim, self.input_dim], U_np_data)
    h_boot_np_data = self.py_rnn.h_boot
    create_tensor(self.scope, "h_boot",
                  [self.batch_size, self.input_dim], h_boot_np_data)
    # Output/bookkeeping variables; tensors are allocated by the op itself.
    self.scope.new_var("step_scopes")
    self.scope.new_var("h@alias")
    self.scope.new_var("h")
......@@ -146,8 +156,12 @@ class TestRecurrentOp(unittest.TestCase):
def test_forward(self):
    """Compare the recurrent op's forward output against the NumPy reference.

    The diff residue kept both the old bare `self.forward()` call and the
    new `pd_output = self.forward()`; only the post-commit comparison is
    retained.  NOTE(review): the original commit only asserts that the
    shapes match, not the values — the value check presumably came later.
    """
    print('test recurrent op forward')
    pd_output = self.forward()
    py_output = self.py_rnn.forward()
    print('pd_output', pd_output)
    print()
    print('py_output', py_output)
    self.assertEqual(pd_output.shape, py_output.shape)
# Script entry point: run all unittest cases in this module.
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册