Commit 52a73587 authored by dzhwinter, committed by Yu Yang

"add asnumpy interface" (#5620)

* "add asnumpy interface"

* Just for unittest

* Change unittests for numpy I/O

* Fix CI
Parent a619695b
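In short, this commit lets Executor.run take plain numpy arrays in feed and, via the new return_numpy flag (default True), hand results back as numpy arrays instead of core.LoDTensor objects. A before/after sketch of the calling convention; exe, prog, out, x_np, and place are illustrative placeholders, not names taken from this diff:

# Before: manual LoDTensor plumbing on both sides of run().
tensor_x = core.LoDTensor()
tensor_x.set(x_np, place)
out_np = numpy.array(exe.run(prog, feed={'x': tensor_x}, fetch_list=[out])[0])

# After: numpy in, numpy out.
out_np = exe.run(prog, feed={'x': x_np}, fetch_list=[out])[0]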
+import numpy as np
 import paddle.v2.fluid.core as core
 from paddle.v2.fluid.framework import Block, Program, g_main_program

 g_scope = core.Scope()


+def as_numpy(tensor):
+    if isinstance(tensor, list):
+        return [as_numpy(t) for t in tensor]
+    assert isinstance(tensor, core.LoDTensor)
+    lod = tensor.lod()
+    tensor_data = np.array(tensor)
+    if len(lod) == 0:
+        ans = tensor_data
+    else:
+        raise RuntimeError("LoD Calculate lacks unit tests and buggy")
+    # elif len(lod) == 1:
+    #     ans = []
+    #     idx = 0
+    #     while idx < len(lod) - 1:
+    #         ans.append(tensor_data[lod[idx]:lod[idx + 1]])
+    #         idx += 1
+    # else:
+    #     for l in reversed(lod):
+    #         ans = []
+    #         idx = 0
+    #         while idx < len(l) - 1:
+    #             ans.append(tensor_data[l[idx]:l[idx + 1]])
+    #             idx += 1
+    #         tensor_data = ans
+    #     ans = tensor_data
+    return ans
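A minimal sketch of how as_numpy behaves, assuming the imports above, a CPU place, and a tensor with no LoD set (tensors that do carry LoD currently raise, per the guard above); this snippet is illustration, not part of the diff:

t = core.LoDTensor()
t.set(np.ones((2, 2)).astype('float32'), core.CPUPlace())
arr = as_numpy(t)        # -> numpy.ndarray of shape (2, 2)
arrs = as_numpy([t, t])  # lists are converted element-wise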
 class Executor(object):
     def __init__(self, places):
         if not isinstance(places, list) and not isinstance(places, tuple):
@@ -16,6 +45,47 @@ class Executor(object):
             act_places.append(p)
         self.executor = core.Executor(act_places)
         self.places = places
+    def aslodtensor(self, data):
+        def accumulate(data):
+            if not isinstance(data, list):
+                return 1
+            return sum([accumulate(sub) for sub in data])
+
+        def parselod(data):
+            seq_lens = [accumulate(seq) for seq in data]
+            cur_len = 0
+            lod = [cur_len]
+            for l in seq_lens:
+                cur_len += l
+                lod.append(cur_len)
+            return lod
+
+        assert len(self.places) != 0
+        if not isinstance(data, list):
+            # pure tensor case
+            tensor = core.LoDTensor()
+            tensor.set(data, self.places[0])
+            return tensor
+        else:
+            raise RuntimeError("Current implementation lacks unittests")
+            # lodtensor case
+            lod = []
+            if not isinstance(data[0], list):
+                lod.append(parselod(data))
+                flattened_data = np.concatenate(data, axis=0).astype("int64")
+            else:
+                while isinstance(data[0], list):
+                    lod.append(parselod(data))
+                    flattened_data = [item for seq in data for item in seq]
+                    data = flattened_data
+                flattened_data = np.concatenate(data, axis=0).astype("int64")
+            flattened_data = flattened_data.reshape([len(flattened_data), 1])
+            tensor = core.LoDTensor()
+            tensor.set(flattened_data, self.places[0])
+            tensor.set_lod(lod)
+            return tensor
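To illustrate what the two helpers above compute (a standalone sketch with renamed copies, not part of the diff): accumulate counts the scalar leaves in a nested list, and parselod turns the per-sequence counts into the running offsets that one LoD level stores.

# Standalone illustration of the LoD offset math.
def _accumulate(data):
    return 1 if not isinstance(data, list) else sum(_accumulate(s) for s in data)

def _parselod(data):
    lod, cur = [0], 0
    for seq in data:
        cur += _accumulate(seq)  # length of this sequence in scalar elements
        lod.append(cur)
    return lod

print(_parselod([[1, 2, 3], [4, 5]]))  # sequences of lengths 3 and 2 -> [0, 3, 5]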
     def run(self,
             program=None,
@@ -23,7 +93,8 @@ class Executor(object):
             fetch_list=None,
             feed_var_name='feed',
             fetch_var_name='fetch',
-            scope=None):
+            scope=None,
+            return_numpy=True):
         if feed is None:
             feed = {}
         if fetch_list is None:
@@ -52,7 +123,10 @@ class Executor(object):
                 inputs={'X': [feed_var]},
                 outputs={'Out': [out]},
                 attrs={'col': i})
-            core.set_feed_variable(scope, feed[name], feed_var.name, i)
+            cur_feed = feed[name]
+            if not isinstance(cur_feed, core.LoDTensor):
+                cur_feed = self.aslodtensor(cur_feed)
+            core.set_feed_variable(scope, cur_feed, feed_var.name, i)

         fetch_var = global_block.create_var(
             name=fetch_var_name,
@@ -66,7 +140,11 @@ class Executor(object):
                 attrs={'col': i})

         self.executor.run(program.desc, scope, 0, True)
-        return [
+        outs = [
             core.get_fetch_variable(scope, fetch_var_name, i)
             for i in xrange(len(fetch_list))
         ]
+        if return_numpy:
+            outs = as_numpy(outs)
+        return outs
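Net effect for callers, sketched (program, x_np, and out are placeholders): results come back as numpy arrays by default, while tests that need the raw tensors (e.g. to inspect LoD) opt out, as the op-test hunks below do.

outs = exe.run(program, feed={'x': x_np}, fetch_list=[out])  # list of numpy.ndarray
raw = exe.run(program, feed={'x': x_np}, fetch_list=[out],
              return_numpy=False)                            # list of core.LoDTensor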
@@ -261,7 +261,10 @@ class OpTest(unittest.TestCase):
         feed_map = self.feed_var(inputs, place)

         exe = Executor(place)
-        outs = exe.run(program, feed=feed_map, fetch_list=fetch_list)
+        outs = exe.run(program,
+                       feed=feed_map,
+                       fetch_list=fetch_list,
+                       return_numpy=False)
         for out_name, out_dup in Operator.get_op_outputs(self.op_type):
             if out_name not in self.outputs:
@@ -500,5 +503,6 @@ class OpTest(unittest.TestCase):
         fetch_list = [g for p, g in param_grad_list]

         executor = Executor(place)
-        result = executor.run(prog, feed_dict, fetch_list)
-        return map(np.array, result)
+        return map(
+            np.array,
+            executor.run(prog, feed_dict, fetch_list, return_numpy=False))
@@ -52,15 +52,13 @@ class TestArrayReadWrite(unittest.TestCase):
         exe = Executor(cpu)

-        tensor = core.LoDTensor()
-        tensor.set(numpy.random.random(size=(100, 100)).astype('float32'), cpu)
-        outs = map(numpy.array,
-                   exe.run(feed={'x0': tensor,
-                                 'x1': tensor,
-                                 'x2': tensor},
-                           fetch_list=[a_sum, x_sum],
-                           scope=scope))
+        tensor = numpy.random.random(size=(100, 100)).astype('float32')
+        outs = exe.run(feed={'x0': tensor,
+                             'x1': tensor,
+                             'x2': tensor},
+                       fetch_list=[a_sum, x_sum],
+                       scope=scope)
         self.assertEqual(outs[0], outs[1])

         total_sum = layers.sums(input=[a_sum, x_sum])
@@ -72,12 +70,11 @@ class TestArrayReadWrite(unittest.TestCase):
             [each_x.name + "@GRAD" for each_x in x])
         g_out = [
             item.sum()
-            for item in map(
-                numpy.array,
-                exe.run(feed={'x0': tensor,
-                              'x1': tensor,
-                              'x2': tensor},
-                        fetch_list=g_vars))
+            for item in exe.run(
+                feed={'x0': tensor,
+                      'x1': tensor,
+                      'x2': tensor},
+                fetch_list=g_vars)
         ]
         g_out_sum = numpy.array(g_out).sum()
......
@@ -21,18 +21,15 @@ class ConditionalBlock(unittest.TestCase):
         exe = Executor(cpu)
         exe.run(g_startup_program)

-        x = core.LoDTensor()
-        x.set(numpy.random.random(size=(10, 1)).astype('float32'), cpu)
+        x = numpy.random.random(size=(10, 1)).astype('float32')

-        outs = map(numpy.array, exe.run(feed={'X': x}, fetch_list=[out]))[0]
+        outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
         print outs
         loss = layers.mean(x=out)
         append_backward_ops(loss=loss)

-        outs = map(numpy.array,
-                   exe.run(feed={'X': x},
-                           fetch_list=[
-                               g_main_program.block(0).var(data.name + "@GRAD")
-                           ]))[0]
+        outs = exe.run(
+            feed={'X': x},
+            fetch_list=[g_main_program.block(0).var(data.name + "@GRAD")])[0]
         print outs
......
 import unittest
-from paddle.v2.fluid.layers import mul, data
+from paddle.v2.fluid.layers import mul, data, sequence_pool
 import paddle.v2.fluid.core as core
 from paddle.v2.fluid.executor import Executor
 from paddle.v2.fluid.framework import g_main_program
@@ -17,17 +17,13 @@ class TestExecutor(unittest.TestCase):
         out = mul(x=a, y=b)
         place = core.CPUPlace()
         a_np = numpy.random.random((100, 784)).astype('float32')
-        tensor_a = core.LoDTensor()
-        tensor_a.set(a_np, place)
         b_np = numpy.random.random((784, 100)).astype('float32')
-        tensor_b = core.LoDTensor()
-        tensor_b.set(b_np, place)
         exe = Executor(place)
         outs = exe.run(g_main_program,
-                       feed={'a': tensor_a,
-                             'b': tensor_b},
+                       feed={'a': a_np,
+                             'b': b_np},
                        fetch_list=[out])
-        out = numpy.array(outs[0])
+        out = outs[0]
         self.assertEqual((100, 100), out.shape)
         self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
......
 import paddle.v2 as paddle
-import paddle.v2.fluid.layers as layers
-import unittest
-import numpy as np
 import paddle.v2.fluid.core as core
-import paddle.v2.fluid.optimizer as optimizer
-import paddle.v2.fluid.executor as executor
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.optimizer as optimizer
 from paddle.v2.fluid.framework import Program
 from paddle.v2.fluid.io import save_inference_model, load_inference_model
+import paddle.v2.fluid.executor as executor
+import unittest
+import numpy as np


 class TestBook(unittest.TestCase):
@@ -44,7 +44,7 @@ class TestBook(unittest.TestCase):
             x=cost, main_program=program, startup_program=init_program)
         sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
-        opts = sgd_optimizer.minimize(avg_cost, init_program)
+        sgd_optimizer.minimize(avg_cost, init_program)

         place = core.CPUPlace()
         exe = executor.Executor(place)
@@ -52,25 +52,20 @@ class TestBook(unittest.TestCase):
         exe.run(init_program, feed={}, fetch_list=[])

         for i in xrange(100):
-            x_data = np.array(
+            tensor_x = np.array(
                 [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
-            y_data = np.array([[-2], [-3], [-7], [-7]]).astype("float32")
+            tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")

-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
             exe.run(program,
                     feed={'x': tensor_x,
                           'y': tensor_y},
                     fetch_list=[avg_cost])

         save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
-        outs = exe.run(program,
-                       feed={'x': tensor_x,
-                             'y': tensor_y},
-                       fetch_list=[avg_cost])
-        expected = np.array(outs[0])
+        expected = exe.run(program,
+                           feed={'x': tensor_x,
+                                 'y': tensor_y},
+                           fetch_list=[avg_cost])[0]

         reload(executor)  # reload to build a new scope
         exe = executor.Executor(place)
@@ -83,7 +78,7 @@ class TestBook(unittest.TestCase):
                        feed={feed_var_names[0]: tensor_x,
                              feed_var_names[1]: tensor_y},
                        fetch_list=fetch_vars)
-        actual = np.array(outs[0])
+        actual = outs[0]

         self.assertEqual(feed_var_names, ["x", "y"])
         self.assertEqual(len(fetch_vars), 1)
......
@@ -13,7 +13,7 @@ class TestLoDArrayLength(unittest.TestCase):
         arr_len = layers.array_length(arr)
         cpu = core.CPUPlace()
         exe = Executor(cpu)
-        result = numpy.array(exe.run(fetch_list=[arr_len])[0])
+        result = exe.run(fetch_list=[arr_len])[0]
         self.assertEqual(11, result[0])
......
@@ -151,10 +151,11 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
         exe = Executor(place)
         g_out = [
-            item.sum()
-            for item in map(
-                numpy.array,
-                exe.run(program, feed={'x': tensor}, fetch_list=[g_vars]))
+            numpy.array(item).sum()
+            for item in exe.run(program,
+                                feed={'x': tensor},
+                                fetch_list=[g_vars],
+                                return_numpy=False)
         ]
         g_out_sum = numpy.array(g_out).sum()
......
@@ -65,17 +65,10 @@ class TestMNISTIfElseOp(unittest.TestCase):
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = np.expand_dims(y_data, axis=1)

-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
-
-            outs = map(np.array,
-                       exe.run(kwargs['main_program'],
-                               feed={'x': tensor_x,
-                                     'y': tensor_y},
-                               fetch_list=[avg_loss]))
+            outs = exe.run(kwargs['main_program'],
+                           feed={'x': x_data,
+                                 'y': y_data},
+                           fetch_list=[avg_loss])
             print outs[0]
             if outs[0] < 1.0:
                 return
@@ -129,19 +122,12 @@ class TestMNISTIfElseOp(unittest.TestCase):
         for data in train_reader():
             x_data = np.array(map(lambda x: x[0], data)).astype("float32")
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
-            y_data = np.expand_dims(y_data, axis=1)
-
-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
+            y_data = y_data.reshape((y_data.shape[0], 1))

-            outs = map(np.array,
-                       exe.run(kwargs['main_program'],
-                               feed={'x': tensor_x,
-                                     'y': tensor_y},
-                               fetch_list=[avg_loss]))
+            outs = exe.run(kwargs['main_program'],
+                           feed={'x': x_data,
+                                 'y': y_data},
+                           fetch_list=[avg_loss])
             print outs[0]
             if outs[0] < 1.0:
                 return
......
@@ -24,7 +24,7 @@ class TestParameter(unittest.TestCase):
         self.assertEqual(0, param.block.idx)
         exe = Executor(core.CPUPlace())
         p = exe.run(g_main_program, fetch_list=[param])[0]
-        self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
+        self.assertTrue(np.allclose(p, np.ones(shape) * val))
         p = io.get_parameter_value_by_name('fc.w', exe, g_main_program)
         self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
......
@@ -156,7 +156,7 @@ class RecurrentOpTest1(unittest.TestCase):
                       feed=self.feed_map,
                       fetch_list=[self.output])

-        return np.array(out[0])
+        return out[0]

     def backward(self):
         self.feed_map = {
@@ -171,7 +171,8 @@ class RecurrentOpTest1(unittest.TestCase):
         exe = Executor(self.place)
         return exe.run(self.main_program,
                        feed=self.feed_map,
-                       fetch_list=fetch_list)
+                       fetch_list=fetch_list,
+                       return_numpy=False)

     def test_backward(self):
         self.check_forward()
......
@@ -7,12 +7,6 @@ import numpy as np
 import paddle.v2.fluid.core as core


-def create_tensor(np_data, place):
-    tensor = core.LoDTensor()
-    tensor.set(np_data, place)
-    return tensor


 class RNNMemoryHelperOpTest(unittest.TestCase):
     def setUp(self):
         self.program = Program()
@@ -30,13 +24,13 @@ class RNNMemoryHelperOpTest(unittest.TestCase):
     def test_forward(self):
         x_np = np.random.normal(size=(2, 3)).astype("float32")
-        self.feed_map = {'X': create_tensor(x_np, self.place)}
+        self.feed_map = {'X': x_np}
         self.fetch_list = [self.Out]

         exe = Executor(self.place)
         out = exe.run(self.program,
                       feed=self.feed_map,
                       fetch_list=self.fetch_list)
-        np.isclose(np.array(out[0]), x_np, rtol=1e-5)
+        self.assertTrue(np.allclose(out[0], x_np, rtol=1e-5))


 class RNNMemoryHelperGradOpTest(unittest.TestCase):
@@ -66,8 +60,7 @@ class RNNMemoryHelperGradOpTest(unittest.TestCase):
     def test_backward(self):
         self.feed_map = {
-            name: create_tensor(
-                np.random.normal(size=(2, 3)).astype("float32"), self.place)
+            name: np.random.normal(size=(2, 3)).astype("float32")
             for name in self.input_names
         }
         self.fetch_list = [self.output_vars['X@GRAD']]
@@ -76,7 +69,7 @@ class RNNMemoryHelperGradOpTest(unittest.TestCase):
         out = exe.run(self.program,
                       feed=self.feed_map,
                       fetch_list=self.fetch_list)
-        np.isclose(np.array(out[0]), self.feed_map['Out@GRAD'], rtol=1e-5)
+        self.assertTrue(
+            np.allclose(out[0], self.feed_map['Out@GRAD'], rtol=1e-5))


 class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):
@@ -110,8 +103,7 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):
     def test_backward(self):
         self.feed_map = {
-            name: create_tensor(
-                np.random.normal(size=(2, 3)).astype("float32"), self.place)
+            name: np.random.normal(size=(2, 3)).astype("float32")
             for name in ['X', 'Out']
         }
         self.fetch_list = [self.output_vars['X@GRAD']]
@@ -120,10 +112,9 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):
         out = exe.run(self.program,
                       feed=self.feed_map,
                       fetch_list=self.fetch_list)
-        np.isclose(
-            np.array(out[0]),
-            np.zeros(shape=(2, 3)).astype("float32"),
-            rtol=1e-5)
+        self.assertTrue(
+            np.allclose(
+                out[0], np.zeros(shape=(2, 3)).astype("float32"), rtol=1e-5))


 if __name__ == '__main__':
......
@@ -27,19 +27,16 @@ class TestShrinkRNNMemory(unittest.TestCase):
         tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
         tensor.set(tensor_np, cpu)
         exe = Executor(cpu)
-        outs = map(numpy.array,
-                   exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3]))
+        outs = exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3])
         self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
         self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
         self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))

         mem3_mean = layers.mean(x=mem3)
         append_backward_ops(loss=mem3_mean)
-        x_grad = map(numpy.array,
-                     exe.run(feed={'x': tensor},
-                             fetch_list=[
-                                 g_main_program.global_block().var('x@GRAD')
-                             ]))[0]
+        x_grad = exe.run(
+            feed={'x': tensor},
+            fetch_list=[g_main_program.global_block().var('x@GRAD')])[0]
         self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
......
@@ -98,7 +98,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         exe = Executor(place)
         scope = core.Scope()
-        exe.run(program, feed={'x': tensor, 'y': mask}, scope=scope)
+        exe.run(program,
+                feed={'x': tensor,
+                      'y': mask},
+                scope=scope,
+                return_numpy=False)

         var_true = scope.find_var(out_true.name).get_tensor()
@@ -169,7 +173,8 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
                     feed={'x': tensor,
                           'y': mask},
                     fetch_list=[g_vars],
-                    scope=scope))
+                    scope=scope,
+                    return_numpy=False))
         ]
         g_out_sum = np.array(g_out).sum()
......
@@ -55,19 +55,10 @@ class TestWhileOp(unittest.TestCase):
         for i in xrange(3):
             d.append(numpy.random.random(size=[10]).astype('float32'))

-        d_tensor = []
-        for item in d:
-            t = core.LoDTensor()
-            t.set(item, cpu)
-            d_tensor.append(t)
-
-        outs = map(numpy.array,
-                   exe.run(feed={
-                       'd0': d_tensor[0],
-                       'd1': d_tensor[1],
-                       'd2': d_tensor[2]
-                   },
-                           fetch_list=[sum_result]))
+        outs = exe.run(feed={'d0': d[0],
+                             'd1': d[1],
+                             'd2': d[2]},
+                       fetch_list=[sum_result])

         self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
......