Commit 52a73587 authored by dzhwinter, committed by Yu Yang

"add asnumpy interface" (#5620)

* "add asnumpy interface"

* Just for unittest

* Change unittests for numpy I/O

* Fix CI
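
The net effect on callers is sketched below. This is a hedged example mirroring the updated test_executor_and_mul test in this diff; layer signatures are as of this commit, and `append_batch_size=False` for the unbatched matrix input is an assumption, not taken from the diff.

    import numpy as np
    import paddle.v2.fluid.core as core
    from paddle.v2.fluid.executor import Executor
    from paddle.v2.fluid.framework import g_main_program
    from paddle.v2.fluid.layers import data, mul

    a = data(name='a', shape=[784], data_type='float32')
    b = data(name='b', shape=[784, 100], data_type='float32',
             append_batch_size=False)  # assumed flag for an unbatched input
    out = mul(x=a, y=b)

    exe = Executor(core.CPUPlace())
    a_np = np.random.random((100, 784)).astype('float32')
    b_np = np.random.random((784, 100)).astype('float32')

    # Feeds are plain numpy arrays (no core.LoDTensor() boilerplate), and the
    # fetched value is already a numpy array, since return_numpy defaults to True.
    out_np = exe.run(g_main_program, feed={'a': a_np, 'b': b_np},
                     fetch_list=[out])[0]
    assert np.allclose(out_np, np.dot(a_np, b_np))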
Parent a619695b
+import numpy as np
 import paddle.v2.fluid.core as core
 from paddle.v2.fluid.framework import Block, Program, g_main_program

 g_scope = core.Scope()


+def as_numpy(tensor):
+    if isinstance(tensor, list):
+        return [as_numpy(t) for t in tensor]
+    assert isinstance(tensor, core.LoDTensor)
+    lod = tensor.lod()
+    tensor_data = np.array(tensor)
+    if len(lod) == 0:
+        ans = tensor_data
+    else:
+        raise RuntimeError("LoD calculation lacks unit tests and is buggy")
+    # elif len(lod) == 1:
+    #     ans = []
+    #     idx = 0
+    #     while idx < len(lod) - 1:
+    #         ans.append(tensor_data[lod[idx]:lod[idx + 1]])
+    #         idx += 1
+    # else:
+    #     for l in reversed(lod):
+    #         ans = []
+    #         idx = 0
+    #         while idx < len(l) - 1:
+    #             ans.append(tensor_data[l[idx]:l[idx + 1]])
+    #             idx += 1
+    #         tensor_data = ans
+    #     ans = tensor_data
+    return ans
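
For reference, a quick sketch of the helper's behavior on the LoD-free path (assuming a CPU build; illustrative, not part of the diff):

    t = core.LoDTensor()
    t.set(np.ones((2, 3), dtype='float32'), core.CPUPlace())
    print as_numpy(t).shape      # (2, 3): empty LoD, so a plain ndarray comes back
    print len(as_numpy([t, t]))  # 2: lists are converted element-wise
    # A tensor with a non-empty LoD still raises the RuntimeError above.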
 class Executor(object):
     def __init__(self, places):
         if not isinstance(places, list) and not isinstance(places, tuple):

@@ -16,6 +45,47 @@ class Executor(object):

             act_places.append(p)
         self.executor = core.Executor(act_places)
+        self.places = places
+
+    def aslodtensor(self, data):
+        def accumulate(data):
+            if not isinstance(data, list):
+                return 1
+            return sum([accumulate(sub) for sub in data])
+
+        def parselod(data):
+            seq_lens = [accumulate(seq) for seq in data]
+            cur_len = 0
+            lod = [cur_len]
+            for l in seq_lens:
+                cur_len += l
+                lod.append(cur_len)
+            return lod
+
+        assert len(self.places) != 0
+        if not isinstance(data, list):
+            # pure tensor case
+            tensor = core.LoDTensor()
+            tensor.set(data, self.places[0])
+            return tensor
+        else:
+            raise RuntimeError("Current implementation lacks unittests")
+            # lodtensor case (unreachable until the RuntimeError above is lifted)
+            lod = []
+            if not isinstance(data[0], list):
+                lod.append(parselod(data))
+                flattened_data = np.concatenate(data, axis=0).astype("int64")
+            else:
+                while isinstance(data[0], list):
+                    # the commit wrote parselod(seq); `seq` is undefined here, so use data
+                    lod.append(parselod(data))
+                    flattened_data = [item for seq in data for item in seq]
+                    data = flattened_data
+                flattened_data = np.concatenate(data, axis=0).astype("int64")
+            flattened_data = flattened_data.reshape([len(flattened_data), 1])
+            tensor = core.LoDTensor()
+            tensor.set(flattened_data, self.places[0])
+            tensor.set_lod(lod)
+            return tensor
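
The two nested helpers are plain Python, so the LoD offsets they compute can be checked standalone (restated outside the class purely for illustration):

    def accumulate(data):
        if not isinstance(data, list):
            return 1
        return sum([accumulate(sub) for sub in data])

    def parselod(data):
        seq_lens = [accumulate(seq) for seq in data]
        cur_len = 0
        lod = [cur_len]
        for l in seq_lens:
            cur_len += l
            lod.append(cur_len)
        return lod

    print parselod([[1, 2], [3], [4, 5, 6]])  # [0, 2, 3, 6]: one offset per sequence boundary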

     def run(self,
             program=None,

@@ -23,7 +93,8 @@ class Executor(object):

             fetch_list=None,
             feed_var_name='feed',
             fetch_var_name='fetch',
-            scope=None):
+            scope=None,
+            return_numpy=True):
         if feed is None:
             feed = {}
         if fetch_list is None:

@@ -52,7 +123,10 @@ class Executor(object):

                 inputs={'X': [feed_var]},
                 outputs={'Out': [out]},
                 attrs={'col': i})
-            core.set_feed_variable(scope, feed[name], feed_var.name, i)
+            cur_feed = feed[name]
+            if not isinstance(cur_feed, core.LoDTensor):
+                cur_feed = self.aslodtensor(cur_feed)
+            core.set_feed_variable(scope, cur_feed, feed_var.name, i)

         fetch_var = global_block.create_var(
             name=fetch_var_name,

@@ -66,7 +140,11 @@ class Executor(object):

                 attrs={'col': i})

         self.executor.run(program.desc, scope, 0, True)
-        return [
+        outs = [
             core.get_fetch_variable(scope, fetch_var_name, i)
             for i in xrange(len(fetch_list))
         ]
+        if return_numpy:
+            outs = as_numpy(outs)
+        return outs
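
Because as_numpy still raises for tensors carrying a non-empty LoD, callers that fetch LoD-bearing outputs opt out with return_numpy=False, as several test changes below do. A hedged sketch of that pattern (exe, program, feed, and fetch_list stand in for objects built as above):

    tensors = exe.run(program, feed=feed, fetch_list=fetch_list,
                      return_numpy=False)
    first = tensors[0]       # still a core.LoDTensor
    print first.lod()        # the LoD information is preserved
    arr = np.array(first)    # convert explicitly where a plain ndarray is wanted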
 image/
 fit_a_line.model/
+tmp
@@ -261,7 +261,10 @@ class OpTest(unittest.TestCase):

         feed_map = self.feed_var(inputs, place)
         exe = Executor(place)
-        outs = exe.run(program, feed=feed_map, fetch_list=fetch_list)
+        outs = exe.run(program,
+                       feed=feed_map,
+                       fetch_list=fetch_list,
+                       return_numpy=False)

         for out_name, out_dup in Operator.get_op_outputs(self.op_type):
             if out_name not in self.outputs:

@@ -500,5 +503,6 @@ class OpTest(unittest.TestCase):

         fetch_list = [g for p, g in param_grad_list]
         executor = Executor(place)
-        result = executor.run(prog, feed_dict, fetch_list)
-        return map(np.array, result)
+        return map(
+            np.array,
+            executor.run(prog, feed_dict, fetch_list, return_numpy=False))
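
OpTest keeps an explicit conversion, presumably because operator outputs may carry LoD; with return_numpy=False the fetches stay LoDTensors, and map(np.array, ...) reproduces the old return type (illustrative sketch; Python 2 map returns a list):

    raw = executor.run(prog, feed_dict, fetch_list, return_numpy=False)
    grads = map(np.array, raw)  # list of ndarrays, same values the old code returned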
@@ -52,15 +52,13 @@ class TestArrayReadWrite(unittest.TestCase):

         exe = Executor(cpu)

-        tensor = core.LoDTensor()
-        tensor.set(numpy.random.random(size=(100, 100)).astype('float32'), cpu)
+        tensor = numpy.random.random(size=(100, 100)).astype('float32')

-        outs = map(numpy.array,
-                   exe.run(feed={'x0': tensor,
-                                 'x1': tensor,
-                                 'x2': tensor},
-                           fetch_list=[a_sum, x_sum],
-                           scope=scope))
+        outs = exe.run(feed={'x0': tensor,
+                             'x1': tensor,
+                             'x2': tensor},
+                       fetch_list=[a_sum, x_sum],
+                       scope=scope)
         self.assertEqual(outs[0], outs[1])

         total_sum = layers.sums(input=[a_sum, x_sum])

@@ -72,12 +70,11 @@ class TestArrayReadWrite(unittest.TestCase):

             [each_x.name + "@GRAD" for each_x in x])
         g_out = [
             item.sum()
-            for item in map(
-                numpy.array,
-                exe.run(feed={'x0': tensor,
-                              'x1': tensor,
-                              'x2': tensor},
-                        fetch_list=g_vars))
+            for item in exe.run(
+                feed={'x0': tensor,
+                      'x1': tensor,
+                      'x2': tensor},
+                fetch_list=g_vars)
         ]
         g_out_sum = numpy.array(g_out).sum()
@@ -21,18 +21,15 @@ class ConditionalBlock(unittest.TestCase):

         exe = Executor(cpu)
         exe.run(g_startup_program)

-        x = core.LoDTensor()
-        x.set(numpy.random.random(size=(10, 1)).astype('float32'), cpu)
+        x = numpy.random.random(size=(10, 1)).astype('float32')

-        outs = map(numpy.array, exe.run(feed={'X': x}, fetch_list=[out]))[0]
+        outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
         print outs

         loss = layers.mean(x=out)
         append_backward_ops(loss=loss)

-        outs = map(numpy.array,
-                   exe.run(feed={'X': x},
-                           fetch_list=[
-                               g_main_program.block(0).var(data.name + "@GRAD")
-                           ]))[0]
+        outs = exe.run(
+            feed={'X': x},
+            fetch_list=[g_main_program.block(0).var(data.name + "@GRAD")])[0]
         print outs
 import unittest
-from paddle.v2.fluid.layers import mul, data
+from paddle.v2.fluid.layers import mul, data, sequence_pool
 import paddle.v2.fluid.core as core
 from paddle.v2.fluid.executor import Executor
 from paddle.v2.fluid.framework import g_main_program

@@ -17,17 +17,13 @@ class TestExecutor(unittest.TestCase):

         out = mul(x=a, y=b)
         place = core.CPUPlace()
         a_np = numpy.random.random((100, 784)).astype('float32')
-        tensor_a = core.LoDTensor()
-        tensor_a.set(a_np, place)
         b_np = numpy.random.random((784, 100)).astype('float32')
-        tensor_b = core.LoDTensor()
-        tensor_b.set(b_np, place)
         exe = Executor(place)
         outs = exe.run(g_main_program,
-                       feed={'a': tensor_a,
-                             'b': tensor_b},
+                       feed={'a': a_np,
+                             'b': b_np},
                        fetch_list=[out])
-        out = numpy.array(outs[0])
+        out = outs[0]
         self.assertEqual((100, 100), out.shape)
         self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
-import paddle.v2 as paddle
-import paddle.v2.fluid.layers as layers
+import unittest
+
+import numpy as np
 import paddle.v2.fluid.core as core
-import paddle.v2.fluid.optimizer as optimizer
+import paddle.v2.fluid.executor as executor
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.optimizer as optimizer
 from paddle.v2.fluid.framework import Program
 from paddle.v2.fluid.io import save_inference_model, load_inference_model
-import paddle.v2.fluid.executor as executor
-import unittest
-import numpy as np


 class TestBook(unittest.TestCase):
@@ -44,7 +44,7 @@ class TestBook(unittest.TestCase):

             x=cost, main_program=program, startup_program=init_program)
         sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
-        opts = sgd_optimizer.minimize(avg_cost, init_program)
+        sgd_optimizer.minimize(avg_cost, init_program)

         place = core.CPUPlace()
         exe = executor.Executor(place)

@@ -52,25 +52,20 @@ class TestBook(unittest.TestCase):

         exe.run(init_program, feed={}, fetch_list=[])

         for i in xrange(100):
-            x_data = np.array(
+            tensor_x = np.array(
                 [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
-            y_data = np.array([[-2], [-3], [-7], [-7]]).astype("float32")
-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
+            tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")

             exe.run(program,
                     feed={'x': tensor_x,
                           'y': tensor_y},
                     fetch_list=[avg_cost])

         save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
-        outs = exe.run(program,
-                       feed={'x': tensor_x,
-                             'y': tensor_y},
-                       fetch_list=[avg_cost])
-        expected = np.array(outs[0])
+        expected = exe.run(program,
+                           feed={'x': tensor_x,
+                                 'y': tensor_y},
+                           fetch_list=[avg_cost])[0]

         reload(executor)  # reload to build a new scope
         exe = executor.Executor(place)

@@ -83,7 +78,7 @@ class TestBook(unittest.TestCase):

                        feed={feed_var_names[0]: tensor_x,
                              feed_var_names[1]: tensor_y},
                        fetch_list=fetch_vars)
-        actual = np.array(outs[0])
+        actual = outs[0]

         self.assertEqual(feed_var_names, ["x", "y"])
         self.assertEqual(len(fetch_vars), 1)
@@ -13,7 +13,7 @@ class TestLoDArrayLength(unittest.TestCase):

         arr_len = layers.array_length(arr)
         cpu = core.CPUPlace()
         exe = Executor(cpu)
-        result = numpy.array(exe.run(fetch_list=[arr_len])[0])
+        result = exe.run(fetch_list=[arr_len])[0]
         self.assertEqual(11, result[0])
@@ -151,10 +151,11 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):

         exe = Executor(place)
         g_out = [
-            item.sum()
-            for item in map(
-                numpy.array,
-                exe.run(program, feed={'x': tensor}, fetch_list=[g_vars]))
+            numpy.array(item).sum()
+            for item in exe.run(program,
+                                feed={'x': tensor},
+                                fetch_list=[g_vars],
+                                return_numpy=False)
         ]
         g_out_sum = numpy.array(g_out).sum()
@@ -65,17 +65,10 @@ class TestMNISTIfElseOp(unittest.TestCase):

             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = np.expand_dims(y_data, axis=1)

-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
-
-            outs = map(np.array,
-                       exe.run(kwargs['main_program'],
-                               feed={'x': tensor_x,
-                                     'y': tensor_y},
-                               fetch_list=[avg_loss]))
+            outs = exe.run(kwargs['main_program'],
+                           feed={'x': x_data,
+                                 'y': y_data},
+                           fetch_list=[avg_loss])
             print outs[0]
             if outs[0] < 1.0:
                 return

@@ -129,19 +122,12 @@ class TestMNISTIfElseOp(unittest.TestCase):

         for data in train_reader():
             x_data = np.array(map(lambda x: x[0], data)).astype("float32")
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
-            y_data = np.expand_dims(y_data, axis=1)
+            y_data = y_data.reshape((y_data.shape[0], 1))

-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
-
-            outs = map(np.array,
-                       exe.run(kwargs['main_program'],
-                               feed={'x': tensor_x,
-                                     'y': tensor_y},
-                               fetch_list=[avg_loss]))
+            outs = exe.run(kwargs['main_program'],
+                           feed={'x': x_data,
+                                 'y': y_data},
+                           fetch_list=[avg_loss])
             print outs[0]
             if outs[0] < 1.0:
                 return
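
The reshape swapped in above is equivalent to the expand_dims call it replaces for this 1-D case; a quick numpy check:

    y = np.array([3, 1, 4]).astype("int64")
    a = np.expand_dims(y, axis=1)    # shape (3, 1)
    b = y.reshape((y.shape[0], 1))   # shape (3, 1)
    assert a.shape == b.shape and (a == b).all()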
@@ -24,7 +24,7 @@ class TestParameter(unittest.TestCase):

         self.assertEqual(0, param.block.idx)
         exe = Executor(core.CPUPlace())
         p = exe.run(g_main_program, fetch_list=[param])[0]
-        self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
+        self.assertTrue(np.allclose(p, np.ones(shape) * val))
         p = io.get_parameter_value_by_name('fc.w', exe, g_main_program)
         self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
@@ -156,7 +156,7 @@ class RecurrentOpTest1(unittest.TestCase):

                       feed=self.feed_map,
                       fetch_list=[self.output])

-        return np.array(out[0])
+        return out[0]

     def backward(self):
         self.feed_map = {

@@ -171,7 +171,8 @@ class RecurrentOpTest1(unittest.TestCase):

         exe = Executor(self.place)
         return exe.run(self.main_program,
                        feed=self.feed_map,
-                       fetch_list=fetch_list)
+                       fetch_list=fetch_list,
+                       return_numpy=False)

     def test_backward(self):
         self.check_forward()
@@ -7,12 +7,6 @@ import numpy as np

 import paddle.v2.fluid.core as core

-
-def create_tensor(np_data, place):
-    tensor = core.LoDTensor()
-    tensor.set(np_data, place)
-    return tensor


 class RNNMemoryHelperOpTest(unittest.TestCase):
     def setUp(self):
         self.program = Program()

@@ -30,13 +24,13 @@ class RNNMemoryHelperOpTest(unittest.TestCase):

     def test_forward(self):
         x_np = np.random.normal(size=(2, 3)).astype("float32")
-        self.feed_map = {'X': create_tensor(x_np, self.place)}
+        self.feed_map = {'X': x_np}
         self.fetch_list = [self.Out]

         exe = Executor(self.place)
         out = exe.run(self.program,
                       feed=self.feed_map,
                       fetch_list=self.fetch_list)
-        np.isclose(np.array(out[0]), x_np, rtol=1e-5)
+        self.assertTrue(np.allclose(out[0], x_np, rtol=1e-5))


 class RNNMemoryHelperGradOpTest(unittest.TestCase):

@@ -66,8 +60,7 @@ class RNNMemoryHelperGradOpTest(unittest.TestCase):

     def test_backward(self):
         self.feed_map = {
-            name: create_tensor(
-                np.random.normal(size=(2, 3)).astype("float32"), self.place)
+            name: np.random.normal(size=(2, 3)).astype("float32")
             for name in self.input_names
         }
         self.fetch_list = [self.output_vars['X@GRAD']]

@@ -76,7 +69,7 @@ class RNNMemoryHelperGradOpTest(unittest.TestCase):

         out = exe.run(self.program,
                       feed=self.feed_map,
                       fetch_list=self.fetch_list)
-        np.isclose(np.array(out[0]), self.feed_map['Out@GRAD'], rtol=1e-5)
+        np.isclose(out[0], self.feed_map['Out@GRAD'], rtol=1e-5)


 class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):

@@ -110,8 +103,7 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):

     def test_backward(self):
         self.feed_map = {
-            name: create_tensor(
-                np.random.normal(size=(2, 3)).astype("float32"), self.place)
+            name: np.random.normal(size=(2, 3)).astype("float32")
             for name in ['X', 'Out']
         }
         self.fetch_list = [self.output_vars['X@GRAD']]

@@ -120,10 +112,9 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):

         out = exe.run(self.program,
                       feed=self.feed_map,
                       fetch_list=self.fetch_list)
-        np.isclose(
-            np.array(out[0]),
-            np.zeros(shape=(2, 3)).astype("float32"),
-            rtol=1e-5)
+        self.assertTrue(
+            np.allclose(
+                out[0], np.zeros(shape=(2, 3)).astype("float32"), rtol=1e-5))


 if __name__ == '__main__':
@@ -27,19 +27,16 @@ class TestShrinkRNNMemory(unittest.TestCase):

         tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
         tensor.set(tensor_np, cpu)
         exe = Executor(cpu)
-        outs = map(numpy.array,
-                   exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3]))
+        outs = exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3])
         self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
         self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
         self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))

         mem3_mean = layers.mean(x=mem3)
         append_backward_ops(loss=mem3_mean)
-        x_grad = map(numpy.array,
-                     exe.run(feed={'x': tensor},
-                             fetch_list=[
-                                 g_main_program.global_block().var('x@GRAD')
-                             ]))[0]
+        x_grad = exe.run(
+            feed={'x': tensor},
+            fetch_list=[g_main_program.global_block().var('x@GRAD')])[0]
         self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
@@ -98,7 +98,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):

         exe = Executor(place)
         scope = core.Scope()
-        exe.run(program, feed={'x': tensor, 'y': mask}, scope=scope)
+        exe.run(program,
+                feed={'x': tensor,
+                      'y': mask},
+                scope=scope,
+                return_numpy=False)

         var_true = scope.find_var(out_true.name).get_tensor()

@@ -169,7 +173,8 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):

                         feed={'x': tensor,
                               'y': mask},
                         fetch_list=[g_vars],
-                        scope=scope))
+                        scope=scope,
+                        return_numpy=False))
         ]
         g_out_sum = np.array(g_out).sum()
@@ -55,19 +55,10 @@ class TestWhileOp(unittest.TestCase):

         for i in xrange(3):
             d.append(numpy.random.random(size=[10]).astype('float32'))

-        d_tensor = []
-        for item in d:
-            t = core.LoDTensor()
-            t.set(item, cpu)
-            d_tensor.append(t)
-
-        outs = map(numpy.array,
-                   exe.run(feed={
-                       'd0': d_tensor[0],
-                       'd1': d_tensor[1],
-                       'd2': d_tensor[2]
-                   },
-                           fetch_list=[sum_result]))
+        outs = exe.run(feed={'d0': d[0],
+                             'd1': d[1],
+                             'd2': d[2]},
+                       fetch_list=[sum_result])
         self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)