Unverified commit 6eba4b32 authored by Y yuyang18

Fix unittests after hide APIs

Parent 818e0708
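The renames in this commit follow one pattern: the old public Tensor/LoDTensor shape and element accessors become underscore-prefixed ("hidden") methods, and `get_dims()` is replaced by `shape()`. A minimal sketch of the new calls as the updated tests use them (the `paddle.fluid.core` import path and `CPUPlace` are assumptions for illustration, not part of this diff):

```python
import numpy as np
import paddle.fluid.core as core  # assumed import path, as commonly used by the fluid unit tests

place = core.CPUPlace()              # assumed place, for illustration only
t = core.LoDTensor()
t._set_dims([2, 3])                  # was: t.set_dims([2, 3])
t._alloc_float(place)
dims = t.shape()                     # was: t.get_dims()
t._set_float_element(0, 1.0)         # was: t.set_float_element(0, 1.0)
first = t._get_float_element(0)      # was: t.get_float_element(0)
arr = np.array(t)                    # ndarray conversion, unchanged by this commit
```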
......@@ -129,7 +129,6 @@ def create_or_get_tensor(scope, var_name, var, place):
if var is not None:
assert isinstance(var, np.ndarray)
tensor.set_recursive_sequence_lengths([])
- tensor.set_dims(var.shape)
tensor.set(var, place)
return tensor
......
......@@ -65,10 +65,10 @@ class TestDyRnnStaticInput(unittest.TestCase):
return self._lodtensor_to_ndarray(fetch_outs[0])
def _lodtensor_to_ndarray(self, lod_tensor):
- dims = lod_tensor.get_dims()
+ dims = lod_tensor.shape()
ndarray = np.zeros(shape=dims).astype('float32')
for i in xrange(np.product(dims)):
- ndarray.ravel()[i] = lod_tensor.get_float_element(i)
+ ndarray.ravel()[i] = lod_tensor._get_float_element(i)
return ndarray, lod_tensor.recursive_sequence_lengths()
def build_graph(self, only_forward=False):
......@@ -185,19 +185,19 @@ class TestDyRnnStaticInput(unittest.TestCase):
actual_gradients, actual_lod = self.fetch_value(static_input_grad)
- static_input_shape = self.static_input_tensor.get_dims()
+ static_input_shape = self.static_input_tensor.shape()
numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
# calculate numeric gradients
tensor_size = np.product(static_input_shape)
for i in xrange(tensor_size):
- origin = self.static_input_tensor.get_float_element(i)
+ origin = self.static_input_tensor._get_float_element(i)
x_pos = origin + self._delta
- self.static_input_tensor.set_float_element(i, x_pos)
+ self.static_input_tensor._set_float_element(i, x_pos)
y_pos = self.fetch_value(loss)[0][0]
x_neg = origin - self._delta
- self.static_input_tensor.set_float_element(i, x_neg)
+ self.static_input_tensor._set_float_element(i, x_neg)
y_neg = self.fetch_value(loss)[0][0]
- self.static_input_tensor.set_float_element(i, origin)
+ self.static_input_tensor._set_float_element(i, origin)
numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
self.assertTrue(
......
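For context on the hunk above (not part of the commit): the test estimates the gradient of the loss with central differences, perturbing one element of the static input by ±delta, re-evaluating the loss, and restoring the element. A standalone sketch of the same check over a NumPy array, assuming a scalar-valued loss function `f`:

```python
import numpy as np

def numeric_gradient(f, x, delta=1e-3):
    # Central-difference estimate of df/dx, one element at a time,
    # mirroring the perturb / evaluate / restore loop in the test above.
    # Note: x must be contiguous so that ravel() returns a view, not a copy.
    grad = np.zeros_like(x, dtype='float32')
    flat = x.ravel()
    for i in range(flat.size):
        origin = flat[i]
        flat[i] = origin + delta
        y_pos = f(x)
        flat[i] = origin - delta
        y_neg = f(x)
        flat[i] = origin                      # restore the original value
        grad.ravel()[i] = (y_pos - y_neg) / delta / 2
    return grad
```

The test then compares this numeric estimate against the framework-computed gradients with `np.allclose(actual_gradients, numeric_gradients, 0.001)`.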
......@@ -40,12 +40,12 @@ class TestSelectedRows(unittest.TestCase):
# compare tensor
self.assertAlmostEqual(2.0,
- selected_rows.get_tensor().get_float_element(0))
+ selected_rows.get_tensor()._get_float_element(0))
self.assertAlmostEqual(1.0,
- selected_rows.get_tensor().get_float_element(1))
+ selected_rows.get_tensor()._get_float_element(1))
self.assertAlmostEqual(
4.0,
- selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
+ selected_rows.get_tensor()._get_float_element(2 * row_numel + 8))
if __name__ == "__main__":
......
......@@ -45,8 +45,8 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
def sum_lodtensor(self, tensor):
sum_res = 0.0
- for i in xrange(np.product(tensor.get_dims())):
- sum_res += tensor.get_float_element(i)
+ for i in xrange(np.product(tensor.shape())):
+ sum_res += tensor._get_float_element(i)
return sum_res
......
......@@ -25,7 +25,7 @@ class TestTensor(unittest.TestCase):
tensor = var.get_tensor()
- tensor.set_dims([1000, 784])
+ tensor._set_dims([1000, 784])
tensor._alloc_int(place)
tensor_array = numpy.array(tensor)
self.assertEqual((1000, 784), tensor_array.shape)
......@@ -44,7 +44,7 @@ class TestTensor(unittest.TestCase):
tensor = var.get_tensor()
- tensor.set_dims([1000, 784])
+ tensor._set_dims([1000, 784])
tensor._alloc_float(place)
tensor_array = numpy.array(tensor)
......@@ -63,7 +63,7 @@ class TestTensor(unittest.TestCase):
var_lod = scope.var("test_lod_tensor")
lod_tensor = var_lod.get_tensor()
- lod_tensor.set_dims([4, 4, 6])
+ lod_tensor._set_dims([4, 4, 6])
lod_tensor._alloc_int(place)
array = numpy.array(lod_tensor)
array[0, 0, 0] = 3
......@@ -84,7 +84,7 @@ class TestTensor(unittest.TestCase):
var_lod = scope.var("test_lod_tensor")
lod_tensor = var_lod.get_tensor()
- lod_tensor.set_dims([5, 2, 3, 4])
+ lod_tensor._set_dims([5, 2, 3, 4])
lod_tensor._alloc_float(place)
tensor_array = numpy.array(lod_tensor)
......@@ -108,7 +108,7 @@ class TestTensor(unittest.TestCase):
lod_py = [[2, 1], [1, 2, 2]]
lod_tensor = core.LoDTensor()
- lod_tensor.set_dims([5, 2, 3, 4])
+ lod_tensor._set_dims([5, 2, 3, 4])
lod_tensor.set_recursive_sequence_lengths(lod_py)
lod_tensor._alloc_float(place)
tensor_array = numpy.array(lod_tensor)
......@@ -128,7 +128,7 @@ class TestTensor(unittest.TestCase):
lod_py = [[2, 1], [1, 2, 2]]
lod_tensor = core.LoDTensor()
- lod_tensor.set_dims([5, 2, 3, 4])
+ lod_tensor._set_dims([5, 2, 3, 4])
lod_tensor.set_recursive_sequence_lengths(lod_py)
lod_tensor._alloc_float(place)
tensor_array = numpy.array(lod_tensor)
......@@ -148,7 +148,7 @@ class TestTensor(unittest.TestCase):
tensor = var.get_tensor()
- tensor.set_dims([0, 1])
+ tensor._set_dims([0, 1])
tensor._alloc_float(place)
tensor_array = numpy.array(tensor)
......