Unverified commit f7f54d39, authored by Zeng Jinle, committed by GitHub

[Release/1.5]Fix create_lod_tensor (#18197)

* fix_create_lod_tensor, test=release/1.5

* remove program_guard import,test=release/1.5

* fix windows numpy default int32 error, test=release/1.5
Parent a839f724
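The third commit note ("fix windows numpy default int32 error") refers to NumPy's platform-dependent default integer type: on Windows the default follows the 32-bit C long, so arrays built from plain Python ints do not carry the int64 dtype the test expects. A minimal sketch of the difference, assuming nothing beyond NumPy:

```python
import numpy as np

# NumPy's default integer dtype follows the platform's C long:
# 64-bit on most Linux/macOS builds, but 32-bit on Windows.
implicit = np.array([1, 2, 3])                  # may be int32 on Windows
explicit = np.array([1, 2, 3], dtype=np.int64)  # int64 on every platform

print(implicit.dtype)   # platform dependent
print(explicit.dtype)   # int64
```

This is why the updated test below builds its list data from explicit np.int64 values instead of bare literals.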
@@ -15,6 +15,7 @@
from __future__ import print_function
from . import core
from .data_feeder import DataToLoDTensorConverter
import numpy as np
__all__ = ['create_lod_tensor', 'create_random_int_lodtensor']
@@ -71,19 +72,33 @@ def create_lod_tensor(data, recursive_seq_lens, place):
if isinstance(data, core.LoDTensor):
return create_lod_tensor(np.array(data), recursive_seq_lens, place)
elif isinstance(data, list):
# When input data is a list, it only deals with the case where the base element
# is an index of shape [1] and dtype int64 (e.g., word id). Hence, the generated
# LoDTensor will be of shape [n, 1] and dtype int64, where `n` is the total number
# of words or other indexes in the sequence.
# dtype and shape are not important here,
# we only want to reuse code of DataToLoDTensorConverter
converter = DataToLoDTensorConverter(
place=place,
lod_level=len(recursive_seq_lens),
shape=[],
dtype=core.VarDesc.VarType.FP32)
new_recursive_seq_lens = []
for seq in data:
new_recursive_seq_lens.append(len(seq))
converter.feed(seq)
assert [
new_recursive_seq_lens
] == recursive_seq_lens, "data and recursive_seq_lens do not match"
flattened_data = np.concatenate(data, axis=0)
flattened_data = flattened_data.reshape([len(flattened_data), 1])
return create_lod_tensor(flattened_data, recursive_seq_lens, place)
arr = np.array(converter.data)
# FIXME(zjl): the original logic of create_lod_tensor would append
# 1 to the shape. Maybe it is not a right way? Currently, we only
# follow the previous logic
arr = arr.reshape(arr.shape + (1, ))
tensor = core.LoDTensor()
tensor.set(arr, place)
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
return tensor
elif isinstance(data, np.ndarray):
tensor = core.LoDTensor()
tensor.set(data, place)
......
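For reference, a minimal usage sketch of the rewritten list branch, pieced together from the test changes below; it assumes the standard paddle.fluid 1.5 API (fluid.create_lod_tensor, fluid.CPUPlace), not anything introduced by this commit:

```python
import numpy as np
import paddle.fluid as fluid

# Two sequences of lengths 3 and 2; recursive_seq_lens records that split.
data = [[np.int64(1), np.int64(2), np.int64(3)],
        [np.int64(3), np.int64(4)]]
recursive_seq_lens = [[3, 2]]

tensor = fluid.create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())

# The list branch flattens the sequences and appends a trailing dimension
# of 1, so the result is a [5, 1] int64 LoDTensor.
print(tensor.shape())                       # [5, 1]
print(tensor.recursive_sequence_lengths())  # [[3, 2]]
print(np.array(tensor).ravel())             # [1 2 3 3 4]
```

Keeping the trailing dimension of 1 preserves the old [n, 1] output shape, which is what the FIXME note in the hunk above refers to.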
@@ -58,7 +58,8 @@ class TestLoDTensor(unittest.TestCase):
def test_create_lod_tensor(self):
# Create LoDTensor from a list
data = [[1, 2, 3], [3, 4]]
data = [[np.int64(1), np.int64(2), np.int64(3)],
[np.int64(3), np.int64(4)]]
wrong_recursive_seq_lens = [[2, 2]]
correct_recursive_seq_lens = [[3, 2]]
self.assertRaises(AssertionError, create_lod_tensor, data,
......@@ -67,13 +68,23 @@ class TestLoDTensor(unittest.TestCase):
fluid.CPUPlace())
self.assertEqual(tensor.recursive_sequence_lengths(),
correct_recursive_seq_lens)
self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT64)
self.assertEqual(tensor.shape(), [5, 1])
self.assertTrue(
np.array_equal(
np.array(tensor),
np.array([1, 2, 3, 3, 4]).reshape(tensor.shape()).astype(
'int64')))
# Create LoDTensor from numpy array
data = np.random.random([10, 1])
data = np.random.random([10, 1]).astype('float64')
recursive_seq_lens = [[2, 1], [3, 3, 4]]
tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP64)
self.assertEqual(tensor.shape(), [10, 1])
self.assertTrue(np.array_equal(np.array(tensor), data))
# Create LoDTensor from another LoDTensor; they are different instances
new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
......