Unverified commit 417fcf4f, authored by Kexin Zhao, committed by GitHub

Modify Pybind LoDTensor API according to length-based LoD (#11106)

* add lod_tensor util and modify pybind

* refine pybind LoDTensor API and modify LoDTensor and DataFeeder test

* fix test error

* fix detection map op test

* fix reorder_lod_tensor test

* fix seq_concat_op

* fix chunk eval op test

* fix target assign op

* fix warp ctc op

* address comments step 1: reverse reset_lod op

* step 2: modify op test

* add warning message

* remove has_valid_lod

* add back has_valid_lod

* address comments

* add exception catching trial
Parent 53d1d0f0
@@ -173,21 +173,6 @@ def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim,
     return avg_cost, feeding_list
 
 
-def to_lodtensor(data, place):
-    seq_lens = [len(seq) for seq in data]
-    cur_len = 0
-    lod = [cur_len]
-    for l in seq_lens:
-        cur_len += l
-        lod.append(cur_len)
-    flattened_data = np.concatenate(data, axis=0).astype("int64")
-    flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    lod_t = core.LoDTensor()
-    lod_t.set(flattened_data, place)
-    lod_t.set_lod([lod])
-    return lod_t, lod[-1]
-
-
 def lodtensor_to_ndarray(lod_tensor):
     dims = lod_tensor.get_dims()
     ndarray = np.zeros(shape=dims).astype('float32')
......
@@ -125,18 +125,3 @@ def get_model(args):
         batch_size=args.batch_size)
     return loss, inference_program, adam, train_reader, test_reader, batch_acc
 
 
-def to_lodtensor(data, place):
-    seq_lens = [len(seq) for seq in data]
-    cur_len = 0
-    lod = [cur_len]
-    for l in seq_lens:
-        cur_len += l
-        lod.append(cur_len)
-    flattened_data = numpy.concatenate(data, axis=0).astype("int64")
-    flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = fluid.LoDTensor()
-    res.set(flattened_data, place)
-    res.set_lod([lod])
-    return res
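Both deleted benchmark helpers computed an offset-based LoD with a running sum before calling `set_lod`. A minimal sketch (not part of this commit) of the length-based equivalent, assuming the `create_lod_tensor` helper from `paddle.fluid.lod_tensor` that this commit reworks below; the helper name is hypothetical:

```python
# Sketch only: the deleted helper built offsets [0, 3, 5, ...] by hand;
# with the length-based API the per-sequence lengths are passed directly.
import numpy as np
from paddle.fluid.lod_tensor import create_lod_tensor

def to_lodtensor_length_based(data, place):  # hypothetical replacement
    seq_lens = [len(seq) for seq in data]    # lengths, not cumulative offsets
    flattened = np.concatenate(data, axis=0).astype("int64")
    flattened = flattened.reshape([len(flattened), 1])
    return create_lod_tensor(flattened, [seq_lens], place)
```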
@@ -410,5 +410,38 @@ void LoDTensor::MergeLoDTensor(
   }
 }
 
+LoD ConvertToLengthBasedLoD(const LoD &offset_lod) {
+  LoD length_lod;
+  length_lod.reserve(offset_lod.size());
+  for (size_t lvl = 0; lvl < offset_lod.size(); ++lvl) {
+    std::vector<size_t> level;
+    if (offset_lod[lvl].size() > 0) {
+      level.reserve(offset_lod[lvl].size() - 1);
+    }
+    for (size_t idx = 0; idx < offset_lod[lvl].size() - 1; ++idx) {
+      level.push_back(offset_lod[lvl][idx + 1] - offset_lod[lvl][idx]);
+    }
+    length_lod.push_back(level);
+  }
+  return length_lod;
+}
+
+LoD ConvertToOffsetBasedLoD(const LoD &length_lod) {
+  LoD offset_lod;
+  offset_lod.reserve(length_lod.size());
+  for (size_t lvl = 0; lvl < length_lod.size(); ++lvl) {
+    std::vector<size_t> level;
+    level.reserve(length_lod[lvl].size() + 1);
+    size_t tmp = 0;
+    level.push_back(tmp);
+    for (size_t idx = 0; idx < length_lod[lvl].size(); ++idx) {
+      tmp += length_lod[lvl][idx];
+      level.push_back(tmp);
+    }
+    offset_lod.push_back(level);
+  }
+  return offset_lod;
+}
+
 }  // namespace framework
 }  // namespace paddle
@@ -226,5 +226,19 @@ extern void WriteToRecordIO(recordio::Writer* writer,
 extern std::vector<LoDTensor> ReadFromRecordIO(
     recordio::Scanner* scanner, const platform::DeviceContext& dev_ctx);
 
+/*
+ * Convert between length-based LoD and offset-based LoD.
+ * The implementation of the LoDTensor class uses offset-based LoD.
+ * However, we want to expose the more user-friendly length-based
+ * LoD to the Python side instead.
+ *
+ * Example:
+ * If offset_lod = [[0, 2, 3], [0, 3, 5, 9]],
+ * then length_lod = [[2, 1], [3, 2, 4]].
+ */
+LoD ConvertToLengthBasedLoD(const LoD& offset_lod);
+
+LoD ConvertToOffsetBasedLoD(const LoD& length_lod);
+
 }  // namespace framework
 }  // namespace paddle
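For intuition, here is an illustrative Python sketch (not part of the commit) of the two conversions declared above, checked against the example from the comment:

```python
def to_length_based(offset_lod):
    # [[0, 2, 3], [0, 3, 5, 9]] -> [[2, 1], [3, 2, 4]]
    return [[level[i + 1] - level[i] for i in range(len(level) - 1)]
            for level in offset_lod]

def to_offset_based(length_lod):
    # [[2, 1], [3, 2, 4]] -> [[0, 2, 3], [0, 3, 5, 9]]
    new_lod = []
    for level in length_lod:
        offsets = [0]
        for length in level:
            offsets.append(offsets[-1] + length)  # running sum of lengths
        new_lod.append(offsets)
    return new_lod

assert to_length_based([[0, 2, 3], [0, 3, 5, 9]]) == [[2, 1], [3, 2, 4]]
assert to_offset_based([[2, 1], [3, 2, 4]]) == [[0, 2, 3], [0, 3, 5, 9]]
```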
@@ -228,6 +228,38 @@ TEST(LoD, CheckAbsLoD) {
   ASSERT_FALSE(CheckAbsLoD(abs_lod0));
 }
 
+TEST(LoD, ConvertToLengthBasedLoD) {
+  LoD offset_lod;
+  offset_lod.push_back(std::vector<size_t>({0, 2}));
+  offset_lod.push_back(std::vector<size_t>({0, 1, 3}));
+  offset_lod.push_back(std::vector<size_t>({0, 2, 4, 5}));
+
+  LoD length_lod = ConvertToLengthBasedLoD(offset_lod);
+
+  LoD expected;
+  expected.push_back(std::vector<size_t>({2}));
+  expected.push_back(std::vector<size_t>({1, 2}));
+  expected.push_back(std::vector<size_t>({2, 2, 1}));
+
+  EXPECT_EQ(length_lod, expected);
+}
+
+TEST(LoD, ConvertToOffsetBasedLoD) {
+  LoD length_lod;
+  length_lod.push_back(std::vector<size_t>({2}));
+  length_lod.push_back(std::vector<size_t>({1, 2}));
+  length_lod.push_back(std::vector<size_t>({2, 2, 1}));
+
+  LoD offset_lod = ConvertToOffsetBasedLoD(length_lod);
+
+  LoD expected;
+  expected.push_back(std::vector<size_t>({0, 2}));
+  expected.push_back(std::vector<size_t>({0, 1, 3}));
+  expected.push_back(std::vector<size_t>({0, 2, 4, 5}));
+
+  EXPECT_EQ(offset_lod, expected);
+}
+
 template <typename T>
 static void TestRecordIO() {
   LoDTensor tensor;
......
@@ -144,28 +144,74 @@ PYBIND11_PLUGIN(core) {
   py::class_<LoDTensor, Tensor>(m, "LoDTensor")
       .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
-      .def(
-          "__init__",
-          [](LoDTensor &instance, const std::vector<std::vector<size_t>> &lod) {
-            LoD new_lod;
-            new_lod.reserve(lod.size());
-            std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
-            new (&instance) LoDTensor(new_lod);
-          })
+      .def("__init__",
+           [](LoDTensor &instance, const std::vector<std::vector<size_t>>
+                                       &recursive_sequence_lengths) {
+             LoD new_lod;
+             new_lod.reserve(recursive_sequence_lengths.size());
+             std::copy(recursive_sequence_lengths.begin(),
+                       recursive_sequence_lengths.end(),
+                       std::back_inserter(new_lod));
+             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
+             PADDLE_ENFORCE(
+                 CheckLoD(new_offset_lod, -1),
+                 "the provided recursive_sequence_lengths info is invalid");
+             new (&instance) LoDTensor(new_offset_lod);
+           })
       .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
       .def("set_lod",
            [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
+             // the input lod is offset-based level-of-detail info
+             LOG(WARNING)
+                 << "set_lod is deprecated and will be removed by 9.2018, "
+                    "please switch to set_recursive_sequence_lengths.";
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
+             PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()),
+                            "the provided lod info is invalid");
             self.set_lod(new_lod);
           })
-      .def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
-        auto lod = self.lod();
-        std::vector<std::vector<size_t>> new_lod;
-        new_lod.reserve(lod.size());
-        std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
-        return new_lod;
-      });
+      .def("set_recursive_sequence_lengths",
+           [](LoDTensor &self, const std::vector<std::vector<size_t>>
+                                   &recursive_sequence_lengths) {
+             // the input recursive_sequence_lengths is length-based
+             // level-of-detail info
+             LoD new_lod;
+             new_lod.reserve(recursive_sequence_lengths.size());
+             std::copy(recursive_sequence_lengths.begin(),
+                       recursive_sequence_lengths.end(),
+                       std::back_inserter(new_lod));
+             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
+             PADDLE_ENFORCE(
+                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
+                 "the provided recursive_sequence_lengths info is invalid");
+             self.set_lod(new_offset_lod);
+           })
+      .def("lod",
+           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
+             // output the offset-based lod info
+             LOG(WARNING) << "lod is deprecated and will be removed by 9.2018, "
+                             "please switch to recursive_sequence_lengths.";
+             LoD lod = self.lod();
+             std::vector<std::vector<size_t>> new_lod;
+             new_lod.reserve(lod.size());
+             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
+             return new_lod;
+           })
+      .def("recursive_sequence_lengths",
+           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
+             // output the length-based lod info
+             LoD lod = ConvertToLengthBasedLoD(self.lod());
+             std::vector<std::vector<size_t>> new_lod;
+             new_lod.reserve(lod.size());
+             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
+             return new_lod;
+           })
+      .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool {
+        // Check that the lod info is valid and matches the outermost
+        // dimension of the LoDTensor data
+        return CheckLoD(self.lod(), vectorize(self.dims()).front());
+      });
 
   py::class_<SelectedRows>(m, "SelectedRows")
......
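A rough sketch of how the new surface behaves from Python (based on the updated tests later in this commit; not itself part of the diff):

```python
import numpy as np
import paddle.fluid as fluid

t = fluid.LoDTensor()
t.set(np.random.random([9, 1]).astype('float32'), fluid.CPUPlace())
t.set_recursive_sequence_lengths([[2, 1], [3, 2, 4]])  # length-based input

# The getter returns the same length-based form ...
assert t.recursive_sequence_lengths() == [[2, 1], [3, 2, 4]]
# ... while internally the tensor stores offsets, so validity can be
# checked against the outermost data dimension (3 + 2 + 4 == 9 here).
assert t.has_valid_recursive_sequence_lengths()
```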
@@ -47,7 +47,7 @@ class DataToLoDTensorConverter(object):
         self.lod = []
 
         for i in six.range(lod_level):
-            self.lod.append([0])
+            self.lod.append([])
 
     def feed(self, data):
         self._feed_impl_(data, self.lod, self.lod_level)
@@ -56,8 +56,7 @@ class DataToLoDTensorConverter(object):
         if lod_level == 0:
             self.data.append(data)
         else:
-            cur_lod_len = len(data)
-            lod[0].append(lod[0][-1] + cur_lod_len)
+            lod[0].append(len(data))
             for each_data in data:
                 self._feed_impl_(each_data, lod[1:], lod_level - 1)
@@ -66,7 +65,7 @@ class DataToLoDTensorConverter(object):
         t = core.LoDTensor()
         t.set(arr, self.place)
         if self.lod_level > 0:
-            t.set_lod(self.lod)
+            t.set_recursive_sequence_lengths(self.lod)
         return t
......
@@ -18,80 +18,6 @@ import numpy as np
 
 __all__ = ['create_lod_tensor', 'create_random_int_lodtensor']
 
 
-def _validate_lod(lod, tensor_height=-1):
-    """Check whether the input length-based lod info is valid.
-
-    There are several things to check:
-    1. lod should be a list of lists. Empty list is fine.
-    2. The length of each sublist (a lod level) should be at least one.
-    3. Each element in each lod level should be an integer greater than 0.
-    4. The sum of one lod level should be equal to the length of the next lod level.
-    5. The sum of the last lod level should be equal to the tensor height.
-       Bypass this check if user does not provide tensor_height as input.
-
-    Args:
-        lod: the length-based lod info, e.g., [[2, 3], [2, 1, 2, 3, 4]].
-        tensor_height: the outermost dimension of the tensor with which the input
-            lod is associated with.
-
-    Returns:
-        A boolean indicating whether the input lod is valid or not.
-    """
-    assert isinstance(lod, list), "lod should be a list"
-    # Empty lod is fine
-    if len(lod) == 0:
-        return True
-
-    lod_sum = []
-    for level in lod:
-        assert isinstance(level, list), "each item in lod should be a list"
-        # Each level of lod should have at least one length info
-        if len(level) < 1:
-            return False
-        level_sum = 0
-        for lod_len in level:
-            # Each length in a level should be > 0
-            if lod_len <= 0:
-                return False
-            level_sum += lod_len
-        lod_sum.append(level_sum)
-
-    for idx, val in enumerate(lod_sum[:-1]):
-        # Each level's sum should be equal to
-        # the number of items in the next level
-        if val != len(lod[idx + 1]):
-            return False
-
-    if tensor_height == -1:
-        return True
-    else:
-        # Last level's sum should be equal to the tensor height
-        return lod_sum[-1] == tensor_height
-
-
-def _convert_lod(lod):
-    """Convert a length-based lod to an offset-based lod.
-
-    If the length-based lod is [[2, 3], [2, 1, 2, 3, 4]],
-    then the offset-based lod is [[0, 2, 5], [0, 2, 3, 5, 8, 12]].
-
-    Args:
-        lod: a length-based lod info.
-
-    Returns:
-        A list of lists as the offset-based lod converted to from the input lod.
-    """
-    new_lod = []
-    for level in lod:
-        cur_len = 0
-        new_level = [cur_len]
-        for lod_len in level:
-            cur_len += lod_len
-            new_level.append(cur_len)
-        new_lod.append(new_level)
-    return new_lod
-
-
 def create_lod_tensor(data, lod, place):
     """Create a lod tensor from a numpy array, a list, or an existing lod tensor.
@@ -139,11 +65,11 @@ def create_lod_tensor(data, lod, place):
         flattened_data = flattened_data.reshape([len(flattened_data), 1])
         return create_lod_tensor(flattened_data, lod, place)
     elif isinstance(data, np.ndarray):
-        assert _validate_lod(lod,
-                             data.shape[0]), "the provided lod info is invalid"
         tensor = core.LoDTensor()
         tensor.set(data, place)
-        tensor.set_lod(_convert_lod(lod))
+        tensor.set_recursive_sequence_lengths(lod)
+        assert tensor.has_valid_recursive_sequence_lengths(
+        ), "the provided lod info is invalid"
         return tensor
     else:
         raise TypeError(
@@ -181,9 +107,8 @@ def create_random_int_lodtensor(lod, base_shape, place, low, high):
         A fluid LoDTensor object with tensor data and lod info.
     """
     assert isinstance(base_shape, list), "base_shape should be a list"
-    converted_lod = _convert_lod(lod)
     # append the total number of basic elements to the front of its shape
-    overall_shape = [converted_lod[-1][-1]] + base_shape
+    overall_shape = [sum(lod[-1])] + base_shape
     # the range of integer data elements is [low, high]
     data = np.random.random_integers(low, high, overall_shape).astype("int64")
     return create_lod_tensor(data, lod, place)
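With validation and conversion now delegated to the C++ side, typical usage of these helpers looks roughly like this (a sketch based on the updated tests below):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor

# lod is length-based: 2 sequences with 2 and 1 sub-sequences, whose
# lengths 3, 3 and 4 must sum to the data height (10).
data = np.random.random([10, 1])
t = create_lod_tensor(data, [[2, 1], [3, 3, 4]], fluid.CPUPlace())
assert t.recursive_sequence_lengths() == [[2, 1], [3, 3, 4]]

# The random variant now sizes its data from sum(lod[-1]) directly.
words = create_random_int_lodtensor([[2, 3, 5]], [1], fluid.CPUPlace(),
                                    low=0, high=99)
assert words.shape() == [10, 1]
```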
@@ -22,12 +22,11 @@ class TestDataFeeder(unittest.TestCase):
         label = fluid.layers.data(name='label', shape=[1], dtype='int64')
         feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
         result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
-        print(result)
 
         self.assertEqual(result['image'].shape(), [2, 1, 28, 28])
         self.assertEqual(result['label'].shape(), [2, 1])
-        self.assertEqual(result['image'].lod(), [])
-        self.assertEqual(result['label'].lod(), [])
+        self.assertEqual(result['image'].recursive_sequence_lengths(), [])
+        self.assertEqual(result['label'].recursive_sequence_lengths(), [])
 
     def test_lod_level_1_converter(self):
         # lod_level = 1
@@ -42,12 +41,12 @@ class TestDataFeeder(unittest.TestCase):
         # label = [1] * len(data)
         result = feeder.feed(
             [([1, 2, 3], [1]), ([4, 5], [1]), ([6, 7, 8, 9], [1])])
-        print(result)
 
         self.assertEqual(result['sentences'].shape(), [9, 1])
         self.assertEqual(result['label'].shape(), [3, 1])
-        self.assertEqual(result['sentences'].lod(), [[0, 3, 5, 9]])
-        self.assertEqual(result['label'].lod(), [])
+        self.assertEqual(result['sentences'].recursive_sequence_lengths(),
+                         [[3, 2, 4]])
+        self.assertEqual(result['label'].recursive_sequence_lengths(), [])
 
     def test_lod_level_2_converter(self):
         # lod_level = 2
@@ -62,12 +61,12 @@ class TestDataFeeder(unittest.TestCase):
         # label = [1] * len(data)
         result = feeder.feed(
             [([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])])
-        print(result)
 
         self.assertEqual(result['paragraphs'].shape(), [9, 1])
         self.assertEqual(result['label'].shape(), [2, 1])
-        self.assertEqual(result['paragraphs'].lod(), [[0, 2, 3], [0, 3, 5, 9]])
-        self.assertEqual(result['label'].lod(), [])
+        self.assertEqual(result['paragraphs'].recursive_sequence_lengths(),
+                         [[2, 1], [3, 2, 4]])
+        self.assertEqual(result['label'].recursive_sequence_lengths(), [])
 
 if __name__ == '__main__':
......
@@ -13,44 +13,41 @@
 # limitations under the License.
 
 import paddle.fluid as fluid
-from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor, _validate_lod, _convert_lod
-import numpy
+from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
+import numpy as np
 import unittest
 
 
 class TestLoDTensor(unittest.TestCase):
-    def test_validate_lod(self):
-        lod = (1, 2, 1)
-        self.assertRaises(AssertionError, _validate_lod, lod, -1)
-        lod = [[1, 2], (2, 3)]
-        self.assertRaises(AssertionError, _validate_lod, lod, -1)
-        lod = [1, 2, 3]
-        self.assertRaises(AssertionError, _validate_lod, lod, -1)
-
-        lod = []
-        self.assertTrue(_validate_lod(lod, -1))
-        lod = [[], [1], [3]]
-        self.assertFalse(_validate_lod(lod, -1))
-        lod = [[0], [-1], [3]]
-        self.assertFalse(_validate_lod(lod, -1))
-
-        # Each level's sum should be equal to the number of items in the next level
-        # Moreover, last level's sum should be equal to the tensor height
-        lod = [[2, 3], [1, 3, 1, 2, 1]]
-        self.assertTrue(_validate_lod(lod, tensor_height=8))
-        lod = [[1, 3], [2, 1, 3]]
-        self.assertFalse(_validate_lod(lod, tensor_height=6))
-        lod = [[1, 3], [2, 1, 3, 4]]
-        self.assertFalse(_validate_lod(lod, tensor_height=5))
-
-    def test_convert_lod(self):
-        lod = [[1, 2, 3]]
-        converted_lod = [[0, 1, 3, 6]]
-        self.assertEqual(_convert_lod(lod), converted_lod)
-
-        lod = [[2, 3], [1, 3, 1, 2, 1]]
-        converted_lod = [[0, 2, 5], [0, 1, 4, 5, 7, 8]]
-        self.assertEqual(_convert_lod(lod), converted_lod)
+    def test_pybind_lod(self):
+        tensor = fluid.LoDTensor()
+        lod = []
+        tensor.set_recursive_sequence_lengths(lod)
+        lod = [[], [1], [3]]
+        self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
+                          lod)
+        lod = [[0], [2], [3]]
+        self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
+                          lod)
+
+        lod = [[1, 2, 3]]
+        tensor.set_recursive_sequence_lengths(lod)
+        self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+        tensor.set(np.random.random([6, 1]), fluid.CPUPlace())
+        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
+        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
+        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
+
+        # Each level's sum should be equal to the number of items in the next level
+        # Moreover, last level's sum should be equal to the tensor height
+        lod = [[2, 3], [1, 3, 1, 2, 2]]
+        tensor.set_recursive_sequence_lengths(lod)
+        self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+        tensor.set(np.random.random([8, 1]), fluid.CPUPlace())
+        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
+        lod = [[2, 3], [1, 3, 1, 2, 1]]
+        tensor.set_recursive_sequence_lengths(lod)
+        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
+        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
+        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
 
     def test_create_lod_tensor(self):
         # Create LoDTensor from a list
@@ -60,19 +57,19 @@ class TestLoDTensor(unittest.TestCase):
         self.assertRaises(AssertionError, create_lod_tensor, data, wrong_lod,
                           fluid.CPUPlace())
         tensor = create_lod_tensor(data, correct_lod, fluid.CPUPlace())
-        self.assertEqual(tensor.lod(), [[0, 3, 5]])
+        self.assertEqual(tensor.recursive_sequence_lengths(), correct_lod)
 
         # Create LoDTensor from numpy array
-        data = numpy.random.random([10, 1])
+        data = np.random.random([10, 1])
         lod = [[2, 1], [3, 3, 4]]
         tensor = create_lod_tensor(data, lod, fluid.CPUPlace())
-        self.assertEqual(tensor.lod(), [[0, 2, 3], [0, 3, 6, 10]])
+        self.assertEqual(tensor.recursive_sequence_lengths(), lod)
 
         # Create LoDTensor from another LoDTensor, they are different instances
         new_lod = [[2, 2, 1], [1, 2, 2, 3, 2]]
         new_tensor = create_lod_tensor(tensor, new_lod, fluid.CPUPlace())
-        self.assertEqual(tensor.lod(), [[0, 2, 3], [0, 3, 6, 10]])
-        self.assertEqual(new_tensor.lod(), [[0, 2, 4, 5], [0, 1, 3, 5, 8, 10]])
+        self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+        self.assertEqual(new_tensor.recursive_sequence_lengths(), new_lod)
 
     def test_create_random_int_lodtensor(self):
         # The shape of a word, commonly used in speech and NLP problem, is [1]
@@ -83,7 +80,7 @@ class TestLoDTensor(unittest.TestCase):
         high = dict_size - 1
         tensor = create_random_int_lodtensor(lod, shape,
                                              fluid.CPUPlace(), low, high)
-        self.assertEqual(tensor.lod(), [[0, 2, 5, 10]])
+        self.assertEqual(tensor.recursive_sequence_lengths(), lod)
         self.assertEqual(tensor.shape(), [10, 1])
......
@@ -162,7 +162,7 @@ class OpTest(unittest.TestCase):
             tensor = core.LoDTensor()
             if isinstance(np_value, tuple):
                 tensor.set(np_value[0], place)
-                tensor.set_lod(np_value[1])
+                tensor.set_recursive_sequence_lengths(np_value[1])
             else:
                 tensor.set(np_value, place)
             feed_map[name] = tensor
@@ -170,7 +170,8 @@ class OpTest(unittest.TestCase):
                 tensor = core.LoDTensor()
                 if isinstance(self.inputs[var_name], tuple):
                     tensor.set(self.inputs[var_name][0], place)
-                    tensor.set_lod(self.inputs[var_name][1])
+                    tensor.set_recursive_sequence_lengths(self.inputs[var_name][
+                        1])
                 else:
                     tensor.set(self.inputs[var_name], place)
                 feed_map[var_name] = tensor
@@ -293,7 +294,8 @@ class OpTest(unittest.TestCase):
                             str(place))
                     if isinstance(expect, tuple):
                         self.assertListEqual(
-                            actual.lod(), expect[1], "Output (" + sub_out_name +
+                            actual.recursive_sequence_lengths(), expect[1],
+                            "Output (" + sub_out_name +
                             ") has different lod at " + str(place))
                 else:
                     idx = find_actual(out_name, fetch_list)
@@ -307,8 +309,8 @@ class OpTest(unittest.TestCase):
                         "Output (" + out_name + ") has diff at " + str(place) +
                         str(actual_t) + "\n" + str(expect_t))
                     if isinstance(expect, tuple):
-                        self.assertListEqual(actual.lod(), expect[1],
-                                             "Output (" + out_name +
+                        self.assertListEqual(actual.recursive_sequence_lengths(),
+                                             expect[1], "Output (" + out_name +
                                              ") has different lod at " + str(place))
 
     def _get_places(self):
@@ -408,7 +410,7 @@ class OpTest(unittest.TestCase):
         tensor = core.LoDTensor()
         tensor.set(np_value, place)
         if lod is not None:
-            tensor.set_lod(lod)
+            tensor.set_recursive_sequence_lengths(lod)
         return tensor
 
     @staticmethod
......
@@ -128,7 +128,7 @@ def create_or_get_tensor(scope, var_name, var, place):
     tensor = scope.var(var_name).get_tensor()
     if var is not None:
         assert isinstance(var, np.ndarray)
-        tensor.set_lod([[]])
+        tensor.set_recursive_sequence_lengths([])
         tensor.set_dims(var.shape)
         tensor.set(var, place)
     return tensor
......
@@ -26,36 +26,36 @@ class TestBeamSearchDecodeOp(unittest.TestCase):
     def append_lod_tensor(self, tensor_array, lod, data):
         lod_tensor = core.LoDTensor()
-        lod_tensor.set_lod(lod)
+        lod_tensor.set_recursive_sequence_lengths(lod)
         lod_tensor.set(data, self.place)
         tensor_array.append(lod_tensor)
 
     def test_get_set(self):
         ids = self.scope.var("ids").get_lod_tensor_array()
         self.append_lod_tensor(
-            ids, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],
+            ids, [[3, 3], [1, 1, 1, 1, 1, 1]],
             np.array(
                 [1, 2, 3, 4, 5, 6], dtype="int64"))
         self.append_lod_tensor(
-            ids, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],
+            ids, [[3, 3], [1, 0, 2, 2, 0, 1]],
             np.array(
                 [0, 1, 2, 3, 4, 5], dtype="int64"))
         self.append_lod_tensor(
-            ids, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],
+            ids, [[3, 3], [0, 1, 1, 1, 1, 1]],
             np.array(
                 [0, 1, 2, 3, 4], dtype="int64"))
 
         scores = self.scope.var("scores").get_lod_tensor_array()
         self.append_lod_tensor(
-            scores, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],
+            scores, [[3, 3], [1, 1, 1, 1, 1, 1]],
             np.array(
                 [1, 2, 3, 4, 5, 6], dtype="float64"))
         self.append_lod_tensor(
-            scores, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],
+            scores, [[3, 3], [1, 0, 2, 2, 0, 1]],
             np.array(
                 [0, 1, 2, 3, 4, 5], dtype="float64"))
         self.append_lod_tensor(
-            scores, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],
+            scores, [[3, 3], [0, 1, 1, 1, 1, 1]],
             np.array(
                 [0, 1, 2, 3, 4], dtype="float64"))
@@ -73,9 +73,11 @@ class TestBeamSearchDecodeOp(unittest.TestCase):
         beam_search_decode_op.run(self.scope, self.place)
 
-        expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]]
-        self.assertEqual(sentence_ids.lod(), expected_lod)
-        self.assertEqual(sentence_scores.lod(), expected_lod)
+        expected_lod = [[4, 4], [1, 2, 3, 3, 1, 3, 3, 3]]
+        self.assertEqual(sentence_ids.recursive_sequence_lengths(),
+                         expected_lod)
+        self.assertEqual(sentence_scores.recursive_sequence_lengths(),
+                         expected_lod)
 
         expected_data = np.array(
             [2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 4, 3, 2, 4, 4, 3, 6, 5, 4], "int64")
......
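One detail worth noting in the converted test data above: zero-length sequences survive the offset-to-length conversion, since differencing adjacent offsets yields a 0 entry. A quick check (illustrative Python, not part of the commit):

```python
# Second-level lod from the hunk above: old offsets vs. new lengths.
offsets = [0, 1, 1, 3, 5, 5, 6]
lengths = [offsets[i + 1] - offsets[i] for i in range(len(offsets) - 1)]
assert lengths == [1, 0, 2, 2, 0, 1]  # empty candidate lists stay as 0
```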
@@ -48,18 +48,18 @@ class BeamSearchOpTester(unittest.TestCase):
         op.run(self.scope, core.CPUPlace())
         selected_ids = self.scope.find_var("selected_ids").get_tensor()
         print 'selected_ids', np.array(selected_ids)
-        print 'lod', selected_ids.lod()
+        print 'lod', selected_ids.recursive_sequence_lengths()
 
     def _create_pre_ids(self):
         np_data = np.array([[1, 2, 3, 4]], dtype='int64')
         tensor = create_tensor(self.scope, "pre_ids", np_data)
 
     def _create_ids(self):
-        self.lod = [[0, 1, 4], [0, 1, 2, 3, 4]]
+        self.lod = [[1, 3], [1, 1, 1, 1]]
         np_data = np.array(
             [[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], dtype='int64')
         tensor = create_tensor(self.scope, "ids", np_data)
-        tensor.set_lod(self.lod)
+        tensor.set_recursive_sequence_lengths(self.lod)
 
     def _create_scores(self):
         np_data = np.array(
@@ -71,7 +71,7 @@ class BeamSearchOpTester(unittest.TestCase):
             ],
             dtype='float32')
         tensor = create_tensor(self.scope, "scores", np_data)
-        tensor.set_lod(self.lod)
+        tensor.set_recursive_sequence_lengths(self.lod)
 
 if __name__ == '__main__':
......
@@ -65,23 +65,25 @@ def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None):
         distance (numpy.array) : The distance of two entries with shape [M, N].
         lod (list of int): The offsets of each input in this batch.
     """
-    n = len(lod) - 1
+    n = len(lod)
     m = distance.shape[1]
     match_indices = -1 * np.ones((n, m), dtype=np.int)
     match_dist = np.zeros((n, m), dtype=np.float32)
-    for i in range(len(lod) - 1):
-        bipartite_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],
-                        match_dist[i, :])
+    cur_offset = 0
+    for i in range(n):
+        bipartite_match(distance[cur_offset:(cur_offset + lod[i]), :],
+                        match_indices[i, :], match_dist[i, :])
         if match_type == 'per_prediction':
-            argmax_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],
-                         match_dist[i, :], dist_threshold)
+            argmax_match(distance[cur_offset:(cur_offset + lod[i]), :],
+                         match_indices[i, :], match_dist[i, :], dist_threshold)
+        cur_offset += lod[i]
     return match_indices, match_dist
 
 
 class TestBipartiteMatchOpWithLoD(OpTest):
     def setUp(self):
         self.op_type = 'bipartite_match'
-        lod = [[0, 5, 11, 23]]
+        lod = [[5, 6, 12]]
         dist = np.random.random((23, 217)).astype('float32')
         match_indices, match_dist = batch_bipartite_match(dist, lod[0])
@@ -98,7 +100,7 @@ class TestBipartiteMatchOpWithLoD(OpTest):
 class TestBipartiteMatchOpWithoutLoD(OpTest):
     def setUp(self):
         self.op_type = 'bipartite_match'
-        lod = [[0, 8]]
+        lod = [[8]]
         dist = np.random.random((8, 17)).astype('float32')
         match_indices, match_dist = batch_bipartite_match(dist, lod[0])
@@ -115,7 +117,7 @@ class TestBipartiteMatchOpWithoutLoD(OpTest):
 class TestBipartiteMatchOpWithPerPredictionType(OpTest):
     def setUp(self):
         self.op_type = 'bipartite_match'
-        lod = [[0, 5, 11, 23]]
+        lod = [[5, 6, 12]]
         dist = np.random.random((23, 237)).astype('float32')
         match_indices, match_dist = batch_bipartite_match(dist, lod[0],
                                                           'per_prediction', 0.5)
......
@@ -81,15 +81,19 @@ def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type,
     n = target_box.shape[0]
     m = prior_box.shape[0]
     output_box = np.zeros((n, m, 4), dtype=np.float32)
-    for i in range(len(lod) - 1):
+    cur_offset = 0
+    for i in range(len(lod)):
         if (code_type == "EncodeCenterSize"):
-            box_coder(target_box[lod[i]:lod[i + 1], :], prior_box,
-                      prior_box_var, output_box[lod[i]:lod[i + 1], :, :],
+            box_coder(target_box[cur_offset:(cur_offset + lod[i]), :],
+                      prior_box, prior_box_var,
+                      output_box[cur_offset:(cur_offset + lod[i]), :, :],
                       code_type, box_normalized)
         elif (code_type == "DecodeCenterSize"):
-            box_coder(target_box[lod[i]:lod[i + 1], :, :], prior_box,
-                      prior_box_var, output_box[lod[i]:lod[i + 1], :, :],
+            box_coder(target_box[cur_offset:(cur_offset + lod[i]), :, :],
+                      prior_box, prior_box_var,
+                      output_box[cur_offset:(cur_offset + lod[i]), :, :],
                       code_type, box_normalized)
+        cur_offset += lod[i]
     return output_box
 
 
@@ -99,7 +103,7 @@ class TestBoxCoderOp(OpTest):
     def setUp(self):
         self.op_type = "box_coder"
-        lod = [[0, 1, 2, 3, 4, 5]]
+        lod = [[1, 1, 1, 1, 1]]
         prior_box = np.random.random((10, 4)).astype('float32')
         prior_box_var = np.random.random((10, 4)).astype('float32')
         target_box = np.random.random((5, 10, 4)).astype('float32')
@@ -152,7 +156,7 @@ class TestBoxCoderOpWithLoD(OpTest):
     def setUp(self):
         self.op_type = "box_coder"
-        lod = [[0, 4, 12, 20]]
+        lod = [[4, 8, 8]]
         prior_box = np.random.random((10, 4)).astype('float32')
         prior_box_var = np.random.random((10, 4)).astype('float32')
         target_box = np.random.random((20, 4)).astype('float32')
......
@@ -144,10 +144,10 @@ class TestChunkEvalOp(OpTest):
         starts = sorted(starts)
         self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = self.gen_chunks(
             infer, label, starts)
-        self.inputs = {
-            'Inference': (infer, [starts]),
-            'Label': (label, [starts])
-        }
+        lod = []
+        for i in range(len(starts) - 1):
+            lod.append(starts[i + 1] - starts[i])
+        self.inputs = {'Inference': (infer, [lod]), 'Label': (label, [lod])}
         precision = float(
             self.num_correct_chunks
         ) / self.num_infer_chunks if self.num_infer_chunks else 0
......
@@ -22,9 +22,9 @@ from op_test import OpTest
 class CRFDecoding(object):
     def __init__(self, emission_weights, transition_weights,
                  seq_start_positions):
-        assert (emission_weights.shape[0] == seq_start_positions[-1])
+        assert (emission_weights.shape[0] == sum(seq_start_positions))
         self.tag_num = emission_weights.shape[1]
-        self.seq_num = len(seq_start_positions) - 1
+        self.seq_num = len(seq_start_positions)
 
         self.seq_start_positions = seq_start_positions
         self.x = emission_weights
@@ -34,9 +34,9 @@ class CRFDecoding(object):
         self.w = transition_weights[2:, :]
 
         self.track = np.zeros(
-            (seq_start_positions[-1], self.tag_num), dtype="int64")
+            (sum(seq_start_positions), self.tag_num), dtype="int64")
         self.decoded_path = np.zeros(
-            (seq_start_positions[-1], 1), dtype="int64")
+            (sum(seq_start_positions), 1), dtype="int64")
 
     def _decode_one_sequence(self, decoded_path, x):
         seq_len, tag_num = x.shape
@@ -71,9 +71,11 @@ class CRFDecoding(object):
             decoded_path[i - 1] = max_idx = track[i, max_idx]
 
     def decode(self):
+        cur_pos = 0
         for i in range(self.seq_num):
-            start = self.seq_start_positions[i]
-            end = self.seq_start_positions[i + 1]
+            start = cur_pos
+            cur_pos += self.seq_start_positions[i]
+            end = cur_pos
             self._decode_one_sequence(self.decoded_path[start:end, :],
                                       self.x[start:end, :])
         return self.decoded_path
@@ -90,11 +92,13 @@ class TestCRFDecodingOp1(OpTest):
         TAG_NUM = 17
         MAX_SEQ_LEN = 10
 
-        lod = [[0]]
+        lod = [[]]
+        total_len = 0
         for i in range(SEQ_NUM):
-            lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN))
+            lod[-1].append(random.randint(1, MAX_SEQ_LEN))
+            total_len += lod[-1][-1]
         emission = np.random.uniform(-1, 1,
-                                     [lod[-1][-1], TAG_NUM]).astype("float64")
+                                     [total_len, TAG_NUM]).astype("float64")
         transition = np.random.uniform(-0.5, 0.5,
                                        [TAG_NUM + 2, TAG_NUM]).astype("float64")
@@ -126,7 +130,8 @@ class TestCRFDecodingOp2(OpTest):
         self.op_type = "crf_decoding"
         TAG_NUM = 5
 
-        lod = [[0, 1, 3, 6, 10]]
+        lod = [[1, 2, 3, 4]]
+        total_len = sum(lod[-1])
         transition = np.repeat(
             np.arange(
                 TAG_NUM, dtype="float64").reshape(1, TAG_NUM),
@@ -135,13 +140,13 @@ class TestCRFDecodingOp2(OpTest):
         emission = np.repeat(
             np.arange(
                 TAG_NUM, dtype="float64").reshape(1, TAG_NUM),
-            lod[-1][-1],
+            total_len,
             axis=0)
 
         labels = np.random.randint(
-            low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int64")
+            low=0, high=TAG_NUM, size=(total_len, 1), dtype="int64")
         predicted_labels = np.ones(
-            (lod[-1][-1], 1), dtype="int64") * (TAG_NUM - 1)
+            (total_len, 1), dtype="int64") * (TAG_NUM - 1)
         expected_output = (labels == predicted_labels).astype("int64")
 
         self.inputs = {
......
@@ -22,14 +22,16 @@ from test_softmax_op import stable_softmax
 
 def CTCAlign(input, lod, blank, merge_repeated):
     lod0 = lod[0]
     result = []
-    for i in range(len(lod0) - 1):
+    cur_offset = 0
+    for i in range(len(lod0)):
         prev_token = -1
-        for j in range(lod0[i], lod0[i + 1]):
+        for j in range(cur_offset, cur_offset + lod0[i]):
             token = input[j][0]
             if (token != blank) and not (merge_repeated and
                                          token == prev_token):
                 result.append(token)
                 prev_token = token
+        cur_offset += lod0[i]
     result = np.array(result).reshape([len(result), 1]).astype("int32")
     if len(result) == 0:
         result = np.array([-1])
@@ -39,7 +41,7 @@ def CTCAlign(input, lod, blank, merge_repeated):
 class TestCTCAlignOp(OpTest):
     def config(self):
         self.op_type = "ctc_align"
-        self.input_lod = [[0, 11, 18]]
+        self.input_lod = [[11, 7]]
         self.blank = 0
         self.merge_repeated = False
         self.input = np.array(
@@ -66,7 +68,7 @@ class TestCTCAlignOp(OpTest):
 class TestCTCAlignOpCase1(TestCTCAlignOp):
     def config(self):
         self.op_type = "ctc_align"
-        self.input_lod = [[0, 11, 19]]
+        self.input_lod = [[11, 8]]
         self.blank = 0
         self.merge_repeated = True
         self.input = np.array(
@@ -77,7 +79,7 @@ class TestCTCAlignOpCase1(TestCTCAlignOp):
 class TestCTCAlignOpCase2(TestCTCAlignOp):
     def config(self):
         self.op_type = "ctc_align"
-        self.input_lod = [[0, 4]]
+        self.input_lod = [[4]]
         self.blank = 0
         self.merge_repeated = True
         self.input = np.array([0, 0, 0, 0]).reshape([4, 1]).astype("int32")
......
@@ -74,13 +74,13 @@ class TestDetectionMAPOp(OpTest):
         self.evaluate_difficult = True
         self.ap_type = "integral"
 
-        self.label_lod = [[0, 2, 4]]
+        self.label_lod = [[2, 2]]
         # label difficult xmin ymin xmax ymax
         self.label = [[1, 0, 0.1, 0.1, 0.3, 0.3], [1, 1, 0.6, 0.6, 0.8, 0.8],
                       [2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]]
 
         # label score xmin ymin xmax ymax difficult
-        self.detect_lod = [[0, 3, 7]]
+        self.detect_lod = [[3, 4]]
         self.detect = [
             [1, 0.3, 0.1, 0.0, 0.4, 0.3], [1, 0.7, 0.0, 0.1, 0.2, 0.3],
             [1, 0.9, 0.7, 0.6, 0.8, 0.8], [2, 0.8, 0.2, 0.1, 0.4, 0.4],
@@ -89,7 +89,7 @@ class TestDetectionMAPOp(OpTest):
         ]
 
         # label score true_pos false_pos
-        self.tf_pos_lod = [[0, 3, 7]]
+        self.tf_pos_lod = [[3, 4]]
         self.tf_pos = [[1, 0.9, 1, 0], [1, 0.7, 1, 0], [1, 0.3, 0, 1],
                        [1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0],
                        [3, 0.2, 0, 1]]
@@ -112,15 +112,19 @@ class TestDetectionMAPOp(OpTest):
         for i, count in enumerate(class_pos_count):
             class_pos_count_dict[i] = count
 
-        for i in range(len(true_pos_lod[0]) - 1):
-            start = true_pos_lod[0][i]
-            end = true_pos_lod[0][i + 1]
+        cur_pos = 0
+        for i in range(len(true_pos_lod[0])):
+            start = cur_pos
+            cur_pos += true_pos_lod[0][i]
+            end = cur_pos
             for j in range(start, end):
                 true_pos_dict[i].append(true_pos[j])
 
-        for i in range(len(false_pos_lod[0]) - 1):
-            start = false_pos_lod[0][i]
-            end = false_pos_lod[0][i + 1]
+        cur_pos = 0
+        for i in range(len(false_pos_lod[0])):
+            start = cur_pos
+            cur_pos += false_pos_lod[0][i]
+            end = cur_pos
             for j in range(start, end):
                 false_pos_dict[i].append(false_pos[j])
 
@@ -130,19 +134,19 @@ class TestDetectionMAPOp(OpTest):
         label_number = self.class_num
 
         out_class_pos_count = []
-        out_true_pos_lod = [0]
+        out_true_pos_lod = []
         out_true_pos = []
-        out_false_pos_lod = [0]
+        out_false_pos_lod = []
         out_false_pos = []
 
         for i in range(label_number):
             out_class_pos_count.append([label_count[i]])
             true_pos_list = true_pos[i]
             out_true_pos += true_pos_list
-            out_true_pos_lod.append(len(out_true_pos))
+            out_true_pos_lod.append(len(true_pos_list))
             false_pos_list = false_pos[i]
             out_false_pos += false_pos_list
-            out_false_pos_lod.append(len(out_false_pos))
+            out_false_pos_lod.append(len(false_pos_list))
 
         return out_class_pos_count, out_true_pos, [
             out_true_pos_lod
@@ -241,7 +245,7 @@ class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp):
         self.evaluate_difficult = False
 
-        self.tf_pos_lod = [[0, 2, 6]]
+        self.tf_pos_lod = [[2, 4]]
         # label score true_pos false_pos
         self.tf_pos = [[1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0],
                        [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]]
@@ -267,9 +271,9 @@ class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp):
     def init_test_case(self):
         super(TestDetectionMAPOpMultiBatch, self).init_test_case()
         self.class_pos_count = [0, 2, 1]
-        self.true_pos_lod = [[0, 0, 3, 5]]
+        self.true_pos_lod = [[0, 3, 2]]
         self.true_pos = [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]]
-        self.false_pos_lod = [[0, 0, 3, 5]]
+        self.false_pos_lod = [[0, 3, 2]]
         self.false_pos = [[0.7, 0.], [0.3, 1.], [0.2, 0.], [0.8, 1.], [0.1, 0.]]
......
@@ -136,16 +136,16 @@ class BaseRNN(object):
         feed_dict = dict()
 
         for iname in self.inputs:
-            lod = [0]
+            lod = []
             np_flatten = []
             for seq_id in xrange(len(self.inputs[iname])):
                 seq_len = len(self.inputs[iname][seq_id])
-                lod.append(lod[-1] + seq_len)
+                lod.append(seq_len)
                 np_flatten.extend(self.inputs[iname][seq_id])
 
             t = fluid.Tensor()
             t.set(numpy.array(np_flatten), place)
-            t.set_lod([lod])
+            t.set_recursive_sequence_lengths([lod])
             feed_dict[iname] = t
 
         for pname in self.params:
......
@@ -39,20 +39,20 @@ class TestDyRnnStaticInput(unittest.TestCase):
     def prepare_x_tensor(self):
         self.x_tensor_dim = 10
-        lod = [[0, 2, 3, 6]]
-        shape = [lod[0][-1], self.x_tensor_dim]
+        lod = [[2, 1, 3]]
+        shape = [sum(lod[0]), self.x_tensor_dim]
         self.x_tensor_data = np.random.random(shape).astype('float32')
         self.x_tensor = core.LoDTensor()
-        self.x_tensor.set_lod(lod)
+        self.x_tensor.set_recursive_sequence_lengths(lod)
         self.x_tensor.set(self.x_tensor_data, self.place)
 
     def prepare_static_input_tensor(self):
         self.static_input_tensor_dim = 4
-        lod = [[0, 1, 3, 6]]
-        shape = [lod[0][-1], self.static_input_tensor_dim]
+        lod = [[1, 2, 3]]
+        shape = [sum(lod[0]), self.static_input_tensor_dim]
         self.static_input_data = np.random.random(shape).astype('float32')
         self.static_input_tensor = core.LoDTensor()
-        self.static_input_tensor.set_lod(lod)
+        self.static_input_tensor.set_recursive_sequence_lengths(lod)
         self.static_input_tensor.set(self.static_input_data, self.place)
 
     def fetch_value(self, var):
@@ -69,7 +69,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
         ndarray = np.zeros(shape=dims).astype('float32')
         for i in xrange(np.product(dims)):
             ndarray.ravel()[i] = lod_tensor.get_float_element(i)
-        return ndarray, lod_tensor.lod()
+        return ndarray, lod_tensor.recursive_sequence_lengths()
 
     def build_graph(self, only_forward=False):
         x_tensor = fluid.layers.data(
@@ -131,21 +131,20 @@ class TestDyRnnStaticInput(unittest.TestCase):
             framework.grad_var_name('static_input_tensor'))
         return static_input_grad, loss
 
-    def get_seq_len_from_lod(self, lod):
-        return [lod[0][i + 1] - lod[0][i] for i in xrange(len(lod[0]) - 1)]
-
     def get_expected_static_step_outs(self):
-        x_lod = self.x_tensor.lod()
-        x_seq_len = self.get_seq_len_from_lod(x_lod)
+        x_lod = self.x_tensor.recursive_sequence_lengths()
+        x_seq_len = x_lod[0]
         x_seq_len_sorted = sorted(x_seq_len)
         x_sorted_indices = np.argsort(x_seq_len)[::-1]
 
-        static_lod = self.static_input_tensor.lod()
-        static_sliced = [
-            self.static_input_data[static_lod[0][i]:static_lod[0][i + 1]]
-            for i in xrange(len(static_lod[0]) - 1)
-        ]
-        static_seq_len = self.get_seq_len_from_lod(static_lod)
+        static_lod = self.static_input_tensor.recursive_sequence_lengths()
+        static_sliced = []
+        cur_offset = 0
+        for i in xrange(len(static_lod[0])):
+            static_sliced.append(self.static_input_data[cur_offset:(
+                cur_offset + static_lod[0][i])])
+            cur_offset += static_lod[0][i]
+        static_seq_len = static_lod[0]
         static_reordered = []
         for i in xrange(len(x_sorted_indices)):
             static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist())
@@ -159,11 +158,13 @@ class TestDyRnnStaticInput(unittest.TestCase):
         for i in xrange(self._max_sequence_len):
             end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1)
-            lod = [0]
+            lod = []
+            total_len = 0
             for i in xrange(end):
-                lod.append(static_seq_len_reordered[i] + lod[-1])
+                lod.append(static_seq_len_reordered[i])
+                total_len += lod[-1]
             static_step_lods.append([lod])
-            end = lod[-1]
+            end = total_len
             static_step_outs.append(
                 np.array(static_reordered[:end]).astype('float32'))
@@ -199,7 +200,9 @@ class TestDyRnnStaticInput(unittest.TestCase):
             self.static_input_tensor.set_float_element(i, origin)
             numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
         self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
-        self.assertTrue(np.allclose(actual_lod, self.static_input_tensor.lod()))
+        self.assertTrue(
+            np.allclose(actual_lod,
+                        self.static_input_tensor.recursive_sequence_lengths()))
 
 if __name__ == '__main__':
......
@@ -52,23 +52,29 @@ class TestEditDistanceOp(OpTest):
     def setUp(self):
         self.op_type = "edit_distance"
         normalized = False
-        x1 = np.array([[0, 12, 3, 5, 8, 2]]).astype("int64")
-        x2 = np.array([[0, 12, 4, 7, 8]]).astype("int64")
+        x1 = np.array([[12, 3, 5, 8, 2]]).astype("int64")
+        x2 = np.array([[12, 4, 7, 8]]).astype("int64")
         x1 = np.transpose(x1)
         x2 = np.transpose(x2)
-        x1_lod = [0, 1, 5]
-        x2_lod = [0, 3, 4]
+        x1_lod = [1, 4]
+        x2_lod = [3, 1]
 
-        num_strs = len(x1_lod) - 1
+        num_strs = len(x1_lod)
         distance = np.zeros((num_strs, 1)).astype("float32")
         sequence_num = np.array(2).astype("int64")
+
+        x1_offset = 0
+        x2_offset = 0
         for i in range(0, num_strs):
             distance[i] = Levenshtein(
-                hyp=x1[x1_lod[i]:x1_lod[i + 1]],
-                ref=x2[x2_lod[i]:x2_lod[i + 1]])
+                hyp=x1[x1_offset:(x1_offset + x1_lod[i])],
+                ref=x2[x2_offset:(x2_offset + x2_lod[i])])
+            x1_offset += x1_lod[i]
+            x2_offset += x2_lod[i]
             if normalized is True:
-                len_ref = x2_lod[i + 1] - x2_lod[i]
+                len_ref = x2_lod[i]
                 distance[i] = distance[i] / len_ref
 
         self.attrs = {'normalized': normalized}
         self.inputs = {'Hyps': (x1, [x1_lod]), 'Refs': (x2, [x2_lod])}
         self.outputs = {'Out': distance, 'SequenceNum': sequence_num}
@@ -81,23 +87,29 @@ class TestEditDistanceOpNormalized(OpTest):
     def setUp(self):
         self.op_type = "edit_distance"
         normalized = True
-        x1 = np.array([[0, 10, 3, 6, 5, 8, 2]]).astype("int64")
-        x2 = np.array([[0, 10, 4, 6, 7, 8]]).astype("int64")
+        x1 = np.array([[10, 3, 6, 5, 8, 2]]).astype("int64")
+        x2 = np.array([[10, 4, 6, 7, 8]]).astype("int64")
         x1 = np.transpose(x1)
         x2 = np.transpose(x2)
-        x1_lod = [0, 1, 3, 6]
-        x2_lod = [0, 2, 3, 5]
+        x1_lod = [1, 2, 3]
+        x2_lod = [2, 1, 2]
 
-        num_strs = len(x1_lod) - 1
+        num_strs = len(x1_lod)
         distance = np.zeros((num_strs, 1)).astype("float32")
         sequence_num = np.array(3).astype("int64")
+
+        x1_offset = 0
+        x2_offset = 0
         for i in range(0, num_strs):
             distance[i] = Levenshtein(
-                hyp=x1[x1_lod[i]:x1_lod[i + 1]],
-                ref=x2[x2_lod[i]:x2_lod[i + 1]])
+                hyp=x1[x1_offset:(x1_offset + x1_lod[i])],
+                ref=x2[x2_offset:(x2_offset + x2_lod[i])])
+            x1_offset += x1_lod[i]
+            x2_offset += x2_lod[i]
             if normalized is True:
-                len_ref = x2_lod[i + 1] - x2_lod[i]
+                len_ref = x2_lod[i]
                 distance[i] = distance[i] / len_ref
 
         self.attrs = {'normalized': normalized}
         self.inputs = {'Hyps': (x1, [x1_lod]), 'Refs': (x2, [x2_lod])}
         self.outputs = {'Out': distance, 'SequenceNum': sequence_num}
......
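With length-based LoD, the edit-distance tests slice each sequence through a running offset instead of lod[i]:lod[i + 1]. A self-contained sketch mirroring the x1/x1_lod data above:

import numpy as np

x = np.array([[12], [3], [5], [8], [2]]).astype('int64')
x_lod = [1, 4]                        # was [0, 1, 5] in offset form
offset = 0
for length in x_lod:
    seq = x[offset:offset + length]   # one hypothesis sequence
    offset += length
assert offset == len(x)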
...@@ -24,17 +24,16 @@ class TestFeedFetch(unittest.TestCase): ...@@ -24,17 +24,16 @@ class TestFeedFetch(unittest.TestCase):
input_array = np.ones((4, 4, 6)).astype("float32") input_array = np.ones((4, 4, 6)).astype("float32")
input_array[0, 0, 0] = 3 input_array[0, 0, 0] = 3
input_array[3, 3, 5] = 10 input_array[3, 3, 5] = 10
input_tensor = core.LoDTensor([[0, 2, 4]]) input_tensor = core.LoDTensor([[2, 2]])
input_tensor.set(input_array, place) input_tensor.set(input_array, place)
core.set_feed_variable(scope, input_tensor, "feed", 0) core.set_feed_variable(scope, input_tensor, "feed", 0)
output_tensor = core.get_fetch_variable(scope, "feed", 0) output_tensor = core.get_fetch_variable(scope, "feed", 0)
output_lod = output_tensor.lod() output_lod = output_tensor.recursive_sequence_lengths()
self.assertEqual(0, output_lod[0][0]) self.assertEqual(2, output_lod[0][0])
self.assertEqual(2, output_lod[0][1]) self.assertEqual(2, output_lod[0][1])
self.assertEqual(4, output_lod[0][2])
output_array = np.array(output_tensor) output_array = np.array(output_tensor)
self.assertEqual(3, output_array[0, 0, 0]) self.assertEqual(3, output_array[0, 0, 0])
......
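For reference, a standalone sketch of the renamed Pybind API exercised above; the paddle.fluid.core import path and CPUPlace are assumptions of this sketch, not shown in the hunk:

import numpy as np
import paddle.fluid.core as core  # assumed import path

place = core.CPUPlace()
t = core.LoDTensor()
t.set(np.ones((4, 4, 6)).astype('float32'), place)
t.set_recursive_sequence_lengths([[2, 2]])  # two sequences of 2 rows each
assert t.recursive_sequence_lengths() == [[2, 2]]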
...@@ -55,7 +55,7 @@ class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest): ...@@ -55,7 +55,7 @@ class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest):
self.op_type = "fill_constant_batch_size_like" self.op_type = "fill_constant_batch_size_like"
self.inputs = { self.inputs = {
'Input': (np.random.random((31, 28)).astype("float32"), 'Input': (np.random.random((31, 28)).astype("float32"),
[[0, 9, 23, 31]]) [[9, 14, 8]])
} }
self.attrs = { self.attrs = {
'value': 3.5, 'value': 3.5,
......
...@@ -20,8 +20,8 @@ from test_lstm_op import identity, sigmoid, tanh, relu ...@@ -20,8 +20,8 @@ from test_lstm_op import identity, sigmoid, tanh, relu
class TestGRUOp(OpTest): class TestGRUOp(OpTest):
lod = [[0, 2, 6, 9]] lod = [[2, 4, 3]]
batch_size = lod[0][-1] batch_size = sum(lod[0])
frame_size = 5 frame_size = 5
activate = { activate = {
'identity': identity, 'identity': identity,
...@@ -33,10 +33,10 @@ class TestGRUOp(OpTest): ...@@ -33,10 +33,10 @@ class TestGRUOp(OpTest):
@staticmethod @staticmethod
def seq_to_batch(lod, is_reverse): def seq_to_batch(lod, is_reverse):
idx_in_seq_list = [] idx_in_seq_list = []
seq_starts = lod[0] seq_lens = lod[0]
seq_lens = [] seq_starts = [0]
for i in range(len(seq_starts) - 1): for i in range(len(seq_lens)):
seq_lens.append(seq_starts[i + 1] - seq_starts[i]) seq_starts.append(seq_starts[-1] + seq_lens[i])
sorted_seqs = sorted( sorted_seqs = sorted(
range(len(seq_lens)), lambda x, y: seq_lens[y] - seq_lens[x]) range(len(seq_lens)), lambda x, y: seq_lens[y] - seq_lens[x])
num_batch = seq_lens[sorted_seqs[0]] num_batch = seq_lens[sorted_seqs[0]]
......
...@@ -58,8 +58,8 @@ class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp): ...@@ -58,8 +58,8 @@ class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp):
def setUp(self): def setUp(self):
super(TestIOUSimilarityOpWithLoD, self).setUp() super(TestIOUSimilarityOpWithLoD, self).setUp()
self.boxes1_lod = [[0, 1, 2]] self.boxes1_lod = [[1, 1]]
self.output_lod = [[0, 1, 2]] self.output_lod = [[1, 1]]
self.inputs = {'X': (self.boxes1, self.boxes1_lod), 'Y': self.boxes2} self.inputs = {'X': (self.boxes1, self.boxes1_lod), 'Y': self.boxes2}
self.outputs = {'Out': (self.output, self.output_lod)} self.outputs = {'Out': (self.output, self.output_lod)}
......
...@@ -105,11 +105,13 @@ class TestLinearChainCrfOp(OpTest): ...@@ -105,11 +105,13 @@ class TestLinearChainCrfOp(OpTest):
MAX_SEQ_LEN = 5 MAX_SEQ_LEN = 5
# the linear_chain_crf operator only supports sequence (LoD level = 1) # the linear_chain_crf operator only supports sequence (LoD level = 1)
lod = [[0]] lod = [[]]
seq_start_pos = [0]
for i in range(SEQ_NUM): for i in range(SEQ_NUM):
lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) lod[-1].append(random.randint(1, MAX_SEQ_LEN))
emission = np.random.uniform(-1, 1, seq_start_pos.append(seq_start_pos[-1] + lod[-1][-1])
[lod[-1][-1], TAG_NUM]).astype("float64") emission = np.random.uniform(
-1, 1, [seq_start_pos[-1], TAG_NUM]).astype("float64")
emission_row_max = np.amax(emission, axis=1, keepdims=True) emission_row_max = np.amax(emission, axis=1, keepdims=True)
emission_exps = np.exp(emission - emission_row_max) emission_exps = np.exp(emission - emission_row_max)
...@@ -118,14 +120,14 @@ class TestLinearChainCrfOp(OpTest): ...@@ -118,14 +120,14 @@ class TestLinearChainCrfOp(OpTest):
transition_exps = np.exp(transition) transition_exps = np.exp(transition)
labels = np.random.randint( labels = np.random.randint(
low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int64") low=0, high=TAG_NUM, size=(seq_start_pos[-1], 1), dtype="int64")
self.inputs = { self.inputs = {
"Emission": (emission, lod), "Emission": (emission, lod),
"Transition": transition, "Transition": transition,
"Label": (labels, lod) "Label": (labels, lod)
} }
crf = LinearChainCrfForward(lod[0], emission, emission_row_max, crf = LinearChainCrfForward(seq_start_pos, emission, emission_row_max,
emission_exps, transition, transition_exps, emission_exps, transition, transition_exps,
labels) labels)
alpha, log_likelihood = crf.crf_forward_compute() alpha, log_likelihood = crf.crf_forward_compute()
......
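A sketch of the random LoD generation above: the length-based lod and its running offsets (seq_start_pos) are built side by side, so the total length stays available without a cumulative final lod entry:

import random

SEQ_NUM, MAX_SEQ_LEN = 3, 5
lod = [[]]
seq_start_pos = [0]
for _ in range(SEQ_NUM):
    lod[-1].append(random.randint(1, MAX_SEQ_LEN))
    seq_start_pos.append(seq_start_pos[-1] + lod[-1][-1])
assert seq_start_pos[-1] == sum(lod[0])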
...@@ -30,7 +30,8 @@ class TestLoDRankTable(unittest.TestCase): ...@@ -30,7 +30,8 @@ class TestLoDRankTable(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set(numpy.random.random(size=(17, 100)), cpu) tensor.set(numpy.random.random(size=(17, 100)), cpu)
tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]]) tensor.set_recursive_sequence_lengths(
[[1, 2], [5, 1, 1], [3, 1, 5, 1, 3, 3, 1]])
exe.run(scope=scope, feed={'x': tensor}) exe.run(scope=scope, feed={'x': tensor})
var = scope.find_var(rank_table.name) var = scope.find_var(rank_table.name)
table = var.get_lod_rank_table() table = var.get_lod_rank_table()
......
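The rank-table test rewrites a three-level offset LoD as per-sequence lengths. A sketch checking that the two forms agree on the hunk's exact values (to_offsets is an illustrative helper, not from the patch):

def to_offsets(level_lengths):
    # one length-based LoD level -> its offset-based form
    offsets = [0]
    for l in level_lengths:
        offsets.append(offsets[-1] + l)
    return offsets

lengths = [[1, 2], [5, 1, 1], [3, 1, 5, 1, 3, 3, 1]]
offsets = [to_offsets(level) for level in lengths]
assert offsets == [[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]]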
...@@ -21,11 +21,15 @@ class TestLodResetOpByAttr(OpTest): ...@@ -21,11 +21,15 @@ class TestLodResetOpByAttr(OpTest):
def setUp(self): def setUp(self):
self.op_type = "lod_reset" self.op_type = "lod_reset"
x = np.random.random((10, 20)).astype("float32") x = np.random.random((10, 20)).astype("float32")
lod = [[0, 3, 5, 10]] lod = [[3, 2, 5]]
target_lod_0 = [0, 7, 10] # target_offset_lod and target_lod are the same lod info represented
# in offset-based format and length-based format, respectively.
target_offset_lod = [0, 7, 10]
target_lod = [7, 3]
self.inputs = {'X': (x, lod)} self.inputs = {'X': (x, lod)}
self.attrs = {'target_lod': target_lod_0} # The `target_lod` attribute is still offset-based
self.outputs = {'Out': (x, [target_lod_0])} self.attrs = {'target_lod': target_offset_lod}
self.outputs = {'Out': (x, [target_lod])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
...@@ -38,13 +42,16 @@ class TestLodResetOpByInput(OpTest): ...@@ -38,13 +42,16 @@ class TestLodResetOpByInput(OpTest):
def setUp(self): def setUp(self):
self.op_type = "lod_reset" self.op_type = "lod_reset"
x = np.random.random((10, 20)).astype("float32") x = np.random.random((10, 20)).astype("float32")
lod = [[0, 3, 5, 10]] lod = [[3, 2, 5]]
target_lod_0 = [0, 4, 7, 10] # target_offset_lod and target_lod are the same lod info represented
# in offset-based format and length-based format, respectively.
target_offset_lod = [0, 4, 7, 10]
target_lod = [4, 3, 3]
self.inputs = { self.inputs = {
'X': (x, lod), 'X': (x, lod),
'Y': np.array([target_lod_0]).astype('int32') 'Y': np.array([target_offset_lod]).astype('int32')
} }
self.outputs = {'Out': (x, [target_lod_0])} self.outputs = {'Out': (x, [target_lod])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
...@@ -57,15 +64,16 @@ class TestLodResetOpBoth(OpTest): ...@@ -57,15 +64,16 @@ class TestLodResetOpBoth(OpTest):
def setUp(self): def setUp(self):
self.op_type = "lod_reset" self.op_type = "lod_reset"
x = np.random.random((10, 20)).astype("float32") x = np.random.random((10, 20)).astype("float32")
lod = [[0, 3, 5, 10]] lod = [[3, 2, 5]]
target_lod_0_attr = [0, 7, 10] target_offset_lod_attr = [0, 7, 10]
target_lod_0_in = [0, 4, 7, 10] target_offset_lod_in = [0, 4, 7, 10]
target_lod_in = [4, 3, 3]
self.inputs = { self.inputs = {
'X': (x, lod), 'X': (x, lod),
'Y': np.array(target_lod_0_in).astype('int32') 'Y': np.array(target_offset_lod_in).astype('int32')
} }
self.attrs = {'target_lod': target_lod_0_attr} self.attrs = {'target_lod': target_offset_lod_attr}
self.outputs = {'Out': (x, [target_lod_0_in])} self.outputs = {'Out': (x, [target_lod_in])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
...@@ -78,11 +86,11 @@ class TestLodResetOpYIsLoDTensor(OpTest): ...@@ -78,11 +86,11 @@ class TestLodResetOpYIsLoDTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "lod_reset" self.op_type = "lod_reset"
x = np.random.random((10, 20)).astype("float32") x = np.random.random((10, 20)).astype("float32")
lod = [[0, 3, 5, 10]] lod = [[3, 2, 5]]
y = np.random.random((10, 10)).astype("float32") y = np.random.random((10, 10)).astype("float32")
target_lod_0 = [[0, 4, 7, 10]] target_lod = [[4, 3, 3]]
self.inputs = {'X': (x, lod), 'Y': (y, target_lod_0)} self.inputs = {'X': (x, lod), 'Y': (y, target_lod)}
self.outputs = {'Out': (x, target_lod_0)} self.outputs = {'Out': (x, target_lod)}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
......
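The lod_reset tests keep the target_lod attribute and the Y input offset-based while expected outputs become length-based; the conversion they rely on, sketched with the hunk's own values:

target_offset_lod = [0, 4, 7, 10]
target_lod = [target_offset_lod[i + 1] - target_offset_lod[i]
              for i in range(len(target_offset_lod) - 1)]
assert target_lod == [4, 3, 3]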
...@@ -27,7 +27,7 @@ class TestLoDTensorArray(unittest.TestCase): ...@@ -27,7 +27,7 @@ class TestLoDTensorArray(unittest.TestCase):
for i in xrange(10): for i in xrange(10):
t = core.LoDTensor() t = core.LoDTensor()
t.set(numpy.array([i], dtype='float32'), cpu) t.set(numpy.array([i], dtype='float32'), cpu)
t.set_lod([[0, 1]]) t.set_recursive_sequence_lengths([[1]])
tensor_array.append(t) tensor_array.append(t)
self.assertEqual(10, len(tensor_array)) self.assertEqual(10, len(tensor_array))
...@@ -35,17 +35,17 @@ class TestLoDTensorArray(unittest.TestCase): ...@@ -35,17 +35,17 @@ class TestLoDTensorArray(unittest.TestCase):
for i in xrange(10): for i in xrange(10):
t = tensor_array[i] t = tensor_array[i]
self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32')) self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32'))
self.assertEqual([[0, 1]], t.lod()) self.assertEqual([[1]], t.recursive_sequence_lengths())
t = core.LoDTensor() t = core.LoDTensor()
t.set(numpy.array([i + 10], dtype='float32'), cpu) t.set(numpy.array([i + 10], dtype='float32'), cpu)
t.set_lod([[0, 2]]) t.set_recursive_sequence_lengths([[1]])
tensor_array[i] = t tensor_array[i] = t
t = tensor_array[i] t = tensor_array[i]
self.assertEqual( self.assertEqual(
numpy.array(t), numpy.array( numpy.array(t), numpy.array(
[i + 10], dtype='float32')) [i + 10], dtype='float32'))
self.assertEqual([[0, 2]], t.lod()) self.assertEqual([[1]], t.recursive_sequence_lengths())
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -29,7 +29,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -29,7 +29,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set( tensor.set(
numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) numpy.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 10]]) tensor.set_recursive_sequence_lengths([[3, 6, 1]])
expect = map(lambda x: numpy.array(x).astype('int32'), expect = map(lambda x: numpy.array(x).astype('int32'),
[[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]) [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
self.main( self.main(
...@@ -42,7 +42,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -42,7 +42,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set( tensor.set(
numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) numpy.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 9, 10]]) tensor.set_recursive_sequence_lengths([[3, 6, 0, 1]])
expect = map(lambda x: numpy.array(x).astype('int32'), expect = map(lambda x: numpy.array(x).astype('int32'),
[[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]) [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
self.main( self.main(
...@@ -55,7 +55,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -55,7 +55,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set( tensor.set(
numpy.arange(20).reshape(20, 1).astype('int32'), self.place()) numpy.arange(20).reshape(20, 1).astype('int32'), self.place())
tensor.set_lod([[0, 2, 5], [0, 3, 9, 11, 17, 20]]) tensor.set_recursive_sequence_lengths([[2, 3], [3, 6, 2, 6, 3]])
expect = [ expect = [
numpy.array( numpy.array(
...@@ -65,7 +65,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -65,7 +65,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
[17, 18, 19], dtype='int32') [17, 18, 19], dtype='int32')
] ]
lod = [[[0, 2, 5]], [[0, 6, 12]], [[0, 3]]] lod = [[[2, 3]], [[6, 6]], [[3]]]
self.main( self.main(
tensor=tensor, tensor=tensor,
expect_array=expect, expect_array=expect,
...@@ -77,8 +77,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -77,8 +77,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor.set( tensor.set(
numpy.arange(31).reshape(31, 1).astype('int32'), self.place()) numpy.arange(31).reshape(31, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 5, 9, 11], tensor.set_recursive_sequence_lengths(
[0, 3, 7, 11, 11, 12, 17, 19, 21, 23, 30, 31]]) [[3, 2, 4, 2], [3, 4, 4, 0, 1, 5, 2, 2, 2, 7, 1]])
expect = [ expect = [
numpy.array( numpy.array(
...@@ -88,7 +88,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -88,7 +88,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
], [17, 18, 3, 4, 5, 6, 11, 30], [19, 20, 7, 8, 9, 10], [21, 22]] ], [17, 18, 3, 4, 5, 6, 11, 30], [19, 20, 7, 8, 9, 10], [21, 22]]
] ]
lod = [[[0, 5, 8, 8, 15]], [[0, 2, 6, 7, 8]], [[0, 2, 6]], [[0, 2]]] lod = [[[5, 3, 0, 7]], [[2, 4, 1, 1]], [[2, 4]], [[2]]]
self.main( self.main(
tensor=tensor, tensor=tensor,
expect_array=expect, expect_array=expect,
...@@ -99,8 +99,9 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -99,8 +99,9 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set( tensor.set(
numpy.arange(50).reshape(50, 1).astype('int32'), self.place()) numpy.arange(50).reshape(50, 1).astype('int32'), self.place())
tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13], tensor.set_recursive_sequence_lengths(
[0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]]) [[2, 3, 1], [2, 3, 1, 4, 2, 1],
[3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]])
expect = [ expect = [
numpy.array( numpy.array(
...@@ -108,8 +109,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -108,8 +109,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], range( for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], range(
22, 39) + range(7, 21), range(39, 46)] 22, 39) + range(7, 21), range(39, 46)]
] ]
lod = [[[0, 1, 3, 4], [0, 1, 4, 8, 12]], lod = [[[1, 2, 1], [1, 3, 4, 4]], [[4, 3], [1, 4, 4, 8, 4, 6, 4]],
[[0, 4, 7], [0, 1, 5, 9, 17, 21, 27, 31]], [[0, 2], [0, 6, 7]]] [[2], [6, 1]]]
self.main( self.main(
tensor=tensor, tensor=tensor,
expect_array=expect, expect_array=expect,
...@@ -120,8 +121,9 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -120,8 +121,9 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set( tensor.set(
numpy.arange(50).reshape(50, 1).astype('int32'), self.place()) numpy.arange(50).reshape(50, 1).astype('int32'), self.place())
tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13], tensor.set_recursive_sequence_lengths(
[0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]]) [[2, 3, 1], [2, 3, 1, 4, 2, 1],
[3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]])
self.main( self.main(
tensor=tensor, tensor=tensor,
expect_array=None, expect_array=None,
...@@ -162,12 +164,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ...@@ -162,12 +164,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
exp_tensor, exp_lod = exp exp_tensor, exp_lod = exp
exp_tensor = numpy.expand_dims(exp_tensor, axis=1) exp_tensor = numpy.expand_dims(exp_tensor, axis=1)
self.assertTrue(numpy.allclose(exp_tensor, numpy.array(array[i]))) self.assertTrue(numpy.allclose(exp_tensor, numpy.array(array[i])))
self.assertEqual(exp_lod, array[i].lod()) self.assertEqual(exp_lod, array[i].recursive_sequence_lengths())
def check_tensor_same(self, actual, expect): def check_tensor_same(self, actual, expect):
self.assertTrue( self.assertTrue(
numpy.allclose(numpy.array(actual), numpy.array(expect))) numpy.allclose(numpy.array(actual), numpy.array(expect)))
self.assertEqual(actual.lod(), expect.lod()) self.assertEqual(actual.recursive_sequence_lengths(),
expect.recursive_sequence_lengths())
class TestCPULoDTensorArrayOpGrad(unittest.TestCase): class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
...@@ -188,7 +191,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): ...@@ -188,7 +191,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
tensor = core.LoDTensor() tensor = core.LoDTensor()
tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place) tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_lod([[0, 3, 9, 10]]) tensor.set_recursive_sequence_lengths([[3, 6, 1]])
g_vars = program.global_block().var(x.name + "@GRAD") g_vars = program.global_block().var(x.name + "@GRAD")
......
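Note that a zero in a length-based level encodes an empty sequence, which the old offset form expressed as a repeated boundary; a sketch with the values from the second hunk above:

lengths = [3, 6, 0, 1]
offsets = [0]
for l in lengths:
    offsets.append(offsets[-1] + l)
assert offsets == [0, 3, 9, 9, 10]   # the old offset form of the same lod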
...@@ -84,15 +84,17 @@ def lstm( ...@@ -84,15 +84,17 @@ def lstm(
h = g_o * act_cell(c) h = g_o * act_cell(c)
return h, c return h, c
def _reverse(x, lod): def _reverse(x, offset):
y = np.zeros_like(x) y = np.zeros_like(x)
for i in range(len(lod) - 1): for i in range(len(offset) - 1):
b, e = lod[i], lod[i + 1] b, e = offset[i], offset[i + 1]
y[b:e, :] = np.flip(x[b:e, :], 0) y[b:e, :] = np.flip(x[b:e, :], 0)
return y return y
offset = lod[0] offset = [0]
batch_size = len(offset) - 1 for l in lod[0]:
offset.append(offset[-1] + l)
batch_size = len(lod[0])
hidden = [] hidden = []
cell = [] cell = []
input = _reverse(input, offset) if is_reverse else input input = _reverse(input, offset) if is_reverse else input
...@@ -100,7 +102,7 @@ def lstm( ...@@ -100,7 +102,7 @@ def lstm(
input = input + np.tile(w_b, (offset[-1], 1)) input = input + np.tile(w_b, (offset[-1], 1))
for i in range(batch_size): for i in range(batch_size):
# compute one sequence # compute one sequence
seq_len = offset[i + 1] - offset[i] seq_len = lod[0][i]
x = input[offset[i]:offset[i + 1], :] x = input[offset[i]:offset[i + 1], :]
h_pre = h0[i] # 1 x D h_pre = h0[i] # 1 x D
c_pre = c0[i] # 1 x D c_pre = c0[i] # 1 x D
...@@ -124,7 +126,7 @@ def lstm( ...@@ -124,7 +126,7 @@ def lstm(
class TestLstmOp(OpTest): class TestLstmOp(OpTest):
def set_argument(self): def set_argument(self):
self.lod = [[0, 2, 5, 7]] self.lod = [[2, 3, 2]]
self.D = 16 self.D = 16
self.act_gate = 'sigmoid' self.act_gate = 'sigmoid'
...@@ -139,8 +141,8 @@ class TestLstmOp(OpTest): ...@@ -139,8 +141,8 @@ class TestLstmOp(OpTest):
self.set_argument() self.set_argument()
self.op_type = 'lstm' self.op_type = 'lstm'
T = self.lod[0][-1] T = sum(self.lod[0])
N = len(self.lod[0]) - 1 N = len(self.lod[0])
x = np.random.normal(size=(T, 4 * self.D)).astype('float64') x = np.random.normal(size=(T, 4 * self.D)).astype('float64')
if self.has_initial_state: if self.has_initial_state:
...@@ -186,7 +188,7 @@ class TestLstmOp(OpTest): ...@@ -186,7 +188,7 @@ class TestLstmOp(OpTest):
def test_check_grad(self): def test_check_grad(self):
# TODO(qingqing) remove following lines after the check_grad is refined. # TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros( self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64') (N, self.D)).astype('float64')
...@@ -196,7 +198,7 @@ class TestLstmOp(OpTest): ...@@ -196,7 +198,7 @@ class TestLstmOp(OpTest):
# class TestLstmOpHasInitial(TestLstmOp): # class TestLstmOpHasInitial(TestLstmOp):
# def set_argument(self): # def set_argument(self):
# self.lod = [[0, 2, 5, 7]] # self.lod = [[2, 3, 2]]
# self.D = 16 # self.D = 16
# self.act_gate = 'sigmoid' # self.act_gate = 'sigmoid'
...@@ -209,7 +211,7 @@ class TestLstmOp(OpTest): ...@@ -209,7 +211,7 @@ class TestLstmOp(OpTest):
# def test_check_grad(self): # def test_check_grad(self):
# # TODO(qingqing) remove following lines after the check_grad is refined. # # TODO(qingqing) remove following lines after the check_grad is refined.
# N = len(self.lod[0]) - 1 # N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64') # (N, self.D)).astype('float64')
...@@ -218,7 +220,7 @@ class TestLstmOp(OpTest): ...@@ -218,7 +220,7 @@ class TestLstmOp(OpTest):
# max_relative_error=5e-4) # max_relative_error=5e-4)
# def test_check_grad_ingore_bias(self): # def test_check_grad_ingore_bias(self):
# N = len(self.lod[0]) - 1 # N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64') # (N, self.D)).astype('float64')
...@@ -228,7 +230,7 @@ class TestLstmOp(OpTest): ...@@ -228,7 +230,7 @@ class TestLstmOp(OpTest):
# no_grad_set=set('Bias')) # no_grad_set=set('Bias'))
# def test_check_grad_ingore_weight(self): # def test_check_grad_ingore_weight(self):
# N = len(self.lod[0]) - 1 # N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64') # (N, self.D)).astype('float64')
...@@ -238,7 +240,7 @@ class TestLstmOp(OpTest): ...@@ -238,7 +240,7 @@ class TestLstmOp(OpTest):
# no_grad_set=set('Weight')) # no_grad_set=set('Weight'))
# def test_check_grad_ingore_input(self): # def test_check_grad_ingore_input(self):
# N = len(self.lod[0]) - 1 # N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64') # (N, self.D)).astype('float64')
...@@ -248,7 +250,7 @@ class TestLstmOp(OpTest): ...@@ -248,7 +250,7 @@ class TestLstmOp(OpTest):
# no_grad_set=set('Input')) # no_grad_set=set('Input'))
# def test_check_grad_ingore_h0(self): # def test_check_grad_ingore_h0(self):
# N = len(self.lod[0]) - 1 # N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64') # (N, self.D)).astype('float64')
...@@ -258,7 +260,7 @@ class TestLstmOp(OpTest): ...@@ -258,7 +260,7 @@ class TestLstmOp(OpTest):
# no_grad_set=set('H0')) # no_grad_set=set('H0'))
# def test_check_grad_ingore_c0(self): # def test_check_grad_ingore_c0(self):
# N = len(self.lod[0]) - 1 # N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') # self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros( # self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64') # (N, self.D)).astype('float64')
...@@ -269,7 +271,7 @@ class TestLstmOp(OpTest): ...@@ -269,7 +271,7 @@ class TestLstmOp(OpTest):
# class TestLstmOpRerverse(TestLstmOp): # class TestLstmOpRerverse(TestLstmOp):
# def set_argument(self): # def set_argument(self):
# self.lod = [[0, 2, 5, 7]] # self.lod = [[2, 3, 2]]
# self.D = 16 # self.D = 16
# self.act_gate = 'sigmoid' # self.act_gate = 'sigmoid'
...@@ -282,7 +284,7 @@ class TestLstmOp(OpTest): ...@@ -282,7 +284,7 @@ class TestLstmOp(OpTest):
# class TestLstmOpNotUsePeepholes(TestLstmOp): # class TestLstmOpNotUsePeepholes(TestLstmOp):
# def set_argument(self): # def set_argument(self):
# self.lod = [[0, 2, 5, 7]] # self.lod = [[2, 3, 2]]
# self.D = 16 # self.D = 16
# self.act_gate = 'sigmoid' # self.act_gate = 'sigmoid'
......
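A self-contained check of the _reverse helper above: offsets are rebuilt from the length-based lod and each sequence segment is flipped on its own. Data values are illustrative:

import numpy as np

def _reverse(x, offset):
    y = np.zeros_like(x)
    for i in range(len(offset) - 1):
        b, e = offset[i], offset[i + 1]
        y[b:e, :] = np.flip(x[b:e, :], 0)
    return y

lod = [[2, 3, 2]]
offset = [0]
for l in lod[0]:
    offset.append(offset[-1] + l)

x = np.arange(7).reshape(7, 1)
y = _reverse(x, offset)
assert y.ravel().tolist() == [1, 0, 4, 3, 2, 6, 5]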
...@@ -64,15 +64,17 @@ def lstmp( ...@@ -64,15 +64,17 @@ def lstmp(
r = act_proj(r) r = act_proj(r)
return r, c return r, c
def _reverse(x, lod): def _reverse(x, offset):
y = np.zeros_like(x) y = np.zeros_like(x)
for i in range(len(lod) - 1): for i in range(len(offset) - 1):
b, e = lod[i], lod[i + 1] b, e = offset[i], offset[i + 1]
y[b:e, :] = np.flip(x[b:e, :], 0) y[b:e, :] = np.flip(x[b:e, :], 0)
return y return y
offset = lod[0] offset = [0]
batch_size = len(offset) - 1 for l in lod[0]:
offset.append(offset[-1] + l)
batch_size = len(lod[0])
# recurrent projection state # recurrent projection state
projection = [] projection = []
cell = [] cell = []
...@@ -81,7 +83,7 @@ def lstmp( ...@@ -81,7 +83,7 @@ def lstmp(
input = input + np.tile(w_b, (offset[-1], 1)) input = input + np.tile(w_b, (offset[-1], 1))
for i in range(batch_size): for i in range(batch_size):
# compute one sequence # compute one sequence
seq_len = offset[i + 1] - offset[i] seq_len = lod[0][i]
x = input[offset[i]:offset[i + 1], :] x = input[offset[i]:offset[i + 1], :]
r_pre = np.dot(h0[i], w_rh) # 1 x P r_pre = np.dot(h0[i], w_rh) # 1 x P
r_pre = act_proj(r_pre) r_pre = act_proj(r_pre)
...@@ -117,8 +119,8 @@ class TestLstmpOp(LstmTest.TestLstmOp): ...@@ -117,8 +119,8 @@ class TestLstmpOp(LstmTest.TestLstmOp):
self.reset_argument() self.reset_argument()
self.op_type = 'lstmp' self.op_type = 'lstmp'
T = self.lod[0][-1] T = sum(self.lod[0])
N = len(self.lod[0]) - 1 N = len(self.lod[0])
x = np.random.normal(size=(T, 4 * self.D)).astype('float64') x = np.random.normal(size=(T, 4 * self.D)).astype('float64')
if self.has_initial_state: if self.has_initial_state:
...@@ -166,7 +168,7 @@ class TestLstmpOp(LstmTest.TestLstmOp): ...@@ -166,7 +168,7 @@ class TestLstmpOp(LstmTest.TestLstmOp):
def test_check_grad(self): def test_check_grad(self):
# TODO(qingqing) remove following lines after the check_grad is refined. # TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -183,7 +185,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -183,7 +185,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
def test_check_grad(self): def test_check_grad(self):
# TODO(qingqing) remove following lines after the check_grad is refined. # TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -195,7 +197,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -195,7 +197,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
max_relative_error=1e-2) max_relative_error=1e-2)
def test_check_grad_ingore_bias(self): def test_check_grad_ingore_bias(self):
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -207,7 +209,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -207,7 +209,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
no_grad_set=set('Bias')) no_grad_set=set('Bias'))
def test_check_grad_ingore_weight(self): def test_check_grad_ingore_weight(self):
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -219,7 +221,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -219,7 +221,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
no_grad_set=set('Weight')) no_grad_set=set('Weight'))
def test_check_grad_ingore_proj_weight(self): def test_check_grad_ingore_proj_weight(self):
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -231,7 +233,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -231,7 +233,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
no_grad_set=set('ProjWeight')) no_grad_set=set('ProjWeight'))
def test_check_grad_ingore_input(self): def test_check_grad_ingore_input(self):
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -243,7 +245,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -243,7 +245,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
no_grad_set=set('Input')) no_grad_set=set('Input'))
def test_check_grad_ingore_h0(self): def test_check_grad_ingore_h0(self):
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
...@@ -255,7 +257,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): ...@@ -255,7 +257,7 @@ class TestLstmpOpHasInitial(TestLstmpOp):
no_grad_set=set('H0')) no_grad_set=set('H0'))
def test_check_grad_ingore_c0(self): def test_check_grad_ingore_c0(self):
N = len(self.lod[0]) - 1 N = len(self.lod[0])
self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64')
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64')
......
...@@ -70,7 +70,7 @@ class TestMineHardExamplesOp(OpTest): ...@@ -70,7 +70,7 @@ class TestMineHardExamplesOp(OpTest):
self.updated_match_indices = self.match_indices self.updated_match_indices = self.match_indices
self.neg_indices_lod = [[0, 1, 2]] self.neg_indices_lod = [[1, 1]]
self.neg_indices = np.array([[1], [0]]).astype('int32') self.neg_indices = np.array([[1], [0]]).astype('int32')
...@@ -92,7 +92,7 @@ class TestMineHardExamplesOpHardExample(TestMineHardExamplesOp): ...@@ -92,7 +92,7 @@ class TestMineHardExamplesOpHardExample(TestMineHardExamplesOp):
self.updated_match_indices = np.array([[0, -1, -1], self.updated_match_indices = np.array([[0, -1, -1],
[-1, -1, -1]]).astype('int32') [-1, -1, -1]]).astype('int32')
self.neg_indices_lod = [[0, 1, 3]] self.neg_indices_lod = [[1, 2]]
self.neg_indices = np.array([[2], [0], [2]]).astype('int32') self.neg_indices = np.array([[2], [0], [2]]).astype('int32')
......
...@@ -135,12 +135,12 @@ def batched_multiclass_nms(boxes, scores, background, score_threshold, ...@@ -135,12 +135,12 @@ def batched_multiclass_nms(boxes, scores, background, score_threshold,
batch_size = scores.shape[0] batch_size = scores.shape[0]
det_outs = [] det_outs = []
lod = [0] lod = []
for n in range(batch_size): for n in range(batch_size):
nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background, nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background,
score_threshold, nms_threshold, score_threshold, nms_threshold,
nms_top_k, keep_top_k) nms_top_k, keep_top_k)
lod.append(lod[-1] + nmsed_num) lod.append(nmsed_num)
if nmsed_num == 0: continue if nmsed_num == 0: continue
for c, indices in nmsed_outs.iteritems(): for c, indices in nmsed_outs.iteritems():
......
...@@ -27,9 +27,9 @@ class TestOneHotOp(OpTest): ...@@ -27,9 +27,9 @@ class TestOneHotOp(OpTest):
self.op_type = 'one_hot' self.op_type = 'one_hot'
depth = 10 depth = 10
dimension = 12 dimension = 12
x_lod = [[0, 4, 5, 8, 11]] x_lod = [[4, 1, 3, 3]]
x = [np.random.randint(0, depth - 1) for i in xrange(x_lod[0][-1])] x = [np.random.randint(0, depth - 1) for i in xrange(sum(x_lod[0]))]
x = np.array(x).astype('int').reshape([x_lod[0][-1], 1]) x = np.array(x).astype('int').reshape([sum(x_lod[0]), 1])
out = np.zeros(shape=(np.product(x.shape[:-1]), out = np.zeros(shape=(np.product(x.shape[:-1]),
depth)).astype('float32') depth)).astype('float32')
...@@ -50,9 +50,9 @@ class TestOneHotOp_default_dtype(OpTest): ...@@ -50,9 +50,9 @@ class TestOneHotOp_default_dtype(OpTest):
self.op_type = 'one_hot' self.op_type = 'one_hot'
depth = 10 depth = 10
dimension = 12 dimension = 12
x_lod = [[0, 4, 5, 8, 11]] x_lod = [[4, 1, 3, 3]]
x = [np.random.randint(0, depth - 1) for i in xrange(x_lod[0][-1])] x = [np.random.randint(0, depth - 1) for i in xrange(sum(x_lod[0]))]
x = np.array(x).astype('int').reshape([x_lod[0][-1], 1]) x = np.array(x).astype('int').reshape([sum(x_lod[0]), 1])
out = np.zeros(shape=(np.product(x.shape[:-1]), out = np.zeros(shape=(np.product(x.shape[:-1]),
depth)).astype('float32') depth)).astype('float32')
...@@ -75,11 +75,11 @@ class TestOneHotOp_exception(OpTest): ...@@ -75,11 +75,11 @@ class TestOneHotOp_exception(OpTest):
self.place = core.CPUPlace() self.place = core.CPUPlace()
self.dimension = 12 self.dimension = 12
self.x = core.LoDTensor() self.x = core.LoDTensor()
x_lod = [[0, 4, 5, 8, 11]] x_lod = [[4, 1, 3, 3]]
data = [np.random.randint(11, 20) for i in xrange(x_lod[0][-1])] data = [np.random.randint(11, 20) for i in xrange(sum(x_lod[0]))]
data = np.array(data).astype('int').reshape([x_lod[0][-1], 1]) data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
self.x.set(data, self.place) self.x.set(data, self.place)
self.x.set_lod(x_lod) self.x.set_recursive_sequence_lengths(x_lod)
def test_check_output(self): def test_check_output(self):
program = Program() program = Program()
......
...@@ -28,7 +28,7 @@ class TestPrintOpCPU(unittest.TestCase): ...@@ -28,7 +28,7 @@ class TestPrintOpCPU(unittest.TestCase):
self.x_tensor = core.LoDTensor() self.x_tensor = core.LoDTensor()
tensor_np = np.random.random(size=(2, 3)).astype('float32') tensor_np = np.random.random(size=(2, 3)).astype('float32')
self.x_tensor.set(tensor_np, self.place) self.x_tensor.set(tensor_np, self.place)
self.x_tensor.set_lod([[0, 1, 1]]) self.x_tensor.set_recursive_sequence_lengths([[1, 1]])
def build_network(self, only_forward, **kargs): def build_network(self, only_forward, **kargs):
x = layers.data('x', shape=[3], dtype='float32', lod_level=1) x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
...@@ -62,7 +62,7 @@ class TestPrintOpGPU(TestPrintOpCPU): ...@@ -62,7 +62,7 @@ class TestPrintOpGPU(TestPrintOpCPU):
self.x_tensor = core.LoDTensor() self.x_tensor = core.LoDTensor()
tensor_np = np.random.random(size=(2, 3)).astype('float32') tensor_np = np.random.random(size=(2, 3)).astype('float32')
self.x_tensor.set(tensor_np, self.place) self.x_tensor.set(tensor_np, self.place)
self.x_tensor.set_lod([[0, 1, 1]]) self.x_tensor.set_recursive_sequence_lengths([[1, 1]])
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -70,11 +70,10 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -70,11 +70,10 @@ class TestReorderLoDTensor(unittest.TestCase):
lod_level_i = numpy.random.randint( lod_level_i = numpy.random.randint(
low=1, low=1,
high=5, high=5,
size=self.num_seq if i == 0 else lod_level_i[-1]) size=self.num_seq if i == 0 else sum(lod_level_i)).tolist()
lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist()
data_lod.append(lod_level_i) data_lod.append(lod_level_i)
data_value = numpy.random.random( data_value = numpy.random.random(
size=[data_lod[-1][-1] if data_lod else self.num_seq size=[sum(data_lod[-1]) if data_lod else self.num_seq
] + data_shape).astype('float32') ] + data_shape).astype('float32')
self.data[data_name] = (data_value, data_lod) self.data[data_name] = (data_value, data_lod)
...@@ -84,29 +83,36 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -84,29 +83,36 @@ class TestReorderLoDTensor(unittest.TestCase):
tensor = fluid.Tensor() tensor = fluid.Tensor()
tensor.set(self.data[desc[0]][0], place) tensor.set(self.data[desc[0]][0], place)
if self.data[desc[0]][1]: if self.data[desc[0]][1]:
tensor.set_lod(self.data[desc[0]][1]) tensor.set_recursive_sequence_lengths(self.data[desc[0]][1])
self.inputs[desc[0]] = tensor self.inputs[desc[0]] = tensor
def reorder(self): def reorder(self):
level = 0 def convert_to_offset(lod):
offset_lod = [[0] for i in lod]
for i, level in enumerate(lod):
for seq_len in level:
offset_lod[i].append(offset_lod[i][-1] + seq_len)
return offset_lod
level = 0
# compute the rank_table according to ref_lod # compute the rank_table according to ref_lod
ref_lod = self.data[self.data_desc[1][0]][1][level] ref_lod = self.data[self.data_desc[1][0]][1][level]
rank_table = [] # list of (index, length) rank_table = [] # list of (index, length)
for i in range(len(ref_lod) - 1): for i in range(len(ref_lod)):
rank_table.append((i, ref_lod[i + 1] - ref_lod[i])) rank_table.append((i, ref_lod[i]))
rank_table = sorted(rank_table, lambda x, y: y[1] - x[1]) rank_table = sorted(rank_table, lambda x, y: y[1] - x[1])
# compute the input sequence info according to input_lod # compute the input sequence info according to input_lod
input_value, input_lod = self.data[self.data_desc[0][0]] input_value, input_lod = self.data[self.data_desc[0][0]]
offset_lod = convert_to_offset(input_lod)
input_table = [] # list of (offset, length, sub_lod) input_table = [] # list of (offset, length, sub_lod)
if input_lod: if offset_lod:
for i in range(len(input_lod[level]) - 1): for i in range(len(offset_lod[level]) - 1):
start_idx = i start_idx = i
end_idx = i + 1 end_idx = i + 1
sub_lod = [] sub_lod = []
for lod_level_i in input_lod[level:]: for lod_level_i in offset_lod[level:]:
sub_lod_i = [] sub_lod_i = []
for idx in range(start_idx, end_idx): for idx in range(start_idx, end_idx):
sub_lod_i.append(lod_level_i[idx + 1] - lod_level_i[ sub_lod_i.append(lod_level_i[idx + 1] - lod_level_i[
...@@ -132,10 +138,9 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -132,10 +138,9 @@ class TestReorderLoDTensor(unittest.TestCase):
input_seq_sub_lod = input_table[index][2] input_seq_sub_lod = input_table[index][2]
if len(output_lod) == 0: if len(output_lod) == 0:
output_lod = [[0] for i in input_seq_sub_lod] output_lod = [[] for i in input_seq_sub_lod]
for i, sub_lod_i in enumerate(input_seq_sub_lod): for i, level in enumerate(input_seq_sub_lod):
for idx_sub in sub_lod_i: output_lod[i].extend(level)
output_lod[i].append(output_lod[i][-1] + idx_sub)
return output_value, output_lod return output_value, output_lod
def test_reorder_lod_tensor(self): def test_reorder_lod_tensor(self):
...@@ -148,7 +153,8 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -148,7 +153,8 @@ class TestReorderLoDTensor(unittest.TestCase):
self.assertTrue( self.assertTrue(
numpy.allclose( numpy.allclose(
numpy.array(actual_output), expect_output, atol=0.001)) numpy.array(actual_output), expect_output, atol=0.001))
self.assertEqual(expect_output_lod, actual_output.lod()) self.assertEqual(expect_output_lod,
actual_output.recursive_sequence_lengths())
# check gradient # check gradient
expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0])
expect_grad_lod = self.data[self.data_desc[0][0]][1] expect_grad_lod = self.data[self.data_desc[0][0]][1]
...@@ -156,7 +162,8 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -156,7 +162,8 @@ class TestReorderLoDTensor(unittest.TestCase):
self.assertTrue( self.assertTrue(
numpy.allclose( numpy.allclose(
numpy.array(actual_grad), expect_grad, atol=0.001)) numpy.array(actual_grad), expect_grad, atol=0.001))
self.assertEqual(expect_grad_lod, actual_grad.lod()) self.assertEqual(expect_grad_lod,
actual_grad.recursive_sequence_lengths())
def test_reorder_tensor(self): def test_reorder_tensor(self):
self.data_desc[0][-1] = 0 # input is tensor self.data_desc[0][-1] = 0 # input is tensor
...@@ -168,7 +175,8 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -168,7 +175,8 @@ class TestReorderLoDTensor(unittest.TestCase):
self.assertTrue( self.assertTrue(
numpy.allclose( numpy.allclose(
numpy.array(actual_output), expect_output, atol=0.001)) numpy.array(actual_output), expect_output, atol=0.001))
self.assertEqual(expect_output_lod, actual_output.lod()) self.assertEqual(expect_output_lod,
actual_output.recursive_sequence_lengths())
# check gradient # check gradient
expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0])
expect_grad_lod = self.data[self.data_desc[0][0]][1] expect_grad_lod = self.data[self.data_desc[0][0]][1]
...@@ -176,14 +184,14 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -176,14 +184,14 @@ class TestReorderLoDTensor(unittest.TestCase):
self.assertTrue( self.assertTrue(
numpy.allclose( numpy.allclose(
numpy.array(actual_grad), expect_grad, atol=0.001)) numpy.array(actual_grad), expect_grad, atol=0.001))
self.assertEqual(expect_grad_lod, actual_grad.lod()) self.assertEqual(expect_grad_lod,
actual_grad.recursive_sequence_lengths())
# compare outputs between LoDTensors with explicit and implicit lod # compare outputs between LoDTensors with explicit and implicit lod
# use the same data but set the input lod explicitly # use the same data but set the input lod explicitly
input_lod = [[ input_lod = [[1] * len(self.data[self.data_desc[0][0]][0])]
i for i in range(len(self.data[self.data_desc[0][0]][0]) + 1) self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths(
]] input_lod)
self.inputs[self.data_desc[0][0]].set_lod(input_lod)
# preserve the output of LodTensor with implicit lod to compare # preserve the output of LodTensor with implicit lod to compare
expect_output = [ expect_output = [
numpy.array(actual_output) for actual_output in self.actual_outputs numpy.array(actual_output) for actual_output in self.actual_outputs
......
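A sketch of the rank-table construction above under length-based lod: each sequence contributes an (index, length) pair, sorted by decreasing length. This sketch uses a key= function rather than the Python 2 cmp form in the test:

ref_lod = [2, 4, 3]   # illustrative lengths for the reference level
rank_table = sorted(enumerate(ref_lod), key=lambda t: -t[1])
assert rank_table == [(1, 4), (2, 3), (0, 2)]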
...@@ -107,7 +107,7 @@ class TestROIPoolOp(OpTest): ...@@ -107,7 +107,7 @@ class TestROIPoolOp(OpTest):
rois = [] rois = []
self.rois_lod = [[]] self.rois_lod = [[]]
for bno in range(self.batch_size): for bno in range(self.batch_size):
self.rois_lod[0].append(len(rois)) self.rois_lod[0].append(bno + 1)
for i in range(bno + 1): for i in range(bno + 1):
x1 = np.random.random_integers( x1 = np.random.random_integers(
0, self.width / self.spatial_scale - self.pooled_width) 0, self.width / self.spatial_scale - self.pooled_width)
...@@ -121,7 +121,6 @@ class TestROIPoolOp(OpTest): ...@@ -121,7 +121,6 @@ class TestROIPoolOp(OpTest):
roi = [bno, x1, y1, x2, y2] roi = [bno, x1, y1, x2, y2]
rois.append(roi) rois.append(roi)
self.rois_lod[0].append(len(rois))
self.rois_num = len(rois) self.rois_num = len(rois)
self.rois = np.array(rois).astype("int64") self.rois = np.array(rois).astype("int64")
......
...@@ -19,8 +19,10 @@ from op_test import OpTest ...@@ -19,8 +19,10 @@ from op_test import OpTest
def row_conv_forward(x, lod, wt): def row_conv_forward(x, lod, wt):
out = np.zeros_like(x) out = np.zeros_like(x)
seq_info = lod[0] num_sequences = len(lod[0])
num_sequences = len(seq_info) - 1 seq_info = [0]
for seq_len in lod[0]:
seq_info.append(seq_info[-1] + seq_len)
context_length = wt.shape[0] context_length = wt.shape[0]
for i in range(num_sequences): # loop over number of sequences for i in range(num_sequences): # loop over number of sequences
...@@ -32,7 +34,6 @@ def row_conv_forward(x, lod, wt): ...@@ -32,7 +34,6 @@ def row_conv_forward(x, lod, wt):
cur_timesteps = end - start cur_timesteps = end - start
for j in range(cur_timesteps): # loop over different timesteps for j in range(cur_timesteps): # loop over different timesteps
for k in range(context_length): for k in range(context_length):
if j + k >= cur_timesteps: if j + k >= cur_timesteps:
continue continue
curoutput[j, :] += curinput[j + k, :] * wt[k, :] curoutput[j, :] += curinput[j + k, :] * wt[k, :]
...@@ -44,8 +45,8 @@ class TestRowConvOp1(OpTest): ...@@ -44,8 +45,8 @@ class TestRowConvOp1(OpTest):
def setUp(self): def setUp(self):
self.op_type = "row_conv" self.op_type = "row_conv"
lod = [[0, 2, 5, 7]] lod = [[2, 3, 2]]
T = lod[0][-1] T = sum(lod[0])
D = 16 D = 16
context_length = 2 context_length = 2
...@@ -75,8 +76,8 @@ class TestRowConvOp2(OpTest): ...@@ -75,8 +76,8 @@ class TestRowConvOp2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "row_conv" self.op_type = "row_conv"
lod = [[0, 20, 50, 100]] lod = [[20, 30, 50]]
T = lod[0][-1] T = sum(lod[0])
D = 35 D = 35
context_length = 35 context_length = 35
......
...@@ -18,14 +18,19 @@ import sys ...@@ -18,14 +18,19 @@ import sys
from op_test import OpTest from op_test import OpTest
def to_abs_lod(lod): def to_abs_offset_lod(lod):
if len(lod) == 0 or len(lod) == 1: offset_lod = [[0] for i in lod]
return lod for i, level in enumerate(lod):
for seq_len in level:
offset_lod[i].append(offset_lod[i][-1] + seq_len)
if len(offset_lod) == 0 or len(offset_lod) == 1:
return offset_lod
import copy import copy
new_lod = copy.deepcopy(lod) new_offset_lod = copy.deepcopy(offset_lod)
for idx, val in enumerate(lod[0]): for idx, val in enumerate(offset_lod[0]):
new_lod[0][idx] = lod[1][val] new_offset_lod[0][idx] = offset_lod[1][val]
return new_lod return new_offset_lod
def seq_concat(inputs, level): def seq_concat(inputs, level):
...@@ -35,11 +40,11 @@ def seq_concat(inputs, level): ...@@ -35,11 +40,11 @@ def seq_concat(inputs, level):
x1 = inputs['X'][1][1][0] x1 = inputs['X'][1][1][0]
level_idx = len(lod0) - level - 1 level_idx = len(lod0) - level - 1
outs = [] outs = []
for i in range(len(lod0[level_idx]) - 1): for i in range(len(lod0[level_idx])):
sub_x0 = x0[to_abs_lod(lod0)[level_idx][i]:to_abs_lod(lod0)[level_idx][ sub_x0 = x0[to_abs_offset_lod(lod0)[level_idx][i]:to_abs_offset_lod(
i + 1], :] lod0)[level_idx][i + 1], :]
sub_x1 = x1[to_abs_lod(lod1)[level_idx][i]:to_abs_lod(lod1)[level_idx][ sub_x1 = x1[to_abs_offset_lod(lod1)[level_idx][i]:to_abs_offset_lod(
i + 1], :] lod1)[level_idx][i + 1], :]
outs.append(np.concatenate((sub_x0, sub_x1), axis=0)) outs.append(np.concatenate((sub_x0, sub_x1), axis=0))
return np.concatenate(outs, axis=0) return np.concatenate(outs, axis=0)
...@@ -48,9 +53,9 @@ class TestSeqConcatOp(OpTest): ...@@ -48,9 +53,9 @@ class TestSeqConcatOp(OpTest):
def set_data(self): def set_data(self):
# two level, batch size is 3 # two level, batch size is 3
x0 = np.random.random((4, 6, 3)).astype('float32') x0 = np.random.random((4, 6, 3)).astype('float32')
lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]] lod0 = [[2, 2], [1, 1, 1, 1]]
x1 = np.random.random((4, 8, 3)).astype('float32') x1 = np.random.random((4, 8, 3)).astype('float32')
lod1 = [[0, 2, 4], [0, 1, 2, 3, 4]] lod1 = [[2, 2], [1, 1, 1, 1]]
axis = 1 axis = 1
level = 1 level = 1
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
...@@ -72,14 +77,14 @@ class TestSeqConcatOpLevelZeroNestedSequence(TestSeqConcatOp): ...@@ -72,14 +77,14 @@ class TestSeqConcatOpLevelZeroNestedSequence(TestSeqConcatOp):
def set_data(self): def set_data(self):
# two level, batch size is 3 # two level, batch size is 3
x0 = np.random.random((4, 6, 3)).astype('float32') x0 = np.random.random((4, 6, 3)).astype('float32')
lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]] lod0 = [[2, 2], [1, 1, 1, 1]]
x1 = np.random.random((7, 6, 3)).astype('float32') x1 = np.random.random((7, 6, 3)).astype('float32')
lod1 = [[0, 2, 4], [0, 1, 3, 5, 7]] lod1 = [[2, 2], [1, 2, 2, 2]]
axis = 0 axis = 0
level = 0 level = 0
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level} self.attrs = {'axis': axis, 'level': level}
out_lod = [[0, 2, 4], [0, 2, 5, 8, 11]] out_lod = [[2, 2], [2, 3, 3, 3]]
self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)} self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
...@@ -87,14 +92,14 @@ class TestSeqConcatOplevelOneNestedSequence(TestSeqConcatOp): ...@@ -87,14 +92,14 @@ class TestSeqConcatOplevelOneNestedSequence(TestSeqConcatOp):
def set_data(self): def set_data(self):
# two level, batch size is 3 # two level, batch size is 3
x0 = np.random.random((4, 6, 3)).astype('float32') x0 = np.random.random((4, 6, 3)).astype('float32')
lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]] lod0 = [[2, 2], [1, 1, 1, 1]]
x1 = np.random.random((7, 6, 3)).astype('float32') x1 = np.random.random((7, 6, 3)).astype('float32')
lod1 = [[0, 3, 4], [0, 1, 3, 5, 7]] lod1 = [[3, 1], [1, 2, 2, 2]]
axis = 0 axis = 0
level = 1 level = 1
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level} self.attrs = {'axis': axis, 'level': level}
out_lod = [[0, 5, 8], [0, 1, 2, 3, 5, 7, 8, 9, 11]] out_lod = [[5, 3], [1, 1, 1, 2, 2, 1, 1, 2]]
self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)} self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
...@@ -102,14 +107,14 @@ class TestSeqConcatOpLevelZeroSequence(TestSeqConcatOp): ...@@ -102,14 +107,14 @@ class TestSeqConcatOpLevelZeroSequence(TestSeqConcatOp):
def set_data(self): def set_data(self):
# two level, batch size is 3 # two level, batch size is 3
x0 = np.random.random((4, 3, 4)).astype('float32') x0 = np.random.random((4, 3, 4)).astype('float32')
lod0 = [[0, 1, 2, 3, 4]] lod0 = [[1, 1, 1, 1]]
x1 = np.random.random((7, 3, 4)).astype('float32') x1 = np.random.random((7, 3, 4)).astype('float32')
lod1 = [[0, 1, 3, 5, 7]] lod1 = [[1, 2, 2, 2]]
axis = 0 axis = 0
level = 0 level = 0
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level} self.attrs = {'axis': axis, 'level': level}
out_lod = [[0, 2, 5, 8, 11]] out_lod = [[2, 3, 3, 3]]
self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)} self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
......
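A worked example of to_abs_offset_lod above for a two-level length-based lod, restated so it runs standalone; the input matches lod1 of the level-zero nested-sequence test:

import copy

def to_abs_offset_lod(lod):
    offset_lod = [[0] for _ in lod]
    for i, level in enumerate(lod):
        for seq_len in level:
            offset_lod[i].append(offset_lod[i][-1] + seq_len)
    if len(offset_lod) <= 1:
        return offset_lod
    new_offset_lod = copy.deepcopy(offset_lod)
    for idx, val in enumerate(offset_lod[0]):
        # map top-level offsets through the lower level's offsets
        new_offset_lod[0][idx] = offset_lod[1][val]
    return new_offset_lod

lod1 = [[2, 2], [1, 2, 2, 2]]
assert to_abs_offset_lod(lod1) == [[0, 3, 7], [0, 1, 3, 5, 7]]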
@@ -75,35 +75,38 @@ class TestSeqProject(OpTest):
         pading_data = self.pad_data
         out = np.zeros((self.input_size[0], self.context_length *
                         self.input_size[1])).astype('float32')
-        lod = lod[0]
+        offset = [0]
+        for seq_len in lod[0]:
+            offset.append(offset[-1] + seq_len)
         begin_pad = np.max([0, -self.context_start])

-        for i in range(len(lod) - 1):
+        for i in range(len(offset) - 1):
             for j in range(self.context_length):
-                in_begin = lod[i] + self.context_start + j
-                in_end = lod[i + 1] + self.context_start + j
-                out_begin = lod[i]
-                out_end = lod[i + 1]
-                if in_begin < lod[i]:
-                    pad_size = np.min([lod[i] - in_begin, lod[i + 1] - lod[i]])
+                in_begin = offset[i] + self.context_start + j
+                in_end = offset[i + 1] + self.context_start + j
+                out_begin = offset[i]
+                out_end = offset[i + 1]
+                if in_begin < offset[i]:
+                    pad_size = np.min(
+                        [offset[i] - in_begin, offset[i + 1] - offset[i]])
                     if self.padding_trainable:
                         sub_w = pading_data[j:j + pad_size, :]
-                        out[lod[i]:lod[i] + pad_size, j * self.input_size[1]:(
-                            j + 1) * self.input_size[1]] = sub_w
-                    out_begin = lod[i] + pad_size
-                    in_begin = lod[i]
+                        out[offset[i]:offset[i] + pad_size, j * self.input_size[
+                            1]:(j + 1) * self.input_size[1]] = sub_w
+                    out_begin = offset[i] + pad_size
+                    in_begin = offset[i]

-                if in_end > lod[i + 1]:
+                if in_end > offset[i + 1]:
                     pad_size = np.min(
-                        [in_end - lod[i + 1], lod[i + 1] - lod[i]])
+                        [in_end - offset[i + 1], offset[i + 1] - offset[i]])
                     if self.padding_trainable:
                         sub_w = pading_data[begin_pad + self.context_start + j -
                                             pad_size:begin_pad +
                                             self.context_start + j, :]
-                        out[lod[i + 1] - pad_size:lod[i + 1], j * self.
-                            input_size[1]:(j + 1) * self.input_size[1]] = sub_w
-                    in_end = lod[i + 1]
-                    out_end = lod[i + 1] - pad_size
+                        out[offset[i + 1] - pad_size:offset[i + 1], j * self.
+                            input_size[1]:(j + 1) * self.input_size[1]] = sub_w
+                    in_end = offset[i + 1]
+                    out_end = offset[i + 1] - pad_size
                 if in_end <= in_begin:
                     continue
@@ -175,7 +178,11 @@ class TestSeqProject(OpTest):
         self.context_stride = 1
         self.input_size = [self.input_row, 23]
-        self.lod = [[0, 4, 5, 8, self.input_row]]
+        offset_lod = [[0, 4, 5, 8, self.input_row]]
+        self.lod = [[]]
+        # convert from offset-based lod to length-based lod
+        for i in range(len(offset_lod[0]) - 1):
+            self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
         self.output_represention = 8  # output feature size
@@ -188,7 +195,11 @@ class TestSeqProjectCase1(TestSeqProject):
         self.context_stride = 1
         self.input_size = [self.input_row, 23]
-        self.lod = [[0, 4, 5, 8, self.input_row]]
+        offset_lod = [[0, 4, 5, 8, self.input_row]]
+        self.lod = [[]]
+        # convert from offset-based lod to length-based lod
+        for i in range(len(offset_lod[0]) - 1):
+            self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
         self.output_represention = 8  # output feature size
@@ -203,8 +214,12 @@ class TestSeqProjectCase2(TestSeqProject):
         self.input_size = [self.input_row, 23]
         idx = range(self.input_size[0])
         del idx[0]
-        self.lod = [[0] + np.sort(random.sample(idx, 8)).tolist() +
-                    [self.input_size[0]]]
+        offset_lod = [[0] + np.sort(random.sample(idx, 8)).tolist() +
+                      [self.input_size[0]]]
+        self.lod = [[]]
+        # convert from offset-based lod to length-based lod
+        for i in range(len(offset_lod[0]) - 1):
+            self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
         self.output_represention = 8  # output feature size
......
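The offset-to-length conversion loop that the three TestSeqProject cases now share is an adjacent difference; for reference, a hedged equivalence check against NumPy (the one-liner is my shorthand, not the patch's code):

    import numpy as np

    offset_lod = [[0, 4, 5, 8, 11]]  # sample values in the style of the tests above
    lod = [[]]
    for i in range(len(offset_lod[0]) - 1):
        lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
    assert lod[0] == np.diff(offset_lod[0]).tolist() == [4, 1, 3, 3]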
@@ -18,26 +18,34 @@ from op_test import OpTest
 class TestSeqAvgPool(OpTest):
+    def convert_to_offset(self, lod):
+        offset = [[0] for i in lod]
+        for i, level in enumerate(lod):
+            for seq_len in level:
+                offset[i].append(offset[i][-1] + seq_len)
+        return offset
+
     def set_data(self):
         self.op_type = 'sequence_pool'
         # one level, batch size is 4
         x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
-        lod = [[0, 4, 5, 8, 11]]
+        lod = [[4, 1, 3, 3]]
         self.inputs = {'X': (x, lod)}
+        offset = self.convert_to_offset(lod)

         out = np.zeros((4, 23)).astype('float32')
         self.outputs = {'Out': out}
-        return x, lod, out
+        return x, offset, out

-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "AVERAGE"}
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
+        for i in range(len(offset[0]) - 1):
+            sub_x = x[offset[0][i]:offset[0][i + 1], :]
             out[i] = sub_x.mean(axis=0)

     def setUp(self):
-        x, lod, out = self.set_data()
-        self.compute(x, lod, out)
+        x, offset, out = self.set_data()
+        self.compute(x, offset, out)

     def test_check_output(self):
         self.check_output()
@@ -50,10 +58,10 @@ class TestSeqAvgPool(OpTest):
 class TestSeqSumPool(TestSeqAvgPool):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "SUM"}
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
+        for i in range(len(offset[0]) - 1):
+            sub_x = x[offset[0][i]:offset[0][i + 1], :]
             out[i] = sub_x.sum(axis=0)
@@ -61,46 +69,47 @@ class TestSeqMaxPool(TestSeqAvgPool):
     def set_data(self):
         self.op_type = 'sequence_pool'
         x = np.random.uniform(0.1, 1, [13, 23]).astype('float32')
-        lod = [[0, 4, 5, 8, 13]]
-        for i in range(4):
-            l = lod[0][i + 1] - lod[0][i]
-            x[lod[0][i] + np.random.randint(l), :] += 2.0
+        lod = [[4, 1, 3, 5]]
+        offset = self.convert_to_offset(lod)
+        for i in range(len(offset[0]) - 1):
+            l = offset[0][i + 1] - offset[0][i]
+            x[offset[0][i] + np.random.randint(l), :] += 2.0
         self.inputs = {'X': (x, lod)}

         out = np.zeros((4, 23)).astype('float32')
         self.outputs = {'Out': out}
-        return x, lod, out
+        return x, offset, out

-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "MAX"}
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
+        for i in range(len(offset[0]) - 1):
+            sub_x = x[offset[0][i]:offset[0][i + 1], :]
             out[i] = np.amax(sub_x, axis=0)

 class TestSeqSqrtPool(TestSeqAvgPool):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "SQRT"}
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
-            len = lod[0][i + 1] - lod[0][i]
-            out[i] = sub_x.sum(axis=0) / np.sqrt(len)
+        for i in range(len(offset[0]) - 1):
+            sub_x = x[offset[0][i]:offset[0][i + 1], :]
+            seq_len = offset[0][i + 1] - offset[0][i]
+            out[i] = sub_x.sum(axis=0) / np.sqrt(seq_len)

 class TestSeqLastPool(TestSeqAvgPool):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "LAST"}
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
+        for i in range(len(offset[0]) - 1):
+            sub_x = x[offset[0][i]:offset[0][i + 1], :]
             out[i] = sub_x[-1, :]

 class TestSeqFirstPool(TestSeqAvgPool):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "FIRST"}
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
+        for i in range(len(offset[0]) - 1):
+            sub_x = x[offset[0][i]:offset[0][i + 1], :]
             out[i] = sub_x[0, :]
@@ -109,35 +118,39 @@ class TestSeqAvgPool2D(TestSeqAvgPool):
         self.op_type = 'sequence_pool'
         # one level, batch size is 4
         x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
-        lod = [[0, 4, 5, 8, 13]]
+        lod = [[4, 1, 3, 5]]
         self.inputs = {'X': (x, lod)}
+        offset = self.convert_to_offset(lod)

         out = np.zeros((4, 3, 17)).astype('float32')
         self.outputs = {'Out': out}
-        return x, lod, out
+        return x, offset, out

-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "AVERAGE"}
-        for i in range(4):
-            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
+        for i in range(len(offset[0]) - 1):
+            sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :],
+                               (-1, 3 * 17))
             out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))

 class TestSeqSumPool2D(TestSeqAvgPool2D):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "SUM"}
-        for i in range(4):
-            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
+        for i in range(len(offset[0]) - 1):
+            sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :],
+                               (-1, 3 * 17))
             out[i] = np.reshape(sub_x.sum(axis=0), (3, 17))

 class TestSeqSqrtPool2D(TestSeqAvgPool2D):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "SQRT"}
-        for i in range(4):
-            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
-            len = lod[0][i + 1] - lod[0][i]
-            out[i] = np.reshape(sub_x.sum(axis=0) / np.sqrt(len), (3, 17))
+        for i in range(len(offset[0]) - 1):
+            sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :],
+                               (-1, 3 * 17))
+            seq_len = offset[0][i + 1] - offset[0][i]
+            out[i] = np.reshape(sub_x.sum(axis=0) / np.sqrt(seq_len), (3, 17))

     def test_check_grad(self):
         # Remove MaxIndex after check_grad is refined.
@@ -150,36 +163,40 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D):
     def set_data(self):
         self.op_type = 'sequence_pool'
         x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32')
-        lod = [[0, 4, 5, 8, 13]]
+        lod = [[4, 1, 3, 5]]
         self.inputs = {'X': (x, lod)}
-        for i in range(4):
-            l = lod[0][i + 1] - lod[0][i]
-            x[lod[0][i] + np.random.randint(l), :] += 1.0
+        offset = self.convert_to_offset(lod)
+        for i in range(len(offset[0]) - 1):
+            l = offset[0][i + 1] - offset[0][i]
+            x[offset[0][i] + np.random.randint(l), :] += 1.0

         out = np.zeros((4, 3, 11)).astype('float32')
         self.outputs = {'Out': out}
-        return x, lod, out
+        return x, offset, out

-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "MAX"}
-        for i in range(4):
-            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 11))
+        for i in range(len(offset[0]) - 1):
+            sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :],
+                               (-1, 3 * 11))
             out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11))

 class TestSeqLastPool2D(TestSeqAvgPool2D):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "LAST"}
-        for i in range(4):
-            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
+        for i in range(len(offset[0]) - 1):
+            sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :],
+                               (-1, 3 * 17))
             out[i] = np.reshape(sub_x[-1, :], (3, 17))

 class TestSeqFirstPool2D(TestSeqAvgPool2D):
-    def compute(self, x, lod, out):
+    def compute(self, x, offset, out):
         self.attrs = {'pooltype': "FIRST"}
-        for i in range(4):
-            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
+        for i in range(len(offset[0]) - 1):
+            sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :],
+                               (-1, 3 * 17))
             out[i] = np.reshape(sub_x[0, :], (3, 17))
......
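The convert_to_offset helper added above is the bridge the pooling tests use between the new length-based inputs and the old offset arithmetic. A standalone sketch with a quick self-check (written as a module-level function here, unlike the method in the patch):

    def convert_to_offset(lod):
        # prefix-sum each level of a length-based LoD
        offset = [[0] for _ in lod]
        for i, level in enumerate(lod):
            for seq_len in level:
                offset[i].append(offset[i][-1] + seq_len)
        return offset

    assert convert_to_offset([[4, 1, 3, 3]]) == [[0, 4, 5, 8, 11]]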
@@ -18,15 +18,17 @@ from op_test import OpTest
 def sequence_erase(in_seq, lod0, tokens):
-    new_lod0 = [0]
+    new_lod0 = []
     out_seq = []
-    for i in range(0, len(lod0) - 1):
+    offset = 0
+    for i in range(0, len(lod0)):
         num_out = 0
-        for dat in in_seq[lod0[i]:lod0[i + 1]]:
+        for dat in in_seq[offset:(offset + lod0[i])]:
             if dat not in tokens:
                 out_seq.append(dat)
                 num_out += 1
-        new_lod0.append(new_lod0[-1] + num_out)
+        offset += lod0[i]
+        new_lod0.append(num_out)
     return np.array(out_seq).astype("int32"), new_lod0
@@ -34,7 +36,7 @@ class TestSequenceEraseOpInt32(OpTest):
     def setUp(self):
         self.op_type = "sequence_erase"
         in_seq = np.random.randint(0, 10, (30, 1)).astype("int32")
-        lod = [[0, 9, 13, 24, 30]]
+        lod = [[9, 4, 11, 6]]
         tokens = [2, 3, 5]
         out_seq, new_lod0 = sequence_erase(in_seq, lod[0], tokens)
         self.attrs = {'tokens': tokens}
@@ -49,7 +51,7 @@ class TestSequenceEraseOpInt64(OpTest):
     def setUp(self):
         self.op_type = "sequence_erase"
         in_seq = np.random.randint(0, 10, (30, 1)).astype("int64")
-        lod = [[0, 9, 13, 24, 30]]
+        lod = [[9, 4, 11, 6]]
         tokens = [2, 3, 5]
         out_seq, new_lod0 = sequence_erase(in_seq, lod[0], tokens)
         self.attrs = {'tokens': tokens}
@@ -64,7 +66,7 @@ class TestSequenceEraseOpEmpty(OpTest):
     def setUp(self):
         self.op_type = "sequence_erase"
         in_seq = np.random.randint(0, 10, (30, 1)).astype("int32")
-        lod = [[0, 9, 13, 24, 30]]
+        lod = [[9, 4, 11, 6]]
         tokens = []
         out_seq, new_lod0 = sequence_erase(in_seq, lod[0], tokens)
         self.attrs = {'tokens': tokens}
......
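A quick hand-checkable run of the rewritten sequence_erase above, on a flat 1-D input for simplicity (values are mine, chosen for illustration; assumes the function as defined in the hunk):

    import numpy as np

    in_seq = np.array([2, 7, 3, 5, 8, 9])  # two sequences: [2, 7, 3] and [5, 8, 9]
    lod0 = [3, 3]                          # length-based level-0 LoD
    tokens = [2, 3, 5]
    out_seq, new_lod0 = sequence_erase(in_seq, lod0, tokens)
    assert out_seq.tolist() == [7, 8, 9]   # erased tokens are dropped
    assert new_lod0 == [1, 2]              # the lengths shrink accordingly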
@@ -21,7 +21,7 @@ class TestSequenceExpand(OpTest):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32')
         y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
-        y_lod = [[0, 1, 4, 8]]
+        y_lod = [[1, 3, 4]]
         self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}

     def compute(self):
@@ -37,23 +37,27 @@ class TestSequenceExpand(OpTest):
         out = np.zeros(shape=((0, ) + x_data.shape[1:]), dtype=x_data.dtype)

         if x_lod is None:
-            x_idx = [i for i in xrange(x_data.shape[0] + 1)]
+            # x_idx = [i for i in xrange(x_data.shape[0] + 1)]
+            x_idx = [1] * x_data.shape[0]
         else:
             x_idx = x_lod[0]
-        out_lod = [[0]]
+        out_lod = [[]]

-        for i in xrange(1, len(y_lod[ref_level])):
-            repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]
-            x_len = x_idx[i] - x_idx[i - 1]
+        offset = 0
+        for i in xrange(len(y_lod[ref_level])):
+            repeat_num = y_lod[ref_level][i]
+            x_len = x_idx[i]
             if repeat_num > 0:
-                x_sub = x_data[x_idx[i - 1]:x_idx[i], :]
+                x_sub = x_data[offset:(offset + x_len), :]
                 stacked_x_sub = x_sub
                 for r in range(repeat_num - 1):
                     stacked_x_sub = np.vstack((stacked_x_sub, x_sub))
                 out = np.vstack((out, stacked_x_sub))
                 if x_lod is not None:
                     for j in xrange(repeat_num):
-                        out_lod[0].append(out_lod[0][-1] + x_len)
+                        out_lod[0].append(x_len)
+            offset += x_len

         if x_lod is None:
             self.outputs = {'Out': out}
@@ -75,9 +79,9 @@ class TestSequenceExpand(OpTest):
 class TestSequenceExpandCase1(TestSequenceExpand):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
-        x_lod = [[0, 2, 5]]
+        x_lod = [[2, 3]]
         y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32')
-        y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]]
+        y_lod = [[2, 3], [2, 2, 3, 3, 3]]
         self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
         self.attrs = {'ref_level': 0}
@@ -85,9 +89,9 @@ class TestSequenceExpandCase1(TestSequenceExpand):
 class TestSequenceExpandCase2(TestSequenceExpand):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float32')
-        x_lod = [[0, 1]]
+        x_lod = [[1]]
         y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float32')
-        y_lod = [[0, 2], [0, 2]]
+        y_lod = [[2], [1, 1]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
         self.attrs = {'ref_level': 0}
@@ -95,9 +99,9 @@ class TestSequenceExpandCase2(TestSequenceExpand):
 class TestSequenceExpandCase3(TestSequenceExpand):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32')
-        x_lod = [[0, 1, 2, 3, 4]]
-        y_data = np.random.uniform(0.1, 1, [6, 1]).astype('float32')
-        y_lod = [[0, 2, 4, 4, 6]]
+        x_lod = [[1, 1, 1, 1]]
+        y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
+        y_lod = [[2, 2, 2, 2]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
@@ -105,9 +109,9 @@ class TestSequenceExpandCase4(TestSequenceExpand):
     def set_data(self):
         data = np.random.uniform(0.1, 1, [5 * 2, 1])
         x_data = np.array(data).reshape([5, 2]).astype('float32')
-        x_lod = [[0, 2, 5]]
-        y_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32')
-        y_lod = [[0, 1, 3], [0, 1, 3]]
+        x_lod = [[2, 3]]
+        y_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
+        y_lod = [[2], [2, 3]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
......
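To see the new length-based bookkeeping at work, here is a tiny hand expansion in the style of TestSequenceExpandCase1 above (a sketch, not test code):

    import numpy as np

    x = np.arange(5, dtype='float32').reshape(5, 1)
    x_lens = [2, 3]        # x splits into rows 0-1 and rows 2-4
    repeats = [2, 3]       # ref_level 0 of y_lod: one repeat count per sequence
    out, offset = [], 0
    for x_len, r in zip(x_lens, repeats):
        out.extend([x[offset:offset + x_len]] * r)  # repeat each sequence r times
        offset += x_len
    out = np.vstack(out)
    assert out.shape[0] == 2 * 2 + 3 * 3  # 13 rows, matching y_data above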
@@ -22,7 +22,7 @@ class TestSequenceReshape(OpTest):
     def setUp(self):
         self.op_type = 'sequence_reshape'
         dimension = 12
-        x_lod = [[0, 4, 5, 8, 11]]
+        x_lod = [[4, 1, 3, 3]]
         x = np.random.uniform(0.1, 1, [11, 24]).astype('float32')
         self.inputs = {'X': (x, x_lod)}
@@ -34,13 +34,13 @@ class TestSequenceReshape(OpTest):
     def compute_output(self, x, x_lod, dimension):
         x_width = x.shape[1]
-        out_lod = [[0]]
-        for i in xrange(len(x_lod[0]) - 1):
-            seq_len = x_lod[0][i + 1] - x_lod[0][i]
+        out_lod = [[]]
+        for i in xrange(len(x_lod[0])):
+            seq_len = x_lod[0][i]
             offset = (seq_len * x_width) / dimension
             assert int(offset) * dimension == seq_len * x_width
-            out_lod[0].append(out_lod[0][-1] + int(offset))
-        out = np.zeros(shape=(out_lod[0][-1], dimension)).astype('float32')
+            out_lod[0].append(int(offset))
+        out = np.zeros(shape=(sum(out_lod[0]), dimension)).astype('float32')
         out.ravel()[:] = x.ravel()[:]
         return out, out_lod
@@ -55,7 +55,7 @@ class TestSequenceReshape_reduce(TestSequenceReshape):
     def setUp(self):
         self.op_type = 'sequence_reshape'
         dimension = 24
-        x_lod = [[0, 4, 6, 8, 12]]
+        x_lod = [[4, 2, 2, 4]]
         x = np.random.uniform(0.1, 1, [12, 12]).astype('float32')
         self.inputs = {'X': (x, x_lod)}
@@ -70,7 +70,7 @@ class TestSequenceReshape_same(TestSequenceReshape):
     def setUp(self):
         self.op_type = 'sequence_reshape'
         dimension = 12
-        x_lod = [[0, 4, 6, 8, 12]]
+        x_lod = [[4, 2, 2, 4]]
         x = np.random.uniform(0.1, 1, [12, 12]).astype('float32')
         self.inputs = {'X': (x, x_lod)}
......
@@ -29,20 +29,20 @@ class TestSequenceSliceOp(OpTest):
         self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
         outs = []  #np.zeros((100, 3, 2)).astype('float32')
-        out_lod = [[0]]
-        out_lod_offset = 0
+        out_lod = [[]]
+        lod_offset = 0
         for i in range(len(offset)):
-            sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] +
-                      length[i, 0], :]
-            out_lod_offset = out_lod_offset + len(sub_x)
+            sub_x = x[lod_offset + offset[i, 0]:lod_offset + offset[i, 0] +
+                      length[i, 0], :]
             outs.append(sub_x)
-            out_lod[0].append(out_lod_offset)
+            out_lod[0].append(len(sub_x))
+            lod_offset += lod[0][i]
         outs = np.concatenate(outs, axis=0)
         self.outputs = {'Out': (outs, out_lod)}

     def init_test_case(self):
         self.x_dim = (100, 3, 2)
-        self.x_lod = [[0, 20, 40, 60, 80, 100]]
+        self.x_lod = [[20, 20, 20, 20, 20]]
         self.offset = [[1], [2], [3], [4], [5]]
         self.length = [[10], [8], [6], [4], [2]]
......
@@ -26,15 +26,16 @@ class TestSequenceSoftmaxOp(OpTest):
         self.init_op_type()

         x = np.random.uniform(0.1, 1, (11, 1)).astype("float32")
-        lod = [[0, 4, 5, 8, 11]]
+        lod = [[4, 1, 3, 3]]
         out = np.zeros((11, 1)).astype("float32")
-        for i in range(4):
-            sub_x = x[lod[0][i]:lod[0][i + 1], :]
-            sub_x = sub_x.reshape(1, lod[0][i + 1] - lod[0][i])
+        offset = 0
+        for i in range(len(lod[0])):
+            sub_x = x[offset:offset + lod[0][i], :]
+            sub_x = sub_x.reshape(1, lod[0][i])
             sub_out = stable_softmax(sub_x)
-            out[lod[0][i]:lod[0][i + 1], :] = sub_out.reshape(
-                lod[0][i + 1] - lod[0][i], 1)
+            out[offset:offset + lod[0][i], :] = sub_out.reshape(lod[0][i], 1)
+            offset += lod[0][i]

         self.inputs = {"X": (x, lod)}
         self.outputs = {"Out": out}
......
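The per-sequence softmax walk above generalizes to any length-based LoD; a compact standalone sketch (stable_softmax reimplemented here so the snippet is self-contained):

    import numpy as np

    def stable_softmax(x):
        exps = np.exp(x - np.max(x))
        return exps / exps.sum()

    x = np.random.uniform(0.1, 1, (11, 1)).astype('float32')
    lod = [[4, 1, 3, 3]]                   # same lengths as the test above
    out, offset = np.zeros_like(x), 0
    for seq_len in lod[0]:
        sub = x[offset:offset + seq_len, 0]
        out[offset:offset + seq_len, 0] = stable_softmax(sub)
        offset += seq_len
    assert np.isclose(out[:4].sum(), 1.0)  # each sequence normalizes to one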
@@ -54,12 +54,12 @@ class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase):
     def test_refer_lod(self):
         cpu = core.CPUPlace()
         x_tensor = core.LoDTensor()
-        x_tensor.set_lod([[0, 2, 5, 6]])
+        x_tensor.set_recursive_sequence_lengths([[2, 3, 1]])
         tensor_np = np.random.random(size=(6, 100)).astype('float32')
         x_tensor.set(tensor_np, cpu)

         rank_table_tensor = core.LoDTensor()
-        rank_table_tensor.set_lod([[0, 1, 3, 6]])
+        rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]])
         rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
                               cpu)
@@ -83,7 +83,7 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
         x_tensor.set(tensor_np, cpu)

         rank_table_tensor = core.LoDTensor()
-        rank_table_tensor.set_lod([[0, 1, 3, 6]])
+        rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]])
         rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
                               cpu)
......
@@ -56,7 +56,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
     def test_split_and_merge_lod_tensor_level_0(self):
         tensor = core.LoDTensor()
         tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
-        tensor.set_lod([[0, 3, 9, 10]])
+        tensor.set_recursive_sequence_lengths([[3, 6, 1]])

         mask_np = np.array([0, 1, 0]).astype('bool')
         mask_np = np.expand_dims(mask_np, axis=1)
@@ -68,15 +68,15 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
         expect_true = core.LoDTensor()
         expect_true.set(expect_true_tensor, self.place())
-        expect_true.set_lod([[0, 6]])
+        expect_true.set_recursive_sequence_lengths([[6]])

         expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32')
         expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
-        expect_false_lod = [[0, 3, 4]]
+        expect_false_lod = [[3, 1]]

         expect_false = core.LoDTensor()
         expect_false.set(expect_false_tensor, self.place())
-        expect_false.set_lod(expect_false_lod)
+        expect_false.set_recursive_sequence_lengths(expect_false_lod)

         self.main(
             tensor=tensor,
@@ -126,7 +126,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
     def check_tensor_same(self, actual, expect):
         self.assertTrue(np.allclose(np.array(actual), np.array(expect)))
-        self.assertEqual(actual.lod(), expect.lod())
+        self.assertEqual(actual.recursive_sequence_lengths(),
+                         expect.recursive_sequence_lengths())

 class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
@@ -151,7 +152,7 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
         tensor = core.LoDTensor()
         tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
-        tensor.set_lod([[0, 3, 9, 10]])
+        tensor.set_recursive_sequence_lengths([[3, 6, 1]])

         mask_np = np.array([0, 1, 0]).astype('bool')
         mask_np = np.expand_dims(mask_np, axis=1)
......
@@ -22,22 +22,23 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
     if len(gt_lod) != len(neg_lod):
         raise AssertionError("The input arguments are illegal.")

-    batch_size = len(gt_lod) - 1
+    batch_size = len(gt_lod)

     match_indices = -1 * np.ones((batch_size, num_prior)).astype('int32')
-    neg_indices = np.zeros((neg_lod[-1], 1)).astype('int32')
+    neg_indices = np.zeros((sum(neg_lod), 1)).astype('int32')

+    offset = 0
     for n in range(batch_size):
-        gt_num = gt_lod[n + 1] - gt_lod[n]
+        gt_num = gt_lod[n]
         ids = random.sample([i for i in range(num_prior)], gt_num)
         match_indices[n, ids] = [i for i in range(gt_num)]

         ret_ids = set([i for i in range(num_prior)]) - set(ids)
-        s = neg_lod[n]
-        e = neg_lod[n + 1]
-        l = e - s
+        l = neg_lod[n]
         neg_ids = random.sample(ret_ids, l)
-        neg_indices[s:e, :] = np.array(neg_ids).astype('int32').reshape(l, 1)
+        neg_indices[offset:offset + neg_lod[n], :] = np.array(neg_ids).astype(
+            'int32').reshape(l, 1)
+        offset += neg_lod[n]

     return match_indices, neg_indices
@@ -56,24 +57,28 @@ def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod,
     # init weight for target label
     trg_label_wt = np.zeros((batch_size, num_prior, 1)).astype('float32')

+    gt_offset = 0
+    neg_offset = 0
     for i in range(batch_size):
         cur_indices = match_indices[i]
         col_ids = np.where(cur_indices > -1)
         col_val = cur_indices[col_ids]
-        gt_start = gt_lod[i]
         # target bbox
-        for v, c in zip(col_val + gt_start, col_ids[0].tolist()):
+        for v, c in zip(col_val + gt_offset, col_ids[0].tolist()):
             trg_box[i][c][:] = encoded_box[v][c][:]
         # weight for target bbox
         trg_box_wt[i][col_ids] = 1.0

-        trg_label[i][col_ids] = gt_label[col_val + gt_start]
+        trg_label[i][col_ids] = gt_label[col_val + gt_offset]
         trg_label_wt[i][col_ids] = 1.0
         # set target label weight to 1.0 for the negative samples
         if neg_indices is not None:
-            neg_ids = neg_indices[neg_lod[i]:neg_lod[i + 1]]
+            neg_ids = neg_indices[neg_offset:neg_offset + neg_lod[i]]
             trg_label_wt[i][neg_ids] = 1.0
+        # update offset
+        gt_offset += gt_lod[i]
+        neg_offset += neg_lod[i]

     return trg_box, trg_box_wt, trg_label, trg_label_wt
@@ -83,11 +88,11 @@ class TestTargetAssginFloatType(OpTest):
         self.op_type = "target_assign"
         num_prior = 120
         num_class = 21
-        gt_lod = [0, 5, 11, 23]
-        neg_lod = [0, 4, 7, 13]
+        gt_lod = [5, 6, 12]
+        neg_lod = [4, 3, 6]
         mismatch_value = 0
-        batch_size = len(gt_lod) - 1
-        num_gt = gt_lod[-1]
+        batch_size = len(gt_lod)
+        num_gt = sum(gt_lod)

         encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32')
         gt_label = np.random.randint(
@@ -121,11 +126,11 @@ class TestTargetAssginIntType(OpTest):
         self.op_type = "target_assign"
         num_prior = 120
         num_class = 21
-        gt_lod = [0, 5, 11, 23]
-        neg_lod = [0, 4, 7, 13]
+        gt_lod = [5, 6, 12]
+        neg_lod = [4, 3, 6]
         mismatch_value = 0
-        batch_size = len(gt_lod) - 1
-        num_gt = gt_lod[-1]
+        batch_size = len(gt_lod)
+        num_gt = sum(gt_lod)

         encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32')
         gt_label = np.random.randint(
......
@@ -69,15 +69,14 @@ class TestTensor(unittest.TestCase):
         array[0, 0, 0] = 3
         array[3, 3, 5] = 10
         lod_tensor.set(array, place)
-        lod_tensor.set_lod([[0, 2, 4]])
+        lod_tensor.set_recursive_sequence_lengths([[2, 2]])

         lod_v = numpy.array(lod_tensor)
         self.assertTrue(numpy.alltrue(array == lod_v))

-        lod = lod_tensor.lod()
-        self.assertEqual(0, lod[0][0])
+        lod = lod_tensor.recursive_sequence_lengths()
+        self.assertEqual(2, lod[0][0])
         self.assertEqual(2, lod[0][1])
-        self.assertEqual(4, lod[0][2])

     def test_float_lod_tensor(self):
         place = core.CPUPlace()
@@ -97,21 +96,21 @@ class TestTensor(unittest.TestCase):
         lod_v = numpy.array(lod_tensor)
         self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
         self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
-        self.assertEqual(len(lod_tensor.lod()), 0)
+        self.assertEqual(len(lod_tensor.recursive_sequence_lengths()), 0)

-        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
-        lod_tensor.set_lod(lod_py)
-        lod = lod_tensor.lod()
+        lod_py = [[2, 1], [1, 2, 2]]
+        lod_tensor.set_recursive_sequence_lengths(lod_py)
+        lod = lod_tensor.recursive_sequence_lengths()
         self.assertListEqual(lod_py, lod)

     def test_lod_tensor_init(self):
         scope = core.Scope()
         place = core.CPUPlace()
-        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
+        lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()

         lod_tensor.set_dims([5, 2, 3, 4])
-        lod_tensor.set_lod(lod_py)
+        lod_tensor.set_recursive_sequence_lengths(lod_py)
         lod_tensor.alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
@@ -121,17 +120,17 @@ class TestTensor(unittest.TestCase):
         lod_v = numpy.array(lod_tensor)
         self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
         self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
-        self.assertListEqual(lod_py, lod_tensor.lod())
+        self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths())

     def test_lod_tensor_gpu_init(self):
         if not core.is_compiled_with_cuda():
             return
         place = core.CUDAPlace(0)
-        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
+        lod_py = [[2, 1], [1, 2, 2]]
         lod_tensor = core.LoDTensor()

         lod_tensor.set_dims([5, 2, 3, 4])
-        lod_tensor.set_lod(lod_py)
+        lod_tensor.set_recursive_sequence_lengths(lod_py)
         lod_tensor.alloc_float(place)
         tensor_array = numpy.array(lod_tensor)
         tensor_array[0, 0, 0, 0] = 1.0
@@ -141,7 +140,7 @@ class TestTensor(unittest.TestCase):
         lod_v = numpy.array(lod_tensor)
         self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
         self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
-        self.assertListEqual(lod_py, lod_tensor.lod())
+        self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths())

     def test_empty_tensor(self):
         place = core.CPUPlace()
......
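Taken together, the test changes above exercise the renamed pybind surface: Python now reads and writes length-based LoD only. A minimal round-trip sketch of the new API (assuming a Paddle build that includes this patch):

    import numpy as np
    import paddle.fluid.core as core

    t = core.LoDTensor()
    t.set(np.arange(6).reshape(6, 1).astype('float32'), core.CPUPlace())
    # three sequences of lengths 2, 3 and 1
    t.set_recursive_sequence_lengths([[2, 3, 1]])
    assert t.recursive_sequence_lengths() == [[2, 3, 1]]
    # internally stored as the offset-based LoD [[0, 2, 5, 6]]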
@@ -34,8 +34,8 @@ class CTCForward(object):
         self.level = 0
         self.num_classes = softmax.shape[1]
-        self.batch_size = len(softmax_lod[self.level]) - 1
-        assert self.batch_size == len(labels_lod[self.level]) - 1
+        self.batch_size = len(softmax_lod[self.level])
+        assert self.batch_size == len(labels_lod[self.level])

         self.loss = np.zeros([self.batch_size, 1], dtype="float32")
         self.gradient = np.zeros(self.softmax.shape, dtype="float32")
@@ -156,16 +156,20 @@ class CTCForward(object):
         return -log_prob

     def forward(self):
+        softmax_offset = 0
+        labels_offset = 0
         for i in range(self.batch_size):
-            softmax_start_i = self.softmax_lod[self.level][i]
-            softmax_end_i = self.softmax_lod[self.level][i + 1]
-            labels_start_i = self.labels_lod[self.level][i]
-            labels_end_i = self.labels_lod[self.level][i + 1]
+            softmax_start_i = softmax_offset
+            softmax_end_i = softmax_offset + self.softmax_lod[self.level][i]
+            labels_start_i = labels_offset
+            labels_end_i = labels_offset + self.labels_lod[self.level][i]

             softmax_a_sequence = self.softmax[softmax_start_i:softmax_end_i, :]
             labels_a_sequence = self.labels[labels_start_i:labels_end_i, :]
             self.loss[i] = self.forward_a_sequence(softmax_a_sequence,
                                                    labels_a_sequence)
+            softmax_offset += self.softmax_lod[self.level][i]
+            labels_offset += self.labels_lod[self.level][i]
         return self.loss
@@ -173,8 +177,8 @@ class TestWarpCTCOp(OpTest):
     def config(self):
         self.batch_size = 4
         self.num_classes = 8
-        self.logits_lod = [[0, 4, 5, 8, 11]]
-        self.labels_lod = [[0, 3, 4, 8, 12]]
+        self.logits_lod = [[4, 1, 3, 3]]
+        self.labels_lod = [[3, 1, 4, 4]]
         self.blank = self.num_classes - 1
         self.norm_by_times = False
@@ -184,11 +188,13 @@ class TestWarpCTCOp(OpTest):
         logits = np.random.uniform(
             0.1, 1.0,
-            [self.logits_lod[0][-1], self.num_classes]).astype("float32")
+            [sum(self.logits_lod[0]), self.num_classes]).astype("float32")
         softmax = np.apply_along_axis(stable_softmax, 1, logits)
         # labels should not be blank
         labels = np.random.randint(
-            0, self.num_classes - 1, [self.labels_lod[0][-1], 1], dtype="int32")
+            0,
+            self.num_classes - 1, [sum(self.labels_lod[0]), 1],
+            dtype="int32")

         ctc = CTCForward(softmax, self.logits_lod, labels, self.labels_lod,
                          self.blank, self.norm_by_times)
@@ -196,9 +202,8 @@ class TestWarpCTCOp(OpTest):
         max_sequence_length = 0
         for i in range(self.batch_size):
-            max_sequence_length = max(
-                max_sequence_length,
-                self.logits_lod[0][i + 1] - self.logits_lod[0][i])
+            max_sequence_length = max(max_sequence_length,
+                                      self.logits_lod[0][i])
         self.gradient = np.zeros(
             [max_sequence_length, self.batch_size, self.num_classes],
             dtype="float32")
@@ -222,8 +227,8 @@ class TestWarpCTCOpCase1(TestWarpCTCOp):
     def config(self):
         self.batch_size = 4
         self.num_classes = CUDA_BLOCK_SIZE + 2
-        self.logits_lod = [[0, 4, 5, 8, 11]]
-        self.labels_lod = [[0, 3, 4, 8, 12]]
+        self.logits_lod = [[4, 1, 3, 3]]
+        self.labels_lod = [[3, 1, 4, 4]]
         self.blank = 0
         self.norm_by_times = False
......
@@ -76,11 +76,11 @@ class TestWeightNormalization(unittest.TestCase):
             lod_level_i = numpy.random.randint(
                 low=1,
                 high=5,
-                size=self.batch_size if i == 0 else lod_level_i[-1])
-            lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist()
+                size=self.batch_size
+                if i == 0 else sum(lod_level_i)).tolist()
             data_lod.append(lod_level_i)
         data_value = numpy.random.random(
-            size=[data_lod[-1][-1] if data_lod else self.batch_size
+            size=[sum(data_lod[-1]) if data_lod else self.batch_size
                   ] + data_shape).astype('float32')
         self.data[data_name] = (data_value, data_lod)
@@ -90,7 +90,7 @@ class TestWeightNormalization(unittest.TestCase):
             tensor = fluid.Tensor()
             tensor.set(self.data[desc[0]][0], place)
             if self.data[desc[0]][1]:
-                tensor.set_lod(self.data[desc[0]][1])
+                tensor.set_recursive_sequence_lengths(self.data[desc[0]][1])
             self.inputs[desc[0]] = tensor
......
@@ -22,7 +22,7 @@ def as_lodtensor(np_array, lod, place):
     tensor = core.LoDTensor()
     tensor.set(np_value, place)
     if lod is not None:
-        tensor.set_lod(lod)
+        tensor.set_recursive_sequence_lengths(lod)
     return tensor
@@ -73,7 +73,7 @@ def set_input(scope, op, inputs, place):
         if isinstance(var, tuple) or isinstance(var, np.ndarray):
             tensor = scope.find_var(var_name).get_tensor()
             if isinstance(var, tuple):
-                tensor.set_lod(var[1])
+                tensor.set_recursive_sequence_lengths(var[1])
                 var = var[0]
             tensor.set_dims(var.shape)
             tensor.set(var, place)
......
@@ -7,7 +7,7 @@ for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do
     if [[ $file =~ ^(paddle/api/.*|paddle/capi/.*|paddle/contrib/.*|paddle/cuda/.*|paddle/function/.*|paddle/gserver/.*|paddle/math/.*|paddle/optimizer/.*|paddle/parameter/.*|paddle/pserver/.*|paddle/trainer/.*|paddle/utils/.*) ]]; then
         continue;
     else
-        cpplint $file;
+        cpplint --filter=-readability/fn_size $file;
         TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?);
     fi
 done
......