Commit bd57dec1 authored by chenweihang

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into squeeze_op

@@ -173,6 +173,7 @@ are transformed into offsets of elements/words as follows:

## Slicing of LoD Tensors

When we use the above 2-level LoD Tensor as the input to a nested-RNN, we need to retrieve certain sequences. Here we define the sequence identified by branch <i,j,...> as the **<i,j,...>-slice**.

For example, the <2>-slice of above example is

@@ -189,3 +190,22 @@ and the <2,0>-slice of above slice is

10 12
||
```
## Length Representation vs Offset Representation
The offset representation is an implementation-oriented choice, and exposing it directly makes the idea behind LoDTensor harder to grasp.
Hence, we encapsulate this implementation detail in C++ and expose the original length representation in our Python API.
Specifically, we call this length representation `recursive_sequence_lengths` and users can use the following code to set or get the `recursive_sequence_lengths` of a LoDTensor in Python:
```Python
# length representation of lod called recursive_sequence_lengths
recursive_seq_lens = [[3, 1, 2], [2, 2, 1, 3, 1, 2]]
# Create a LoDTensor that has the above recursive_sequence_lengths info.
# This recursive_sequence_lengths will be converted to an offset representation of LoD in the C++ implementation under the hood.
# (`data` and `place` are assumed to be defined elsewhere, as in the create_lod_tensor examples.)
tensor = fluid.create_lod_tensor(data, recursive_seq_lens, place)
# Set/Change the recursive_sequence_lengths info of LoDTensor
tensor.set_recursive_sequence_lengths([[3, 1, 2]])
# Get the recursive_sequence_lengths info of a LoDTensor (the offset-based LoD representation stored in C++ will be converted
# back to length-based recursive_sequence_lengths), new_recursive_seq_lens = [[3, 1, 2]]
new_recursive_seq_lens = tensor.recursive_sequence_lengths()
```
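For intuition, the length-to-offset conversion that happens under the hood is just a running sum over each level's lengths. Below is a minimal sketch in plain Python; `lengths_to_offsets` is a hypothetical helper for illustration only, not part of the fluid API:

```Python
# Convert one level of length-based LoD into the offset-based form stored in C++.
def lengths_to_offsets(lengths):
    offsets = [0]
    for length in lengths:
        offsets.append(offsets[-1] + length)
    return offsets

# The two levels of recursive_seq_lens above become:
print(lengths_to_offsets([3, 1, 2]))           # [0, 3, 4, 6]
print(lengths_to_offsets([2, 2, 1, 3, 1, 2]))  # [0, 2, 4, 5, 8, 9, 11]
```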
# How to Use the timeline Tool for Performance Profiling

1. Wrap the main training loop in `with profiler.profiler(...)`. After the program runs, the code will generate a profile record file at `/tmp/profile`.
**Note:**
Please do not run too many iterations while the timeline is recording, since the number of records in the timeline is proportional to the number of iterations.
```python
with profiler.profiler('All', 'total', '/tmp/profile') as prof:
for pass_id in range(pass_num):
for batch_id, data in enumerate(train_reader()):
exe.run(fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[])
...
```
1. Run `python paddle/tools/timeline.py` to process `/tmp/profile`. By default this script generates a file at `/tmp/timeline`; the path can be changed via command-line arguments (see the sketch after this list). For details, refer to [timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py).
1. Open the Chrome browser, navigate to <chrome://tracing/>, and use the `load` button to load the generated `timeline` file.
![chrome tracing](./tracing.jpeg)
1. The result is shown in the figure below; you can zoom in to inspect the details of the timeline.
![chrome timeline](./timeline.jpeg)
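A programmatic way to run step 2 might look like the sketch below. The flag names (`--profile_path`, `--timeline_path`) are assumptions about the script's command-line options; verify them with `python paddle/tools/timeline.py --help` in your checkout:

```python
# Hypothetical wrapper around tools/timeline.py; flag names are assumptions.
import subprocess

subprocess.check_call([
    "python", "paddle/tools/timeline.py",
    "--profile_path", "/tmp/profile",    # record file written by profiler.profiler(...)
    "--timeline_path", "/tmp/timeline",  # output file to load in chrome://tracing
])
```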
@@ -61,7 +61,7 @@ cc_library(paddle_inference_tensorrt_subgraph_engine
inference_api_test(test_paddle_inference_api_tensorrt_subgraph_engine ARGS test_word2vec)
endif()
-if (WITH_ANAKIN AND WITH_TESTING) # only needed in CI
+if (WITH_ANAKIN) # only needed in CI
# Due to Anakin do not have official library releases and the versions of protobuf and cuda do not match Paddle's,
# so anakin library will not be merged to our official inference library. To use anakin prediction API, one need to
# compile the libinference_anakin_api.a and compile with anakin.so.
@@ -71,10 +71,12 @@ if (WITH_ANAKIN AND WITH_TESTING) # only needed in CI
target_compile_options(inference_anakin_api_shared BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
target_link_libraries(inference_anakin_api anakin anakin_saber_common)
target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common)
-cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
-        ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
-        DEPS inference_anakin_api)
-target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+if (WITH_TESTING)
+  cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
+          ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
+          DEPS inference_anakin_api)
+  target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+endif(WITH_TESTING)
endif()
if(WITH_TESTING)
...
@@ -253,6 +253,9 @@ void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
      t->set_lod(lod_tensors[j].lod());
    }
  }
+  for (auto &p : member_->places_) {
+    platform::DeviceContextPool::Instance().Get(p)->Wait();
+  }
}

ParallelExecutor::~ParallelExecutor() {
...
@@ -106,6 +106,7 @@ function cmake_gen() {
        -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF}
        -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
        -DWITH_CONTRIB=${WITH_CONTRIB:-ON}
+       -DWITH_ANAKIN=${WITH_ANAKIN:-ON}
        -DWITH_INFERENCE_DEMO=${WITH_INFERENCE_DEMO:-ON}
========================================
EOF
...
@@ -27,6 +27,7 @@ __all__ = [
    'Variable',
    'Program',
    'Operator',
+   'Parameter',
    'default_startup_program',
    'default_main_program',
    'program_guard',
@@ -1922,7 +1923,7 @@ def program_guard(main_program, startup_program=None):
def get_var(name, program=None):
    """
    Get a variable by name from the global block of a program.

    Args:
        name(str): name of the variable
        program(Program|None): program object.
...
@@ -5078,12 +5078,12 @@ def mean_iou(input, label, num_classes):
    out_correct = helper.create_tmp_variable(dtype='int32')
    helper.append_op(
        type="mean_iou",
-       inputs={"predictions": input,
-               "labels": label},
+       inputs={"Predictions": input,
+               "Labels": label},
        outputs={
-           "out_mean_iou": out_mean_iou,
-           "out_wrong": out_wrong,
-           "out_correct": out_correct
+           "OutMeanIou": out_mean_iou,
+           "OutWrong": out_wrong,
+           "OutCorrect": out_correct
        },
        attrs={"num_classes": num_classes})
    return out_mean_iou, out_wrong, out_correct
...
@@ -18,15 +18,16 @@ import numpy as np

__all__ = ['create_lod_tensor', 'create_random_int_lodtensor']

-def create_lod_tensor(data, lod, place):
+def create_lod_tensor(data, recursive_seq_lens, place):
    """
    Create a lod tensor from a numpy array, a list, or an existing lod tensor.

    Create a lod tensor by doing the following:

-   1. Check that the length-based input lod is valid.
+   1. Check that the length-based level of detail (LoD) also known as
+      recursive_sequence_lengths of the input is valid.

-   2. Convert the length-based lod to a offset-based LoD.
+   2. Convert recursive_sequence_lengths to a offset-based LoD.

    3. Copy the data from a numpy array, a list or a existing lod tensor to
       CPU or GPU device (based on input place).

@@ -37,45 +38,47 @@ def create_lod_tensor(data, lod, place):

    Suppose we want LoDTensor to hold data for sequences of word, where each
    word is represented by an integer. If we want to create a LoDTensor to
    represent two sentences, one of 2 words, and one of 3 words.

    Then :code:`data` can be a numpy array of integers with shape (5, 1).
-   :code:`lod` will be [[2, 3]], indicating the length(# of words) in each
-   sentence. This length-based input lod [[2, 3]] will be converted to
-   offset-based lod [[0, 2, 5]] inside the function call.
+   :code:`recursive_seq_lens` will be [[2, 3]], indicating the length(# of words) in each
+   sentence. This length-based :code:`recursive_seq_lens` [[2, 3]] will be converted to
+   offset-based LoD [[0, 2, 5]] inside the function call.

    Please reference :ref:`api_guide_low_level_lod_tensor` for more details
    regarding LoD.

    Args:
        data(numpy.ndarray|list|LoDTensor): a numpy array or a LoDTensor or a
            list holding the data to be copied.
-       lod(list): a list of lists indicating the length-based LoD info
-           specified by the user.
+       recursive_seq_lens(list): a list of lists indicating the length-based level of detail
+           info specified by the user.
        place(Place): CPU or GPU place indicating where the data in the new
            LoDTensor will be stored.

    Returns:
-       A fluid LoDTensor object with tensor data and lod info.
+       A fluid LoDTensor object with tensor data and recursive_seq_lens info.
    """
    if isinstance(data, core.LoDTensor):
-       return create_lod_tensor(np.array(data), lod, place)
+       return create_lod_tensor(np.array(data), recursive_seq_lens, place)
    elif isinstance(data, list):
        # When input data is a list, it only deal with the case where the base element
        # is an index of shape [1] and dtype int64 (e.g., word id). Hence, the generated
        # LoDTensor will be of shape [n, 1] and dtype int64, where `n` is the total number
        # of words or other indexes in the sequence.
-       new_lod = []
+       new_recursive_seq_lens = []
        for seq in data:
-           new_lod.append(len(seq))
+           new_recursive_seq_lens.append(len(seq))
-       assert [new_lod] == lod, "data and lod do not match"
+       assert [
+           new_recursive_seq_lens
+       ] == recursive_seq_lens, "data and recursive_seq_lens do not match"
        flattened_data = np.concatenate(data, axis=0).astype("int64")
        flattened_data = flattened_data.reshape([len(flattened_data), 1])
-       return create_lod_tensor(flattened_data, lod, place)
+       return create_lod_tensor(flattened_data, recursive_seq_lens, place)
    elif isinstance(data, np.ndarray):
        tensor = core.LoDTensor()
        tensor.set(data, place)
-       tensor.set_recursive_sequence_lengths(lod)
+       tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        assert tensor.has_valid_recursive_sequence_lengths(
        ), "the provided lod info is invalid"
        return tensor

@@ -84,7 +87,8 @@ def create_lod_tensor(data, lod, place):
        "data should be either a LoDTensor, a Numpy array or a list")

-def create_random_int_lodtensor(lod, base_shape, place, low, high):
+def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low,
+                                high):
    """
    Create a LoDTensor containing random integers.

@@ -95,7 +99,7 @@ def create_random_int_lodtensor(lod, base_shape, place, low, high):

    The function does the following:

    1. Calculate the overall shape of the LoDTensor based on the length-based
-      :code:`lod` input and the shape of the basic element in
+      :code:`recursive_seq_lens` input and the shape of the basic element in
       :code:`base_shape`.

    2. Create a numpy array of this shape.

@@ -105,12 +109,13 @@ def create_random_int_lodtensor(lod, base_shape, place, low, high):

    Suppose we want LoDTensor to hold data for sequences of word, where each
    word is represented by an integer. If we want to create a LoDTensor to
    represent two sentences, one of 2 words, and one of 3 words. Then
-   'base_shape' is [1], input length-based 'lod' is [[2, 3]]. Then the overall
-   shape of the LoDTensor would be [5, 1], holding 5 words for two sentences.
+   'base_shape' is [1], input length-based 'recursive_seq_lens' is [[2, 3]].
+   Then the overall shape of the LoDTensor would be [5, 1], holding 5 words
+   for two sentences.

    Args:
-       lod(list): a list of lists indicating the length-based LoD info
-           specified by the user.
+       recursive_seq_lens(list): a list of lists indicating the length-based
+           level of detail info specified by the user.
        base_shape(list): the shape of the basic element to be held by the
            LoDTensor.
        place(Place): CPU or GPU place indicating where the data in the new

@@ -119,11 +124,11 @@ def create_random_int_lodtensor(lod, base_shape, place, low, high):
        high(int): the upper bound of the random integers.

    Returns:
-       A fluid LoDTensor object with tensor data and lod info.
+       A fluid LoDTensor object with tensor data and recursive_seq_lens info.
    """
    assert isinstance(base_shape, list), "base_shape should be a list"
    # append the total number of basic elements to the front of its shape
-   overall_shape = [sum(lod[-1])] + base_shape
+   overall_shape = [sum(recursive_seq_lens[-1])] + base_shape
    # the range of integer data elements is [low, high]
    data = np.random.random_integers(low, high, overall_shape).astype("int64")
-   return create_lod_tensor(data, lod, place)
+   return create_lod_tensor(data, recursive_seq_lens, place)
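Tying the renamed parameter back to the docstring example above, here is a minimal end-to-end sketch of the new signatures (the concrete data values are illustrative; `fluid` is assumed importable):

```python
# Minimal sketch exercising the renamed recursive_seq_lens parameter.
import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()

# Two sentences of 2 and 3 words: data has shape (5, 1), lengths are [[2, 3]].
data = np.arange(5).reshape([5, 1]).astype("int64")
tensor = fluid.create_lod_tensor(data, [[2, 3]], place)
print(tensor.recursive_sequence_lengths())  # [[2, 3]]

# Random-integer variant: overall shape is [sum([2, 3])] + [1] = [5, 1].
rand_tensor = fluid.create_random_int_lodtensor(
    [[2, 3]], base_shape=[1], place=place, low=0, high=9)
print(rand_tensor.shape())  # [5, 1]
```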
@@ -1113,7 +1113,6 @@ class ModelAverage(Optimizer):
    Args:
        average_window_rate: The rate of average window.
-       params_grads: A list of parameter-grad variable pairs.
        min_average_window: The minimum size of average window.
        max_average_window: The maximum size of average window.
@@ -1122,8 +1121,8 @@ class ModelAverage(Optimizer):
    .. code-block:: python

        optimizer = fluid.optimizer.Momentum()
-       _, params_grads = optimizer.minimize(cost)
-       model_average = fluid.optimizer.ModelAverage(params_grads, 0.15,
+       optimizer.minimize(cost)
+       model_average = fluid.optimizer.ModelAverage(0.15,
                                                     min_average_window=10000,
                                                     max_average_window=20000)
        for pass_id in range(args.pass_num):
@@ -1137,7 +1136,6 @@ class ModelAverage(Optimizer):
    def __init__(self,
                 average_window_rate,
-                params_grads=None,
                 min_average_window=10000,
                 max_average_window=10000,
                 **kwargs):
@@ -1146,21 +1144,16 @@ class ModelAverage(Optimizer):
        self.min_average_window = min_average_window
        self.max_average_window = max_average_window
-       self.params_grads = [] if params_grads is None else params_grads
-       params = {}
-       for param, grad in self.params_grads:
-           if param.do_model_average != False:
-               params[param.name] = (param, grad)
+       self.params_grads = []
        for param in framework.default_main_program().global_block(
        ).all_parameters():
-           if param.name not in params and param.do_model_average != False:
+           if param.do_model_average != False:
                grad = param.block.create_var(
                    name=unique_name.generate(".".join([param.name, 'tmp'])),
                    dtype=param.dtype,
                    persistable=False,
                    stop_gradient=True)
-               params[param.name] = (param, grad)
-       self.params_grads = params.values()
+               self.params_grads.append((param, grad))

        for param, grad in self.params_grads:
            self._append_average_accumulate_op(param)
...
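With `params_grads` removed from the constructor, usage reduces to the pattern in the docstring above; a hedged sketch follows. The Momentum arguments and the `apply()` context-manager call are assumptions about this fluid version, and `cost`/`exe` are assumed to be defined by the surrounding training script:

```python
# Sketch: ModelAverage now discovers parameters from the default main program.
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize(cost)
model_average = fluid.optimizer.ModelAverage(
    0.15, min_average_window=10000, max_average_window=20000)

# ... run training as usual, then evaluate with the averaged parameters:
with model_average.apply(exe):
    pass  # run inference/evaluation here against the averaged weights
```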
@@ -206,35 +206,35 @@ def infer(use_cuda, inference_program, params_dirname):
    inferencer = fluid.Inferencer(
        inference_program, param_path=params_dirname, place=place)

-   # Setup inputs by creating LoDTensors to represent sequences of words.
-   # Here each word is the basic element of these LoDTensors and the shape of
+   # Setup input by creating LoDTensor to represent sequence of words.
+   # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
-   # which has only one lod level. Then the created LoDTensors will have only
+   # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[3, 4, 2]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_n2 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_n1 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_0 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_p1 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_p2 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
    pred = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=PRED_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=PRED_DICT_LEN - 1)
    mark = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=MARK_DICT_LEN - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=MARK_DICT_LEN - 1)

    results = inferencer.infer(
        {
...
@@ -229,11 +229,13 @@ def decode_main(use_cuda, is_sparse):
        [1. for _ in range(batch_size)], dtype='float32')
    init_ids_data = init_ids_data.reshape((batch_size, 1))
    init_scores_data = init_scores_data.reshape((batch_size, 1))
-   init_lod = [1] * batch_size
-   init_lod = [init_lod, init_lod]
+   init_recursive_seq_lens = [1] * batch_size
+   init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens]

-   init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place)
-   init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place)
+   init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens,
+                                      place)
+   init_scores = fluid.create_lod_tensor(init_scores_data,
+                                         init_recursive_seq_lens, place)

    train_data = paddle.batch(
        paddle.reader.shuffle(
@@ -257,7 +259,7 @@ def decode_main(use_cuda, is_sparse):
            feed=feed_dict,
            fetch_list=[translation_ids, translation_scores],
            return_numpy=False)
-       print result_ids.lod()
+       print result_ids.recursive_sequence_lengths()
        break
...
@@ -209,13 +209,15 @@ def infer(use_cuda, inference_program, params_dirname):
        inference_program, param_path=params_dirname, place=place)

    # Use the first data from paddle.dataset.movielens.test() as input.
-   # Use create_lod_tensor(data, lod, place) API to generate LoD Tensor,
-   # where `data` is a list of sequences of index numbers, `lod` is
-   # the level of detail (lod) info associated with `data`.
+   # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
+   # to generate LoD Tensor where `data` is a list of sequences of index
+   # numbers, `recursive_sequence_lengths` is the length-based level of detail
+   # (lod) info associated with `data`.
    # For example, data = [[10, 2, 3], [2, 3]] means that it contains
    # two sequences of indexes, of length 3 and 2, respectively.
-   # Correspondingly, lod = [[3, 2]] contains one level of detail info,
-   # indicating that `data` consists of two sequences of length 3 and 2.
+   # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
+   # level of detail info, indicating that `data` consists of two sequences
+   # of length 3 and 2, respectively.
    user_id = fluid.create_lod_tensor([[1]], [[1]], place)
    gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
    age_id = fluid.create_lod_tensor([[0]], [[1]], place)
...
@@ -128,17 +128,17 @@ def infer(use_cuda, inference_program, params_dirname=None):
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
-   # which has only one lod level. Then the created LoDTensor will have only
+   # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[3, 4, 2]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=len(word_dict) - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)
...
@@ -143,17 +143,17 @@ def infer(use_cuda, inference_program, params_dirname=None):
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
-   # which has only one lod level. Then the created LoDTensor will have only
+   # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[3, 4, 2]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=len(word_dict) - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)
...
@@ -138,17 +138,17 @@ def infer(use_cuda, inference_program, params_dirname=None):
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
-   # which has only one lod level. Then the created LoDTensor will have only
+   # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[3, 4, 2]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=len(word_dict) - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)
...
@@ -124,21 +124,22 @@ def infer(use_cuda, inference_program, params_dirname=None):
    # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
    # is simply an index to look up for the corresponding word vector and hence
-   # the shape of word (base_shape) should be [1]. The length-based level of
-   # detail (lod) info of each LoDtensor should be [[1]] meaning there is only
-   # one lod_level and there is only one sequence of one word on this level.
-   # Note that lod info should be a list of lists.
-   lod = [[1]]
+   # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
+   # which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
+   # meaning there is only one level of detail and there is only one sequence of
+   # one word on this level.
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[1]]
    base_shape = [1]
    # The range of random integers is [low, high]
    first_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
    second_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
    third_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
    fourth_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)

    result = inferencer.infer(
        {
...
@@ -238,17 +238,21 @@ def infer(word_dict, use_cuda, save_dirname=None):
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
-   # which has only one lod level. Then the created LoDTensor will have only
+   # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[3, 4, 2]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)

    # Construct feed as a dictionary of {feed_target_name: feed_target_data}
    # and results will contain a list of data corresponding to fetch_targets.
@@ -257,7 +261,7 @@ def infer(word_dict, use_cuda, save_dirname=None):
            feed={feed_target_names[0]: tensor_words},
            fetch_list=fetch_targets,
            return_numpy=False)
-       print(results[0].lod())
+       print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)
        print("Inference results: ", np_data)
...
@@ -247,35 +247,67 @@ def infer(use_cuda, save_dirname=None):
    [inference_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

-   # Setup inputs by creating LoDTensors to represent sequences of words.
-   # Here each word is the basic element of these LoDTensors and the shape of
+   # Setup input by creating LoDTensor to represent sequence of words.
+   # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
-   # which has only one lod level. Then the created LoDTensors will have only
+   # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[3, 4, 2]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)
    pred = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=pred_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=pred_dict_len - 1)
    ctx_n2 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)
    ctx_n1 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)
    ctx_0 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)
    ctx_p1 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)
    ctx_p2 = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=word_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=word_dict_len - 1)
    mark = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=mark_dict_len - 1)
+       recursive_seq_lens,
+       base_shape,
+       place,
+       low=0,
+       high=mark_dict_len - 1)

    # Construct feed as a dictionary of {feed_target_name: feed_target_data}
    # and results will contain a list of data corresponding to fetch_targets.
@@ -301,7 +333,7 @@ def infer(use_cuda, save_dirname=None):
            },
            fetch_list=fetch_targets,
            return_numpy=False)
-       print(results[0].lod())
+       print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)
...
@@ -108,7 +108,7 @@ def decoder_decode(context, is_sparse):
        pre_state = pd.array_read(array=state_array, i=counter)
        pre_score = pd.array_read(array=scores_array, i=counter)

-       # expand the lod of pre_state to be the same with pre_score
+       # expand the recursive_sequence_lengths of pre_state to be the same with pre_score
        pre_state_expanded = pd.sequence_expand(pre_state, pre_score)
        pre_ids_emb = pd.embedding(
@@ -252,11 +252,13 @@ def decode_main(use_cuda, is_sparse):
        [1. for _ in range(batch_size)], dtype='float32')
    init_ids_data = init_ids_data.reshape((batch_size, 1))
    init_scores_data = init_scores_data.reshape((batch_size, 1))
-   init_lod = [1] * batch_size
-   init_lod = [init_lod, init_lod]
+   init_recursive_seq_lens = [1] * batch_size
+   init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens]

-   init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place)
-   init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place)
+   init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens,
+                                      place)
+   init_scores = fluid.create_lod_tensor(init_scores_data,
+                                         init_recursive_seq_lens, place)

    train_data = paddle.batch(
        paddle.reader.shuffle(
@@ -280,7 +282,7 @@ def decode_main(use_cuda, is_sparse):
            feed=feed_dict,
            fetch_list=[translation_ids, translation_scores],
            return_numpy=False)
-       print result_ids.lod()
+       print result_ids.recursive_sequence_lengths()
        break
...
@@ -260,13 +260,15 @@ def infer(use_cuda, save_dirname=None):
    # Use the first data from paddle.dataset.movielens.test() as input
    assert feed_target_names[0] == "user_id"
-   # Use create_lod_tensor(data, lod, place) API to generate LoD Tensor
-   # where `data` is a list of sequences of index numbers, `lod` is
-   # the level of detail (lod) info associated with `data`.
+   # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
+   # to generate LoD Tensor where `data` is a list of sequences of index
+   # numbers, `recursive_sequence_lengths` is the length-based level of detail
+   # (lod) info associated with `data`.
    # For example, data = [[10, 2, 3], [2, 3]] means that it contains
    # two sequences of indexes, of length 3 and 2, respectively.
-   # Correspondingly, lod = [[3, 2]] contains one level of detail info,
-   # indicating that `data` consists of two sequences of length 3 and 2.
+   # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
+   # level of detail info, indicating that `data` consists of two sequences
+   # of length 3 and 2, respectively.
    user_id = fluid.create_lod_tensor([[1]], [[1]], place)

    assert feed_target_names[1] == "gender_id"
...
@@ -216,19 +216,19 @@ def infer(use_cuda, save_dirname=None):
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
-   # Suppose the length_based level of detail (lod) info is set to [[4, 6]],
-   # which has only one lod level. Then the created LoDTensor will have only
+   # Suppose the recursive_sequence_lengths info is set to [[4, 6]],
+   # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for two sentences of
    # length 4 and 6, respectively.
-   # Note that lod info should be a list of lists.
-   lod = [[4, 6]]
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[4, 6]]
    base_shape = [1]
    # The range of random integers is [low, high]
    word_data = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=1)
+       recursive_seq_lens, base_shape, place, low=0, high=1)
    trg_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=1)
+       recursive_seq_lens, base_shape, place, low=0, high=1)

    # Construct feed as a dictionary of {feed_target_name: feed_target_data}
    # and results will contain a list of data corresponding to fetch_targets.
@@ -241,7 +241,7 @@ def infer(use_cuda, save_dirname=None):
            },
            fetch_list=fetch_targets,
            return_numpy=False)
-       print(results[0].lod())
+       print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference shape: ", np_data.shape)
        print("Inference results: ", np_data)
...
@@ -168,21 +168,22 @@ def infer(use_cuda, save_dirname=None):
    # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
    # is simply an index to look up for the corresponding word vector and hence
-   # the shape of word (base_shape) should be [1]. The length-based level of
-   # detail (lod) info of each LoDtensor should be [[1]] meaning there is only
-   # one lod_level and there is only one sequence of one word on this level.
-   # Note that lod info should be a list of lists.
-   lod = [[1]]
+   # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
+   # which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
+   # meaning there is only one level of detail and there is only one sequence of
+   # one word on this level.
+   # Note that recursive_sequence_lengths should be a list of lists.
+   recursive_seq_lens = [[1]]
    base_shape = [1]
    # The range of random integers is [low, high]
    first_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
    second_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
    third_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
    fourth_word = fluid.create_random_int_lodtensor(
-       lod, base_shape, place, low=0, high=dict_size - 1)
+       recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)

    assert feed_target_names[0] == 'firstw'
    assert feed_target_names[1] == 'secondw'
@@ -200,7 +201,7 @@ def infer(use_cuda, save_dirname=None):
            },
            fetch_list=fetch_targets,
            return_numpy=False)
-       print(results[0].lod())
+       print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)
...
@@ -19,18 +19,21 @@ import unittest

class TestLoDTensor(unittest.TestCase):
-   def test_pybind_lod(self):
+   def test_pybind_recursive_seq_lens(self):
        tensor = fluid.LoDTensor()
-       lod = []
-       tensor.set_recursive_sequence_lengths(lod)
-       lod = [[], [1], [3]]
-       self.assertRaises(Exception, tensor.set_recursive_sequence_lengths, lod)
-       lod = [[0], [2], [3]]
-       self.assertRaises(Exception, tensor.set_recursive_sequence_lengths, lod)
+       recursive_seq_lens = []
+       tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+       recursive_seq_lens = [[], [1], [3]]
+       self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
+                         recursive_seq_lens)
+       recursive_seq_lens = [[0], [2], [3]]
+       self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
+                         recursive_seq_lens)

-       lod = [[1, 2, 3]]
-       tensor.set_recursive_sequence_lengths(lod)
-       self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+       recursive_seq_lens = [[1, 2, 3]]
+       tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+       self.assertEqual(tensor.recursive_sequence_lengths(),
+                        recursive_seq_lens)
        tensor.set(np.random.random([6, 1]), fluid.CPUPlace())
        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
@@ -38,13 +41,14 @@ class TestLoDTensor(unittest.TestCase):
        # Each level's sum should be equal to the number of items in the next level
        # Moreover, last level's sum should be equal to the tensor height
-       lod = [[2, 3], [1, 3, 1, 2, 2]]
-       tensor.set_recursive_sequence_lengths(lod)
-       self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+       recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 2]]
+       tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+       self.assertEqual(tensor.recursive_sequence_lengths(),
+                        recursive_seq_lens)
        tensor.set(np.random.random([8, 1]), fluid.CPUPlace())
        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
-       lod = [[2, 3], [1, 3, 1, 2, 1]]
-       tensor.set_recursive_sequence_lengths(lod)
+       recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]]
+       tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
@@ -52,35 +56,42 @@ class TestLoDTensor(unittest.TestCase):
    def test_create_lod_tensor(self):
        # Create LoDTensor from a list
        data = [[1, 2, 3], [3, 4]]
-       wrong_lod = [[2, 2]]
-       correct_lod = [[3, 2]]
-       self.assertRaises(AssertionError, create_lod_tensor, data, wrong_lod,
-                         fluid.CPUPlace())
-       tensor = create_lod_tensor(data, correct_lod, fluid.CPUPlace())
-       self.assertEqual(tensor.recursive_sequence_lengths(), correct_lod)
+       wrong_recursive_seq_lens = [[2, 2]]
+       correct_recursive_seq_lens = [[3, 2]]
+       self.assertRaises(AssertionError, create_lod_tensor, data,
+                         wrong_recursive_seq_lens, fluid.CPUPlace())
+       tensor = create_lod_tensor(data, correct_recursive_seq_lens,
+                                  fluid.CPUPlace())
+       self.assertEqual(tensor.recursive_sequence_lengths(),
+                        correct_recursive_seq_lens)

        # Create LoDTensor from numpy array
        data = np.random.random([10, 1])
-       lod = [[2, 1], [3, 3, 4]]
-       tensor = create_lod_tensor(data, lod, fluid.CPUPlace())
-       self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+       recursive_seq_lens = [[2, 1], [3, 3, 4]]
+       tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())
+       self.assertEqual(tensor.recursive_sequence_lengths(),
+                        recursive_seq_lens)

        # Create LoDTensor from another LoDTensor, they are differnt instances
-       new_lod = [[2, 2, 1], [1, 2, 2, 3, 2]]
-       new_tensor = create_lod_tensor(tensor, new_lod, fluid.CPUPlace())
-       self.assertEqual(tensor.recursive_sequence_lengths(), lod)
-       self.assertEqual(new_tensor.recursive_sequence_lengths(), new_lod)
+       new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
+       new_tensor = create_lod_tensor(tensor, new_recursive_seq_lens,
+                                      fluid.CPUPlace())
+       self.assertEqual(tensor.recursive_sequence_lengths(),
+                        recursive_seq_lens)
+       self.assertEqual(new_tensor.recursive_sequence_lengths(),
+                        new_recursive_seq_lens)

    def test_create_random_int_lodtensor(self):
        # The shape of a word, commonly used in speech and NLP problem, is [1]
        shape = [1]
-       lod = [[2, 3, 5]]
+       recursive_seq_lens = [[2, 3, 5]]
        dict_size = 10000
        low = 0
        high = dict_size - 1
-       tensor = create_random_int_lodtensor(lod, shape,
-                                            fluid.CPUPlace(), low, high)
-       self.assertEqual(tensor.recursive_sequence_lengths(), lod)
+       tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                            fluid.CPUPlace(), low, high)
+       self.assertEqual(tensor.recursive_sequence_lengths(),
+                        recursive_seq_lens)
        self.assertEqual(tensor.shape(), [10, 1])
...
@@ -401,7 +401,7 @@ class TestBook(unittest.TestCase):
        self.assertIsNotNone(output)
        print(str(program))

-   def test_maxout(self):
+   def test_crop(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[3, 5], dtype="float32")
@@ -410,6 +410,15 @@ class TestBook(unittest.TestCase):
        self.assertIsNotNone(output)
        print(str(program))

+   def test_mean_iou(self):
+       program = Program()
+       with program_guard(program):
+           x = layers.data(name='x', shape=[16], dtype='float32')
+           y = layers.data(name='label', shape=[1], dtype='int64')
+           iou = layers.mean_iou(x, y, 2)
+           self.assertIsNotNone(iou)
+           print(str(program))

if __name__ == '__main__':
    unittest.main()