Unverified commit b38142bc, authored by mhy-666, committed by GitHub

Move the implementation of fluid.layers.sequence_lod to paddle.static.nn.sequence_lod (#49604)

Parent 2deada9a
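The change is mechanical: every call site that spelled the sequence ops through fluid.layers is rewritten to the new paddle.static.nn.sequence_lod path. A minimal before/after sketch of the rename (the data and embedding setup here are illustrative, not taken from any one file in this diff):

import paddle
import paddle.fluid as fluid

paddle.enable_static()

# Illustrative inputs: an integer id sequence fed through an embedding lookup.
data = paddle.static.data(name='ids', shape=[-1, 1], dtype='int64', lod_level=1)
emb = fluid.layers.embedding(input=data, size=[100, 8], is_sparse=True)

# Before this commit:
#   bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
# After this commit, the op lives under paddle.static.nn:
bow = paddle.static.nn.sequence_lod.sequence_pool(input=emb, pool_type='sum')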
......@@ -81,7 +81,9 @@ def bow_net(
emb = fluid.layers.embedding(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow_tanh = paddle.tanh(bow)
fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
......
......@@ -113,7 +113,9 @@ def model():
),
is_sparse=True,
)
dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, pool_type="sum")
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = paddle.static.nn.fc(
......@@ -138,7 +140,9 @@ def model():
),
is_sparse=True,
)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
......
......@@ -24,7 +24,6 @@ from . import math_op_patch
from .math_op_patch import *
from .learning_rate_scheduler import *
from .collective import *
from .sequence_lod import *
__all__ = []
__all__ += nn.__all__
......@@ -32,4 +31,3 @@ __all__ += io.__all__
__all__ += tensor.__all__
__all__ += control_flow.__all__
__all__ += learning_rate_scheduler.__all__
__all__ += sequence_lod.__all__
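With the wildcard re-export and the __all__ extension removed above, the sequence ops presumably stop resolving under paddle.fluid.layers. A hypothetical compatibility shim for downstream code that must import on both sides of this commit (the try/except pattern is a suggestion, not part of this PR):

# Hypothetical shim; assumes only the attribute location changed, not the op itself.
try:
    from paddle.fluid.layers import sequence_pool  # pre-#49604 location
except ImportError:
    from paddle.static.nn.sequence_lod import sequence_pool  # post-#49604 location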
......@@ -296,7 +296,7 @@ def sequence_conv_pool(
and :ref:`api_fluid_layers_sequence_pool` .
Args:
input (Variable): 2-D LoDTensor, the input of sequence_conv,
input (Tensor): 2-D LoDTensor, the input of sequence_conv,
which supports variable-length input sequences.
The underlying data of input is a matrix with shape
(T, N), where T is the total time steps in this mini-batch and N is
......@@ -320,7 +320,7 @@ def sequence_conv_pool(
It is a 2-D Tensor, with the same data type as :attr:`input`
Return Type:
Variable
Tensor
Examples:
.. code-block:: python
......@@ -341,7 +341,7 @@ def sequence_conv_pool(
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
conv_out = layers.sequence_conv(
conv_out = paddle.static.nn.sequence_lod.sequence_conv(
input=input,
num_filters=num_filters,
filter_size=filter_size,
......@@ -350,7 +350,9 @@ def sequence_conv_pool(
act=act,
)
pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type)
pool_out = paddle.static.nn.sequence_lod.sequence_pool(
input=conv_out, pool_type=pool_type
)
return pool_out
......
......@@ -128,7 +128,7 @@ def get_mov_combined_features():
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE
)
mov_categories_hidden = layers.sequence_pool(
mov_categories_hidden = paddle.static.nn.sequence_lod.sequence_pool(
input=mov_categories_emb, pool_type="sum"
)
......
......@@ -64,7 +64,7 @@ class TestDistCTR2x2(TestDistRunnerBase):
),
is_sparse=IS_SPARSE,
)
dnn_pool = fluid.layers.sequence_pool(
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
......@@ -91,7 +91,9 @@ class TestDistCTR2x2(TestDistRunnerBase):
),
is_sparse=IS_SPARSE,
)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
......
......@@ -112,7 +112,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
is_sparse=True,
padding_idx=0,
)
dnn_pool = fluid.layers.sequence_pool(
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
......@@ -140,7 +140,9 @@ class TestDistCTR2x2(FleetDistRunnerBase):
is_sparse=True,
padding_idx=0,
)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
......
......@@ -82,7 +82,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
),
is_sparse=True,
)
dnn_pool = fluid.layers.sequence_pool(
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
......@@ -98,7 +98,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
),
is_sparse=True,
)
lr_pool = fluid.layers.sequence_pool(
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
......
......@@ -131,7 +131,9 @@ def train_network(
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -158,7 +160,9 @@ def train_network(
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -184,7 +188,9 @@ def train_network(
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -103,7 +103,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
entry=entry,
param_attr=fluid.ParamAttr(name="deep_embedding", initializer=init),
)
dnn_pool = fluid.layers.sequence_pool(
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
......@@ -131,7 +131,9 @@ class TestDistCTR2x2(FleetDistRunnerBase):
),
)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
predict = paddle.static.nn.fc(
x=merge_layer, size=2, activation='softmax'
......
......@@ -235,7 +235,7 @@ class BaseModel(fluid.dygraph.Layer):
max_seq_len = src_emb.shape[0]
enc_len_mask = fluid.layers.sequence_mask(
enc_len_mask = paddle.static.nn.sequence_lod.sequence_mask(
src_sequence_length, maxlen=max_seq_len, dtype="float32"
)
enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])
......@@ -301,7 +301,7 @@ class BaseModel(fluid.dygraph.Layer):
)
loss = paddle.squeeze(loss, axes=[2])
max_tar_seq_len = paddle.shape(tar)[1]
tar_mask = fluid.layers.sequence_mask(
tar_mask = paddle.static.nn.sequence_lod.sequence_mask(
tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
)
loss = loss * tar_mask
......@@ -337,7 +337,7 @@ class BaseModel(fluid.dygraph.Layer):
max_seq_len = src_emb.shape[0]
enc_len_mask = fluid.layers.sequence_mask(
enc_len_mask = paddle.static.nn.sequence_lod.sequence_mask(
src_sequence_length, maxlen=max_seq_len, dtype="float32"
)
enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])
......@@ -754,7 +754,7 @@ class AttentionModel(fluid.dygraph.Layer):
max_seq_len = src_emb.shape[0]
enc_len_mask = fluid.layers.sequence_mask(
enc_len_mask = paddle.static.nn.sequence_lod.sequence_mask(
src_sequence_length, maxlen=max_seq_len, dtype="float32"
)
enc_padding_mask = enc_len_mask - 1.0
......@@ -839,7 +839,7 @@ class AttentionModel(fluid.dygraph.Layer):
)
loss = paddle.squeeze(loss, axes=[2])
max_tar_seq_len = paddle.shape(tar)[1]
tar_mask = fluid.layers.sequence_mask(
tar_mask = paddle.static.nn.sequence_lod.sequence_mask(
tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
)
loss = loss * tar_mask
......
......@@ -74,7 +74,7 @@ def net(batch_size=4, lr=0.01):
),
is_sparse=True,
)
dnn_pool = fluid.layers.sequence_pool(
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
......@@ -90,7 +90,9 @@ def net(batch_size=4, lr=0.01):
),
is_sparse=True,
)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
with fluid.device_guard("gpu"):
for i, dim in enumerate(dnn_layer_dims[1:]):
......
......@@ -176,7 +176,7 @@ class TestSequenceMaskOpError(unittest.TestCase):
def test_Variable():
# the input must be Variable
fluid.layers.sequence_mask(input_data, maxlen=4)
paddle.static.nn.sequence_lod.sequence_mask(input_data, maxlen=4)
self.assertRaises(TypeError, test_Variable)
......
......@@ -23,8 +23,6 @@ import numpy as np
from convert import convert_params_for_net
from rnn_numpy import GRU, LSTM, SimpleRNN
from paddle.fluid.layers import sequence_mask
bidirectional_list = ["bidirectional", "bidirect"]
......@@ -91,7 +89,9 @@ class TestSimpleRNN(unittest.TestCase):
y1, h1 = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
......@@ -174,7 +174,9 @@ class TestGRU(unittest.TestCase):
y1, h1 = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
......@@ -263,7 +265,9 @@ class TestLSTM(unittest.TestCase):
y1, (h1, c1) = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, (h2, c2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
......@@ -303,7 +307,9 @@ def predict_test_util(place, mode, stop_gradient=True):
x = paddle.randn((4, 10, 16))
x.stop_gradient = stop_gradient
seq_len = paddle.to_tensor(np.array([10, 6, 8, 5]))
mask = sequence_mask(seq_len, maxlen=10, dtype=x.dtype)
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, maxlen=10, dtype=x.dtype
)
mask = paddle.unsqueeze(mask, [2])
rnn = Net()
y, _ = rnn(x)
......
......@@ -15,7 +15,7 @@
import paddle
paddle.set_default_dtype("float64")
from paddle.fluid.layers import sequence_mask
paddle.enable_static()
......@@ -161,7 +161,9 @@ class TestSimpleRNN(unittest.TestCase):
dtype=paddle.framework.get_default_dtype(),
)
seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y, h = rnn2(x_data, sequence_length=seq_len)
......@@ -316,7 +318,9 @@ class TestGRU(unittest.TestCase):
dtype=paddle.framework.get_default_dtype(),
)
seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y, h = rnn2(x_data, sequence_length=seq_len)
......@@ -477,7 +481,9 @@ class TestLSTM(unittest.TestCase):
dtype=paddle.framework.get_default_dtype(),
)
seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y, (h, c) = rnn2(x_data, sequence_length=seq_len)
......
......@@ -21,8 +21,6 @@ import numpy as np
from convert import convert_params_for_cell
from rnn_numpy import RNN, BiRNN, GRUCell
from paddle.fluid.layers import sequence_mask
class TestRNNWrapper(unittest.TestCase):
def __init__(self, time_major=True, direction="forward", place="cpu"):
......@@ -91,7 +89,9 @@ class TestRNNWrapper(unittest.TestCase):
y1, h1 = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
......@@ -174,7 +174,9 @@ class TestBiRNNWrapper(unittest.TestCase):
y1, (fw_h1, bw_h1) = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
mask = paddle.static.nn.sequence_lod.sequence_mask(
seq_len, dtype=paddle.get_default_dtype()
)
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, (fw_h2, bw_h2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
......
......@@ -21,7 +21,6 @@ sys.path.append("../")
from op_test import OpTest
import paddle
from paddle import fluid
class TestSequenceConcat(OpTest):
......@@ -92,24 +91,29 @@ class TestSequenceConcatOpError(unittest.TestCase):
x_data = paddle.static.data(
name='x', shape=[-1, 4], dtype='float32'
)
fluid.layers.sequence_concat(input=x_data)
paddle.static.nn.sequence_lod.sequence_concat(input=x_data)
self.assertRaises(TypeError, test_input_list)
def test_variable1():
# the input element type must be Variable
x1_data = np.array([[3, 5]]).astype('float32')
y1_data = paddle.static.data(
name='y1', shape=[-1, 4], dtype='float32'
)
fluid.layers.sequence_concat(input=[x1_data, y1_data])
paddle.static.nn.sequence_lod.sequence_concat(
input=[x1_data, y1_data]
)
def test_variable2():
x2_data = np.array([[3, 5]]).astype('float32')
y2_data = paddle.static.data(
name='y2', shape=[-1, 4], dtype='float32'
)
fluid.layers.sequence_concat(input=[y2_data, x2_data])
paddle.static.nn.sequence_lod.sequence_concat(
input=[y2_data, x2_data]
)
for i in range(2):
if i == 0:
......@@ -126,7 +130,7 @@ class TestSequenceConcatOpError(unittest.TestCase):
name="y3", shape=[-1, 3, 5], dtype='int16'
)
input_list = [x3_data, y3_data]
fluid.layers.sequence_concat(input=input_list)
paddle.static.nn.sequence_lod.sequence_concat(input=input_list)
self.assertRaises(TypeError, test_dtype)
......
......@@ -286,7 +286,7 @@ class TestSeqConvApi(unittest.TestCase):
import paddle.fluid as fluid
x = paddle.static.data('x', shape=[-1, 32], lod_level=1)
y = fluid.layers.sequence_conv(
y = paddle.static.nn.sequence_lod.sequence_conv(
input=x, num_filters=2, filter_size=3, padding_start=None
)
......
......@@ -20,7 +20,7 @@ import numpy as np
sys.path.append("../")
from op_test import OpTest
import paddle.fluid as fluid
import paddle
from paddle.fluid import Program, program_guard
......@@ -92,16 +92,25 @@ class TestSequenceExpandAsOpError(unittest.TestCase):
with program_guard(Program(), Program()):
# the input x must be Variable
x1 = np.random.random((2, 4)).astype("float32")
self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x1)
self.assertRaises(
TypeError, paddle.static.nn.sequence_lod.sequence_expand_as, x1
)
# the dtype of input x must be float32, float64, int32 or int64
x2 = fluid.data(name='x2', shape=[None, 4], dtype="bool")
self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x2)
x2 = paddle.static.data(name='x2', shape=[None, 4], dtype="bool")
self.assertRaises(
TypeError, paddle.static.nn.sequence_lod.sequence_expand_as, x2
)
# the input y must be Variable
x3 = fluid.data(name='x3', shape=[None, 4], dtype="float32")
x3 = paddle.static.data(name='x3', shape=[None, 4], dtype="float32")
y = np.random.random((2, 4)).astype("float32")
self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x3, y)
self.assertRaises(
TypeError,
paddle.static.nn.sequence_lod.sequence_expand_as,
x3,
y,
)
if __name__ == '__main__':
......
......@@ -17,7 +17,6 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import Program, program_guard
sys.path.append("../")
......@@ -30,7 +29,7 @@ class TestSequenceFirstStepOpError(unittest.TestCase):
def test_Variable():
# the input must be Variable
input_data = np.random.randint(1, 5, [4]).astype("int64")
fluid.layers.sequence_last_step(input_data)
paddle.static.nn.sequence_lod.sequence_last_step(input_data)
self.assertRaises(TypeError, test_Variable)
......@@ -42,7 +41,7 @@ class TestSequenceFirstStepOpError(unittest.TestCase):
dtype='int64',
lod_level=1,
)
fluid.layers.sequence_last_step(type_data)
paddle.static.nn.sequence_lod.sequence_last_step(type_data)
self.assertRaises(TypeError, test_input_dtype)
......
......@@ -17,7 +17,6 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import Program, program_guard
sys.path.append("../")
......@@ -30,7 +29,7 @@ class TestSequenceLastStepOpError(unittest.TestCase):
def test_Variable():
# the input must be Variable
input_data = np.random.randint(1, 5, [4]).astype("int64")
fluid.layers.sequence_last_step(input_data)
paddle.static.nn.sequence_lod.sequence_last_step(input_data)
self.assertRaises(TypeError, test_Variable)
......@@ -42,7 +41,7 @@ class TestSequenceLastStepOpError(unittest.TestCase):
dtype='int64',
lod_level=1,
)
fluid.layers.sequence_last_step(type_data)
paddle.static.nn.sequence_lod.sequence_last_step(type_data)
self.assertRaises(TypeError, test_input_dtype)
......
......@@ -18,7 +18,6 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import (
Program,
convert_np_dtype_to_dtype_,
......@@ -167,7 +166,9 @@ class TestSequenceMaskOpError(unittest.TestCase):
def test_Variable():
# the input must be Variable
fluid.layers.sequence_mask(input_data, maxlen=4)
paddle.static.nn.sequence_lod.sequence_mask(
input_data, maxlen=4
)
self.assertRaises(TypeError, test_Variable)
......
......@@ -159,7 +159,7 @@ class TestSequencePadOpError(unittest.TestCase):
pad_value = fluid.layers.assign(
input=np.array([0.0], dtype=np.float32)
)
fluid.layers.sequence_pad(x=x, pad_value=pad_value)
paddle.static.nn.sequence_lod.sequence_pad(x=x, pad_value=pad_value)
self.assertRaises(TypeError, test_x_variable)
......@@ -168,7 +168,9 @@ class TestSequencePadOpError(unittest.TestCase):
name='x1', shape=[-1, 10, 5], dtype='float32', lod_level=1
)
pad_value1 = np.array([0.0], dtype=np.float32)
fluid.layers.sequence_pad(x=x1, pad_value=pad_value1)
paddle.static.nn.sequence_lod.sequence_pad(
x=x1, pad_value=pad_value1
)
self.assertRaises(TypeError, test_pad_value_variable)
......@@ -179,14 +181,18 @@ class TestSequencePadOpError(unittest.TestCase):
pad_value2 = fluid.layers.assign(
input=np.array([0.0], dtype=np.int32)
)
fluid.layers.sequence_pad(x=x2, pad_value=pad_value2)
paddle.static.nn.sequence_lod.sequence_pad(
x=x2, pad_value=pad_value2
)
self.assertRaises(TypeError, test_dtype)
def test_length_dtype(self):
x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
out, length = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
out, length = paddle.static.nn.sequence_lod.sequence_pad(
x=x, pad_value=pad_value
)
# check if the dtype of length is int64 in compile time
self.assertEqual(length.dtype, core.VarDesc.VarType.INT64)
......
......@@ -22,8 +22,6 @@ import paddle
sys.path.append("../")
from op_test import OpTest
import paddle.fluid as fluid
class TestSequenceReshape(OpTest):
def init_data(self):
......@@ -90,7 +88,7 @@ class TestSequenceReshapeOpError(unittest.TestCase):
def test_error(self):
def test_variable():
x = np.random.random((2, 4)).astype("float32")
fluid.layers.sequence_reshape(x=x, new_dim=4)
paddle.static.nn.sequence_lod.sequence_reshape(x=x, new_dim=4)
self.assertRaises(TypeError, test_variable)
......@@ -101,7 +99,7 @@ class TestSequenceReshapeOpError(unittest.TestCase):
dtype='float16',
lod_level=1,
)
fluid.layers.sequence_reshape(x=x1, new_dim=4)
paddle.static.nn.sequence_lod.sequence_reshape(x=x1, new_dim=4)
self.assertRaises(TypeError, test_dtype)
......
......@@ -18,7 +18,6 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
sys.path.append("../")
from op_test import OpTest
......@@ -101,16 +100,17 @@ class TestSequenceReverseOpError(unittest.TestCase):
def test_variable():
# the input type must be Variable
x_data = np.random.random((2, 4)).astype("float32")
fluid.layers.sequence_reverse(x=x_data)
paddle.static.nn.sequence_lod.sequence_reverse(x=x_data)
self.assertRaises(TypeError, test_variable)
def test_dtype():
# dtype must be 'float32', 'float64', 'int8', 'int32', 'int64'
x2_data = paddle.static.data(
name='x2', shape=[-1, 4], dtype='float16'
)
fluid.layers.sequence_reverse(x=x2_data)
paddle.static.nn.sequence_lod.sequence_reverse(x=x2_data)
self.assertRaises(TypeError, test_dtype)
......
......@@ -20,7 +20,7 @@ import numpy as np
sys.path.append("../")
from op_test import OpTest
import paddle.fluid as fluid
import paddle
class TestSequenceUnpadOp(OpTest):
......@@ -91,29 +91,29 @@ class TestSequenceUnpadOpError(unittest.TestCase):
def test_error(self):
def test_x_variable():
x = np.random.random((10, 5)).astype("float64")
len = fluid.data(name='length2', shape=[10], dtype='int64')
fluid.layers.sequence_pad(x=x, length=len)
len = paddle.static.data(name='length2', shape=[10], dtype='int64')
paddle.static.nn.sequence_lod.sequence_pad(x=x, length=len)
self.assertRaises(TypeError, test_x_variable)
def test_length_variable():
x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32')
len1 = np.random.random((10)).astype("int64")
fluid.layers.sequence_pad(x=x1, length=len1)
paddle.static.nn.sequence_lod.sequence_pad(x=x1, length=len1)
self.assertRaises(TypeError, test_length_variable)
def test_x_dtype():
x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
len2 = fluid.data(name='length2', shape=[10], dtype='int64')
fluid.layers.sequence_pad(x=x2, length=len2)
x2 = paddle.static.data(name='x2', shape=[10, 5], dtype='float16')
len2 = paddle.static.data(name='length2', shape=[10], dtype='int64')
paddle.static.nn.sequence_lod.sequence_pad(x=x2, length=len2)
self.assertRaises(TypeError, test_x_dtype)
def test_length_dtype():
x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
len3 = fluid.data(name='length3', shape=[10], dtype='int32')
fluid.layers.sequence_pad(x=x3, length=len3)
x3 = paddle.static.data(name='x3', shape=[10, 5], dtype='float64')
len3 = paddle.static.data(name='length3', shape=[10], dtype='int32')
paddle.static.nn.sequence_lod.sequence_pad(x=x3, length=len3)
self.assertRaises(TypeError, test_length_dtype)
......
......@@ -96,7 +96,9 @@ def bow_net(
emb = fluid.layers.embedding(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow_tanh = paddle.tanh(bow)
fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
......
......@@ -46,7 +46,9 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase):
is_sparse=True,
)
pool = fluid.layers.sequence_pool(input=emb, pool_type="sum")
pool = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type="sum"
)
z = fluid.layers.concat(input=[x, pool], axis=1)
y_predict = paddle.static.nn.fc(x=z, size=1)
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
......
......@@ -949,7 +949,9 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
name=slot, shape=[-1, 1], dtype="int64", lod_level=1
)
var = fluid.layers.cast(x=data, dtype='float32')
pool = fluid.layers.sequence_pool(input=var, pool_type='AVERAGE')
pool = paddle.static.nn.sequence_lod.sequence_pool(
input=var, pool_type='AVERAGE'
)
slots_vars.append(data)
poolings.append(pool)
......
......@@ -84,7 +84,9 @@ class TestPSMinimize(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -114,7 +116,9 @@ class TestPSMinimize(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -143,7 +147,9 @@ class TestPSMinimize(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -86,7 +86,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -149,7 +153,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -84,7 +84,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -114,7 +116,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -143,7 +147,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -87,7 +87,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -117,7 +119,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -146,7 +150,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -88,7 +88,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -147,7 +151,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -87,7 +87,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
q_ss = paddle.static.nn.data_norm(input=q_ss)
# fc layer after conv
......@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -147,7 +151,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -86,7 +86,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -149,7 +153,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -84,7 +84,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -114,7 +116,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -143,7 +147,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -86,7 +86,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -149,7 +153,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -84,7 +84,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -114,7 +116,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -143,7 +147,9 @@ class TestPSPassWithBow(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -222,7 +222,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
name="deep_embedding", initializer=init
),
)
dnn_pool = fluid.layers.sequence_pool(
dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=dnn_embedding, pool_type="sum"
)
dnn_out = dnn_pool
......@@ -249,7 +249,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
),
)
lr_pool = fluid.layers.sequence_pool(
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
input=lr_embbding, pool_type="sum"
)
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
......
......@@ -82,7 +82,9 @@ class TestSPMT(unittest.TestCase):
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=q_emb, pool_type='sum'
)
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
......@@ -112,7 +114,9 @@ class TestSPMT(unittest.TestCase):
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=pt_emb, pool_type='sum'
)
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
......@@ -141,7 +145,9 @@ class TestSPMT(unittest.TestCase):
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
input=nt_emb, pool_type='sum'
)
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
......
......@@ -716,7 +716,9 @@ class TestDistLookupTableBase(TranspilerTest):
is_sparse=is_sparse,
is_distributed=is_distributed,
)
pool = fluid.layers.sequence_pool(input=emb, pool_type='average')
pool = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='average'
)
return pool
title_ids = paddle.static.data(
......
......@@ -78,7 +78,10 @@ class EntryAttrChecks(unittest.TestCase):
entry=prob,
param_attr=fluid.ParamAttr(name="deep_embedding"),
)
pool = fluid.layers.sequence_pool(input=emb, pool_type="sum")
pool = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type="sum"
)
predict = paddle.static.nn.fc(
x=pool, size=2, activation='softmax'
)
......
......@@ -38,7 +38,10 @@ class EntryAttrChecks(unittest.TestCase):
is_distributed=True,
param_attr=fluid.ParamAttr(name="deep_embedding"),
)
pool = fluid.layers.sequence_pool(input=emb, pool_type="sum")
pool = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type="sum"
)
predict = paddle.static.nn.fc(
x=pool, size=2, activation='softmax'
)
......
......@@ -65,7 +65,9 @@ class TestFleet1(unittest.TestCase):
is_distributed=True,
param_attr=fluid.ParamAttr(name="embedding"),
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow = paddle.static.nn.data_norm(
input=bow, epsilon=1e-4, name="norm"
)
......
......@@ -36,7 +36,9 @@ def bow_net(
emb = fluid.layers.embedding(
input=data, is_sparse=True, size=[dict_dim, emb_dim]
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow_tanh = paddle.tanh(bow)
fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
......
......@@ -2096,86 +2096,6 @@ class TestBook(LayerTest):
)
return out
def test_sequence_expand(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32')
y = paddle.static.data(
name='y', shape=[-1, 10, 20], dtype='float32', lod_level=2
)
return layers.sequence_expand(x=x, y=y, ref_level=1)
def test_sequence_reshape(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = paddle.static.data(
name='x', shape=[-1, 8], dtype='float32', lod_level=1
)
out = layers.sequence_reshape(input=x, new_dim=16)
return out
def test_sequence_unpad(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = paddle.static.data(name='x', shape=[-1, 10, 5], dtype='float32')
length = paddle.static.data(
name='length', shape=[-1], dtype='int64'
)
return layers.sequence_unpad(x=x, length=length)
def test_sequence_softmax(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
seq_data = paddle.static.data(
name='seq_data',
shape=[-1, 10, 10],
dtype='float32',
lod_level=1,
)
seq = paddle.static.nn.fc(x=seq_data, size=20)
return layers.sequence_softmax(seq)
def test_sequence_unsqueeze(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = paddle.static.data(name='x', shape=[-1, 8, 2], dtype='float32')
out = paddle.unsqueeze(x, axis=[1])
return out
def test_sequence_scatter(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = paddle.static.data(name='x', shape=[3, 6], dtype='float32')
idx = paddle.static.data(
name='idx',
shape=[12, 1],
dtype='int32',
lod_level=1,
)
updates = paddle.static.data(
name='updates',
shape=[12, 1],
dtype='float32',
lod_level=1,
)
out = layers.sequence_scatter(input=x, index=idx, updates=updates)
return out
def test_sequence_slice(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
import numpy as np
seqs = paddle.static.data(
name='x', shape=[-1, 10, 5], dtype='float32', lod_level=1
)
offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
out = layers.sequence_slice(
input=seqs, offset=offset, length=length
)
return out
def test_shuffle_batch(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......@@ -2238,14 +2158,6 @@ class TestBook(LayerTest):
)
return out
def test_sequence_enumerate(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = paddle.static.data(
name="input", shape=[-1, 1], dtype='int32', lod_level=1
)
out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
def test_row_conv(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......
......@@ -136,7 +136,9 @@ def bow_net(
emb = fluid.layers.embedding(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow_tanh = paddle.tanh(bow)
fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
......
......@@ -42,7 +42,9 @@ def bow_net(
emb = fluid.layers.embedding(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow_tanh = paddle.tanh(bow)
fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
......
......@@ -150,7 +150,9 @@ class MLE:
use_softmax=False,
)
max_seq_len = paddle.shape(probs)[1]
mask = layers.sequence_mask(length, maxlen=max_seq_len, dtype="float32")
mask = paddle.static.nn.sequence_lod.sequence_mask(
length, maxlen=max_seq_len, dtype="float32"
)
loss = loss * mask
loss = paddle.mean(loss, axis=[0])
loss = paddle.sum(loss)
......
......@@ -58,7 +58,9 @@ def bow_net(
emb = fluid.layers.embedding(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
)
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow = paddle.static.nn.sequence_lod.sequence_pool(
input=emb, pool_type='sum'
)
bow_tanh = paddle.tanh(bow)
fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
......
......@@ -434,10 +434,9 @@ class TestSeqConvApi(unittest.TestCase):
import paddle.fluid as fluid
x = paddle.static.data('x', shape=[-1, 32], lod_level=1)
y = fluid.layers.sequence_conv(
y = paddle.static.nn.sequence_lod.sequence_conv(
input=x, num_filters=2, filter_size=3, padding_start=None
)
place = fluid.CPUPlace()
x_tensor = fluid.create_lod_tensor(
np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place
......
......@@ -27,7 +27,6 @@ from xpu.get_test_cover_info import (
)
import paddle
import paddle.fluid as fluid
paddle.enable_static()
......@@ -105,13 +104,13 @@ class XPUTestSequenceUnpadOp(XPUOpTestWrapper):
class TestSequenceUnpadOpError(unittest.TestCase):
def test_error(self):
"""
The type of 'x' in fluid.layers.sequence_unpad must be <class 'paddle.fluid.framework.Variable'>, but received <class 'numpy.ndarray'>.
The type of 'x' in paddle.static.nn.sequence_unpad must be <class 'paddle.fluid.framework.Variable'>, but received <class 'numpy.ndarray'>.
"""
def test_x_variable():
x = np.random.random((10, 5)).astype("float64")
len = fluid.data(name='length2', shape=[10], dtype='int64')
fluid.layers.sequence_unpad(x=x, length=len)
len = paddle.static.data(name='length2', shape=[10], dtype='int64')
paddle.static.nn.sequence_lod.sequence_unpad(x=x, length=len)
self.assertRaises(TypeError, test_x_variable)
"""
......@@ -119,9 +118,9 @@ class TestSequenceUnpadOpError(unittest.TestCase):
"""
def test_length_variable():
x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32')
len1 = np.random.random((10)).astype("int64")
fluid.layers.sequence_unpad(x=x1, length=len1)
paddle.static.nn.sequence_lod.sequence_unpad(x=x1, length=len1)
self.assertRaises(TypeError, test_length_variable)
"""
......@@ -129,9 +128,9 @@ class TestSequenceUnpadOpError(unittest.TestCase):
"""
def test_x_dtype():
x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
len2 = fluid.data(name='length2', shape=[10], dtype='int64')
fluid.layers.sequence_unpad(x=x2, length=len2)
x2 = paddle.static.data(name='x2', shape=[10, 5], dtype='float16')
len2 = paddle.static.data(name='length2', shape=[10], dtype='int64')
paddle.static.nn.sequence_lod.sequence_unpad(x=x2, length=len2)
self.assertRaises(TypeError, test_x_dtype)
"""
......@@ -139,9 +138,9 @@ class TestSequenceUnpadOpError(unittest.TestCase):
"""
def test_length_dtype():
x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
len3 = fluid.data(name='length3', shape=[10], dtype='int32')
fluid.layers.sequence_unpad(x=x3, length=len3)
x3 = paddle.static.data(name='x3', shape=[10, 5], dtype='float64')
len3 = paddle.static.data(name='length3', shape=[10], dtype='int32')
paddle.static.nn.sequence_lod.sequence_unpad(x=x3, length=len3)
self.assertRaises(TypeError, test_length_dtype)
......
......@@ -28,7 +28,7 @@ from paddle.fluid.framework import (
in_dygraph_mode,
program_guard,
)
from paddle.fluid.layers import control_flow, sequence_lod, utils
from paddle.fluid.layers import control_flow, utils
from paddle.fluid.layers.utils import flatten, map_structure
from paddle.framework import core
from paddle.nn import Layer
......@@ -171,7 +171,7 @@ def _rnn_dynamic_graph(
inputs = map_structure(_transpose_batch_time, inputs)
if sequence_length is not None:
mask = sequence_lod.sequence_mask(
mask = paddle.static.nn.sequence_lod.sequence_mask(
sequence_length, maxlen=time_steps, dtype=inputs.dtype
)
mask = paddle.transpose(mask, [1, 0])
......@@ -256,7 +256,7 @@ def _rnn_static_graph(
max_seq_len = paddle.shape(flatten(inputs)[0])[0]
if sequence_length:
mask = sequence_lod.sequence_mask(
mask = paddle.static.nn.sequence_lod.sequence_mask(
sequence_length,
maxlen=max_seq_len,
dtype=flatten(initial_states)[0].dtype,
......
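For reference, sequence_mask as used by rnn.py above expands a batch of lengths into a 0/1 mask over time steps, which the RNN then multiplies into its outputs and states. A NumPy sketch of the semantics (a reference model only, not the Paddle kernel):

import numpy as np

# mask[i, j] = 1.0 if j < lengths[i] else 0.0, for j in [0, maxlen)
def sequence_mask_ref(lengths, maxlen):
    lengths = np.asarray(lengths)
    return (np.arange(maxlen)[None, :] < lengths[:, None]).astype('float32')

print(sequence_mask_ref([2, 5, 3], maxlen=5))
# [[1. 1. 0. 0. 0.]
#  [1. 1. 1. 1. 1.]
#  [1. 1. 1. 0. 0.]]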
......@@ -42,21 +42,21 @@ from ...fluid.input import embedding # noqa: F401
from ...fluid.contrib.layers import sparse_embedding # noqa: F401
from ...fluid.layers import StaticRNN # noqa: F401
from ...fluid.layers.sequence_lod import sequence_conv # noqa: F401
from ...fluid.layers.sequence_lod import sequence_softmax # noqa: F401
from ...fluid.layers.sequence_lod import sequence_pool # noqa: F401
from ...fluid.layers.sequence_lod import sequence_concat # noqa: F401
from ...fluid.layers.sequence_lod import sequence_first_step # noqa: F401
from ...fluid.layers.sequence_lod import sequence_last_step # noqa: F401
from ...fluid.layers.sequence_lod import sequence_slice # noqa: F401
from ...fluid.layers.sequence_lod import sequence_expand # noqa: F401
from ...fluid.layers.sequence_lod import sequence_expand_as # noqa: F401
from ...fluid.layers.sequence_lod import sequence_pad # noqa: F401
from ...fluid.layers.sequence_lod import sequence_unpad # noqa: F401
from ...fluid.layers.sequence_lod import sequence_reshape # noqa: F401
from ...fluid.layers.sequence_lod import sequence_scatter # noqa: F401
from ...fluid.layers.sequence_lod import sequence_enumerate # noqa: F401
from ...fluid.layers.sequence_lod import sequence_reverse # noqa: F401
from .sequence_lod import sequence_conv # noqa: F401
from .sequence_lod import sequence_softmax # noqa: F401
from .sequence_lod import sequence_pool # noqa: F401
from .sequence_lod import sequence_concat # noqa: F401
from .sequence_lod import sequence_first_step # noqa: F401
from .sequence_lod import sequence_last_step # noqa: F401
from .sequence_lod import sequence_slice # noqa: F401
from .sequence_lod import sequence_expand # noqa: F401
from .sequence_lod import sequence_expand_as # noqa: F401
from .sequence_lod import sequence_pad # noqa: F401
from .sequence_lod import sequence_unpad # noqa: F401
from .sequence_lod import sequence_reshape # noqa: F401
from .sequence_lod import sequence_scatter # noqa: F401
from .sequence_lod import sequence_enumerate # noqa: F401
from .sequence_lod import sequence_reverse # noqa: F401
from .control_flow import cond
......
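After the import switch above, the short aliases exported from paddle/static/nn/__init__.py and the fully qualified module path should refer to the same function objects. A quick sanity check, assuming the re-exports shown above are the only binding involved:

import paddle

# Both spellings used throughout this diff should resolve to the same function.
assert paddle.static.nn.sequence_pool is paddle.static.nn.sequence_lod.sequence_pool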
......@@ -13,36 +13,13 @@
# limitations under the License.
import paddle
from .layer_function_generator import templatedoc
from ..framework import (
core,
Variable,
in_dygraph_mode,
convert_np_dtype_to_dtype_,
)
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..core import VarDesc
from paddle import _C_ops, _legacy_C_ops
__all__ = [
'sequence_conv',
'sequence_softmax',
'sequence_pool',
'sequence_concat',
'sequence_first_step',
'sequence_last_step',
'sequence_slice',
'sequence_expand',
'sequence_expand_as',
'sequence_pad',
'sequence_unpad',
'sequence_reshape',
'sequence_scatter',
'sequence_enumerate',
'sequence_mask',
'sequence_reverse',
]
from paddle.fluid.core import VarDesc
from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
from paddle.fluid.framework import Variable, _non_static_mode
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.layer_function_generator import templatedoc
__all__ = []
@templatedoc()
......@@ -61,7 +38,7 @@ def sequence_conv(
r"""
Note:
Only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_paddle_nn_functional_conv2d` ).
Only receives a Tensor with LoD information as input. If your input is a plain Tensor, please use the conv2d Op ( :ref:`api_fluid_layers_conv2d` ).
This operator receives input sequences with variable length and other convolutional
configuration parameters(num_filters, filter_size) to apply the convolution operation.
......@@ -75,7 +52,7 @@ def sequence_conv(
Here we will illustrate the details of the padding operation:
For a mini-batch of 2 variable-length sentences, containing 3 and 1 time-steps:
Assumed input (X) is a [4, N] float LoDTensor, and for the sake of simplicity, we assume N=2.
Assumed input (X) is a [4, N] float Tensor, and for the sake of simplicity, we assume N=2.
input.data = [[1, 1],
[2, 2],
[3, 3],
......@@ -108,11 +85,11 @@ def sequence_conv(
Args:
input (Variable): LoDTensor with shape :math:`(M, K)`, where M is the total time-step of mini-batch
input (Tensor): Tensor with shape :math:`(M, K)`, where M is the total time-step of mini-batch
and K is hidden_size of input. Only lod_level of 1 is supported. The data type should be float32 or
float64.
num_filters (int): the number of filters.
filter_size (int, optional): the height of filter. Specified filter width is not supported, the width is
filter_size (int): the height of filter. Specified filter width is not supported, the width is
hidden_size by default. Default: 3.
filter_stride (int, optional): stride of the filter. Currently only supports :attr:`stride` = 1.
padding (bool, optional): the parameter :attr:`padding` take no effect and will be discarded in the
......@@ -121,26 +98,26 @@ def sequence_conv(
input sequence may be shorter than :attr:`filter\_size`, which will cause the convolution
result to not be computed correctly. These padding data will not be trainable or updated
while training. Default: True.
padding_start (int, optional): It is used to indicate the start index for padding the input
padding_start (int): It is used to indicate the start index for padding the input
sequence, which can be negative. The negative number means to pad
:attr:`|padding_start|` time-steps of all-zero data at the beginning of each instance.
The positive number means to skip :attr:`padding_start` time-steps of each instance,
and it will pad :math:`filter\_size + padding\_start - 1` time-steps of all-zero data
at the end of the sequence to ensure that the output is the same length as the input.
If set None, the same length :math:`\frac{filter\_size}{2}` of data will be filled
If set None, the same length :math:`\\frac{filter\_size}{2}` of data will be filled
on both sides of the sequence. If set 0, the length of :math:`filter\_size - 1` data
is padded at the end of each input sequence. Default: None.
bias_attr (ParamAttr, optional): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_paddle_ParamAttr` .
param_attr (ParamAttr, optional): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_paddle_ParamAttr` .
act (str, optional): Activation to be applied to the output of this layer, such as tanh, softmax,
bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor with the same length as input. The data type is float32 or float64, which is same as input.
Tensor: Tensor with the same length as input. The data type is float32 or float64, which is same as input.
Examples:
......@@ -154,7 +131,7 @@ def sequence_conv(
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'sequence_conv'
......@@ -190,7 +167,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
r"""
Note:
The input type of the OP must be LoDTensor. For Tensor, use:** :ref:`api_fluid_layers_softmax`
The input type of the OP must be a Tensor with LoD information. For a plain Tensor, use :ref:`api_static_nn_softmax` .
A LoD-tensor can be regarded as several sequences, and this op applies the softmax algorithm on each sequence.
The shape of input Tensor can be :math:`[N, 1]` or :math:`[N]`, where :math:`N`
......@@ -230,7 +207,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
Args:
input (Variable):A LoDTensor with shape of :math:`[N, 1]` or :math:`[N]`, Recommended usage: :math:`[N]`.
input (Tensor): A Tensor with shape of :math:`[N, 1]` or :math:`[N]`. Recommended usage: :math:`[N]`.
Supported data types: float32, float64.
use_cudnn (bool, optional): Use cudnn kernel or not. Effective only when the cudnn version of the paddle
library is installed and GPU is used for training or inference. Default: False.
......@@ -238,7 +215,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoD-Tensor which has the same shape and data type with input.
Tensor: A LoD-Tensor which has the same shape and data type as the input.
Examples:
......@@ -256,7 +233,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y)
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_softmax', **locals())
check_variable_and_dtype(
......@@ -277,10 +254,10 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
r"""
Note:
Only receives LoDTensor as input. If your input is Tensor, please use pool2d Op.(fluid.layers.** :ref:`api_fluid_layers_pool2d` ).
Only receives a Tensor with LoD information as input. If your input is a Tensor without LoD, please use the pool2d Op ( :ref:`api_static_nn_pool2d` ).
This operator only supports LoDTensor as input. It will apply specified pooling
operation on the input LoDTensor. It pools features of all time-steps of each
This operator only supports Tensor as input. It will apply the specified pooling
operation on the input Tensor. It pools features of all time-steps of each
sequence at the last lod_level using :attr:`pool_type` mentioned in the parameters,
such as sum, average, sqrt, etc.
......@@ -298,12 +275,12 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
.. code-block:: text
Case 1:
input is a 1-level LoDTensor and pad_value = 0.0:
input is a 1-level Tensor and pad_value = 0.0:
input.lod = [[0, 2, 5, 7, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is LoDTensor:
output is Tensor:
out.shape = [4, 1]
with condition out.shape[0] == len(x.lod[-1]) == 4
......@@ -318,7 +295,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
and the trailing [0.0] entries in out.data are padding data.
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
input is a 2-level Tensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequences with length info [1, 2];
The last sequence contains 3 subsequences with length info [1, 0, 3].
......@@ -327,7 +304,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
input.shape = [7, 1]
If pool_type = sum, it will apply pooling on the last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is LoDTensor:
output is Tensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
where out.shape[0] == len(x.lod[-1]) == 5
......@@ -335,7 +312,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
where 1.=1., 5.=3. + 2., 4.=4., 0.0=pad_value, 12.=6. + 5. + 1.
Args:
input (variable): LoDTensor with lod_level no more than 2. The data type should be float32 or float64.
input (Tensor): Tensor with lod_level no more than 2. The data type should be float32 or float64.
pool_type (str): The pooling type that supports average, sum, sqrt, max, last or first.
is_test (bool): Only works when :attr:`pool_type` is max. If set False, a temporary Tensor maxIndex is
created to record the index information corresponding to the maximum value, which is used for backward
......@@ -343,7 +320,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
pad_value (float): Used to pad the pooling result for empty input sequence. Default: 0.0
Returns:
Variable: LoDTensor after pooling with data type float32 or float64.
Tensor: Tensor after pooling with data type float32 or float64.
Examples:
......@@ -361,7 +338,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first')
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'sequence_pool'
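As a sketch of the pooling modes described above (assuming the ``paddle.static.nn.sequence_pool`` alias; the layout follows Case 1):

.. code-block:: python

    import paddle

    paddle.enable_static()
    # a batch of variable-length 1-dim sequences (lod_level=1)
    x = paddle.static.data(name='x', shape=[None, 1], dtype='float32', lod_level=1)
    # one output row per sequence, e.g. sums over [1., 3.], [2., 4., 6.], ...
    sum_x = paddle.static.nn.sequence_pool(input=x, pool_type='sum')
    avg_x = paddle.static.nn.sequence_pool(input=x, pool_type='average')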
......@@ -395,14 +372,14 @@ def sequence_concat(input, name=None):
"""
Note:
Only receives LoDTensor as input. If your input is Tensor, please use concat Op.(fluid.layers.** :ref:`api_fluid_layers_concat` ).
Only receives a Tensor with LoD information as input. If your input is a Tensor without LoD, please use the concat Op ( :ref:`api_static_nn_concat` ).
This operator only supports LoDTensor as input. It concatenates the multiple LoDTensor from input by the LoD information,
and outputs the concatenated LoDTensor.
This operator only supports Tensor as input. It concatenates multiple Tensors from the input by the LoD information,
and outputs the concatenated Tensor.
.. code-block:: text
input is a list of LoDTensor:
input is a list of Tensor:
input = [x1, x2]
where:
x1.lod = [[0, 3, 5]]
......@@ -414,19 +391,19 @@ def sequence_concat(input, name=None):
x2.shape = [4, 1]
and should satisfy: len(x1.lod[0]) == len(x2.lod[0])
output is LoDTensor:
output is Tensor:
out.lod = [[0, 3+2, 5+4]]
out.data = [[1], [2], [3], [6], [7], [4], [5], [8], [9]]
out.shape = [9, 1]
Args:
input(list of Variable): List of LoDTensor to be concatenated. The length of each LoDTensor should be same.
input(list of Tensor): List of Tensors to be concatenated. The length of each Tensor should be the same.
The data type can be float32, float64 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Output the concatenated LoDTensor. The data type is same as input.
Tensor: The concatenated Tensor. The data type is the same as the input.
Examples:
.. code-block:: python
......@@ -439,17 +416,19 @@ def sequence_concat(input, name=None):
out = paddle.static.nn.sequence_concat(input=[x, y])
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_concat', **locals())
check_type(input, 'input', list, 'fluid.layers.sequence_concat')
check_type(
input, 'input', list, 'paddle.static.nn.sequence_lod.sequence_concat'
)
for i, input_x in enumerate(input):
check_variable_and_dtype(
input_x,
'input[' + str(i) + ']',
['int64', 'float32', 'float64'],
'fluid.layers.sequence_concat',
'paddle.static.nn.sequence_lod.sequence_concat',
)
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
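A minimal sketch matching the case above (assuming the ``paddle.static.nn.sequence_concat`` alias; both inputs must carry the same number of sequences):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32', lod_level=1)
    y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32', lod_level=1)
    # sequences are interleaved by LoD: out_i = concat(x_i, y_i) for each sequence i
    out = paddle.static.nn.sequence_concat(input=[x, y])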
......@@ -462,24 +441,24 @@ def sequence_concat(input, name=None):
def sequence_first_step(input):
"""
Only supports LoDTensor as input. Given the input LoDTensor, it will
Only supports Tensor as input. Given the input Tensor, it will
select the first time-step feature of each sequence as output.
.. code-block:: text
Case 1:
input is 1-level LoDTensor:
input is 1-level Tensor:
input.lod = [[0, 2, 5, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is a LoDTensor:
output is a Tensor:
out.shape = [3, 1]
out.shape[0] == len(x.lod[-1]) == 3
out.data = [[1.], [2.], [5.]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.)
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
input is a 2-level Tensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequences with length info [1, 2];
The last sequence contains 3 subsequences with length info [1, 0, 3].
......@@ -488,7 +467,7 @@ def sequence_first_step(input):
input.shape = [7, 1]
It will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is a LoDTensor:
output is a Tensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
out.shape[0] == len(x.lod[-1]) == 5
......@@ -496,10 +475,10 @@ def sequence_first_step(input):
where 1.=first(1.), 3.=first(3., 2.), 4.=first(4.), 0.0 = pad_value, 6.=first(6., 5., 1.)
Args:
input(Variable): LoDTensor with lod_level no more than 2. The data type should be float32 or float64.
input(Tensor): Tensor with lod_level no more than 2. The data type should be float32 or float64.
Returns:
Variable: LoDTensor consist of the sequence's first step vector. The data type is float32 or float64.
Tensor: Tensor consisting of each sequence's first step vector. The data type is float32 or float64.
Examples:
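A minimal sketch (assuming the ``paddle.static.nn.sequence_first_step`` alias):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
    # one row per sequence: the first time step of each sequence in x
    x_first_step = paddle.static.nn.sequence_first_step(input=x)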
......@@ -520,24 +499,24 @@ def sequence_first_step(input):
def sequence_last_step(input):
"""
Only supports LoDTensor as input. Given the input LoDTensor, it will
Only supports Tensor as input. Given the input Tensor, it will
select the last time-step feature of each sequence as output.
.. code-block:: text
Case 1:
input is 1-level LoDTensor:
input is 1-level Tensor:
input.lod = [[0, 2, 5, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is a LoDTensor:
output is a Tensor:
out.shape = [3, 1]
out.shape[0] == len(x.lod[-1]) == 3
out.data = [[3.], [6.], [1.]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.)
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
input is a 2-level Tensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequences with length info [1, 2];
The last sequence contains 3 subsequences with length info [1, 0, 3].
......@@ -546,7 +525,7 @@ def sequence_last_step(input):
input.shape = [7, 1]
It will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is a LoDTensor:
output is a Tensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
out.shape[0] == len(x.lod[-1]) == 5
......@@ -555,10 +534,10 @@ def sequence_last_step(input):
Args:
input(Variable): LoDTensor with lod_level no more than 2. The data type should be float32.
input(Tensor): Tensor with lod_level no more than 2. The data type should be float32.
Returns:
Variable: LoDTensor consist of the sequence's last step vector. The data type is float32.
Tensor: Tensor consisting of each sequence's last step vector. The data type is float32.
Examples:
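A minimal sketch (assuming the ``paddle.static.nn.sequence_last_step`` alias):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
    # one row per sequence: the last time step of each sequence in x
    x_last_step = paddle.static.nn.sequence_last_step(input=x)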
......@@ -584,13 +563,13 @@ def sequence_slice(input, offset, length, name=None):
The layer crops a subsequence from given sequence with given start
offset and subsequence length.
It only supports sequence data (LoDTensor with lod_level equal to 1).
It only supports sequence data (Tensor with lod_level equal to 1).
.. code-block:: text
- Case:
Given the input Variable **input**:
Given the input Tensor **input**:
input.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]],
input.lod = [[3, 2]],
......@@ -598,7 +577,7 @@ def sequence_slice(input, offset, length, name=None):
with offset.data = [[0], [1]] and length.data = [[2], [1]],
the output Variable will be
the output Tensor will be
out.data = [[a1, a2], [b1, b2], [e1, e2]],
out.lod = [[2, 1]],
......@@ -609,18 +588,18 @@ def sequence_slice(input, offset, length, name=None):
should be equal. The **offset** should start from 0.
Args:
input(Variable): LoDTensor, The input Variable which consists of the complete
input(Tensor): The input Tensor which consists of the complete
sequences.The data type can be float32, float64, int32 or int64
offset(Variable): LoDTensor, The offset to slice each sequence. The data
offset(Tensor): The offset to slice each sequence. The data
type is int32 or int64.
length(Variable): LoDTensor, The length of each subsequence. The data
length(Tensor): The length of each subsequence. The data
type is int32 or int64.
name(str|None): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: The output subsequences.
Tensor: The output subsequences.
Examples:
......@@ -638,7 +617,7 @@ def sequence_slice(input, offset, length, name=None):
length=length)
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper("sequence_slice", **locals())
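A sketch of the case above (assuming the ``paddle.static.nn.sequence_slice`` alias; ``paddle.assign`` only materializes the illustrative offsets and lengths):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()
    seqs = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
    # slice sequence 0 from offset 0 with length 2, sequence 1 from offset 1 with length 1
    offset = paddle.assign(np.array([[0], [1]], dtype='int32'))
    length = paddle.assign(np.array([[2], [1]], dtype='int32'))
    subseqs = paddle.static.nn.sequence_slice(input=seqs, offset=offset, length=length)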
......@@ -683,8 +662,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
Note:
Please note that the input ``x`` should be LodTensor or Tensor, \
and input ``y`` must be LodTensor.
Please note that the input ``x`` can be a Tensor with or without LoD information, \
and the input ``y`` must be a Tensor with LoD information.
**Following examples will explain how sequence_expand works:**
......@@ -695,18 +674,18 @@ def sequence_expand(x, y, ref_level=-1, name=None):
Consider 2 sequences [a][b] and [c][d], now we want to expand them to [a][b], [a][b], [c][d] and [c][d].
Sequence [a][b] expand twice and [c][d] expands twice, so the lod which according to is [2, 2].
Input x is a 1-level LoDTensor:
Input x is a 1-level Tensor:
x.lod = [[2, 2]] #lod based on length may be easier to understand
x.data = [[a], [b], [c], [d]]
x.dims = [4, 1]
input y is a LoDTensor:
input y is a Tensor:
y.lod = [[2, 2], #the 0th level lod, according to this level
[3, 3, 1, 1]] #the 1st level lod, it has nothing to do with this level
ref_level: 0
then output is a 1-level LoDTensor out:
then output is a 1-level Tensor out:
out.lod = [[2, 2, 2, 2]] #lod based on offset
out.data = [[a], [b], [a], [b], [c], [d], [c], [d]]
out.dims = [8, 1]
......@@ -721,20 +700,20 @@ def sequence_expand(x, y, ref_level=-1, name=None):
x.data = [[a], [b], [c]]
x.dims = [3, 1]
y is a LoDTensor:
y is a Tensor:
y.lod = [[2, 0, 3]]
ref_level: -1
then output is a 1-level LodTensor:
then output is a 1-level Tensor:
out.data = [[a], [a], [c], [c], [c]]
out.dims = [5, 1]
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
x (Tensor): The input variable which is a Tensor with or without LoD information, with the \
dims ``[M, K]``. The lod level is at most 1. The data type should be \
float32, float64, int32 or int64.
y (Variable): The input variable which is a LoDTensor, the lod level is \
y (Tensor): The input variable which is a Tensor whose lod level is \
at least 1.
ref_level (int): Lod level of ``y`` to be referred by ``x``. If set to -1, \
refer the last level of lod.
......@@ -743,7 +722,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
None by default.
Returns:
Tensor, The expanded variable which is a LoDTensor, with dims ``[N, K]``. \
Tensor, The expanded variable which is a Tensor, with dims ``[N, K]``. \
``N`` depends on the lod info of ``x`` and ``y``. \
The data type is same as input.
......@@ -773,7 +752,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
# data: [1 2 3 4]
np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32')
y_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2], [3,3,1,1]], place)
print(y_lod_tensor)
#lod: [[0, 2, 4][0, 3, 6, 7, 8]]
# dim: 8, 1
......@@ -792,7 +771,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
# data: [1 2 1 2 3 4 3 4]
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand'
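A sketch of Case 1 above (assuming the ``paddle.static.nn.sequence_expand`` alias):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[4, 1], dtype='float32')
    y = paddle.static.data(name='y', shape=[8, 1], dtype='float32', lod_level=1)
    # each sequence of x is repeated according to level 0 of y's lod
    out = paddle.static.nn.sequence_expand(x=x, y=y, ref_level=0)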
......@@ -816,12 +795,12 @@ def sequence_expand_as(x, y, name=None):
according to the zeroth level lod of ``y``. Current implementation requires \
the level number of ``y``'s lod must be 1, and the first dimension of \
``x`` should be equal to the size of ``y``'s zeroth level lod, thus \
the expanded LodTensor has the same lod info as ``y``. The expanded result \
the expanded Tensor has the same lod info as ``y``. The expanded result \
has nothing to do with ``x``'s lod, so the lod of Input(X) is not considered.
Note:
Please note that the input ``x`` should be LodTensor or Tensor, \
and input ``y`` must be LodTensor.
Please note that the input ``x`` can be a Tensor with or without LoD information, \
and the input ``y`` must be a Tensor with LoD information.
Following examples will explain how sequence_expand_as works:
......@@ -831,13 +810,13 @@ def sequence_expand_as(x, y, name=None):
Consider 4 sequences [a], [b], [c], [d], now we want to expand them to [a][a][a], [b][b][b], [c] and [d].
It's obvious that the lod info of expanded sequences is [0, 3, 6, 7, 8].
Given a 1-level LodTensor ``x``:
Given a 1-level Tensor ``x``:
x.data = [[a], [b], [c], [d]]
x.dims = [4, 1]
and input ``y``
y.lod = [[3, 3, 1, 1]] #lod based on length may be easier to understand
then we get 1-level LoDTensor out:
then we get 1-level Tensor out:
Out.lod = [[0, 3, 6, 7, 8]] #based on offset
Out.data = [[a], [a], [a], [b], [b], [b], [c], [d]]
Out.dims = [8, 1]
......@@ -851,22 +830,22 @@ def sequence_expand_as(x, y, name=None):
and input ``y``:
y.lod = [[0, 2, 3, 6]]
then we get a 1-level LoDTensor:
then we get a 1-level Tensor:
out.lod = [[0, 2, 3, 6]]
out.data = [[a, b], [a, b] [c, d], [e, f], [e, f], [e, f]]
out.dims = [6, 2]
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
x (Tensor): The input variable which is a Tensor with or without LoD information, with the \
dims ``[M, K]``. The data type should be float32, float64, int32 \
or int64.
y (Variable): The input variable which is a LoDTensor with 1-level lod.
y (Tensor): The input variable which is a Tensor with 1-level lod.
name (str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns:
Tensor, The expanded variable which is a LoDTensor with the dims ``[N, K]``. \
Tensor, The expanded variable which is a Tensor with the dims ``[N, K]``. \
``N`` depends on the lod of ``y``, and the lod level must be 1. \
The data type is same as input.
......@@ -895,7 +874,7 @@ def sequence_expand_as(x, y, name=None):
# data: [1 2 3 4]
np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32')
y_lod_tensor = fluid.create_lod_tensor(np_data, [[3,3,1,1]], place)
print(y_lod_tensor)
#lod: [[0, 3, 6, 7, 8]]
# dim: 8, 1
......@@ -914,7 +893,7 @@ def sequence_expand_as(x, y, name=None):
# data: [1 1 1 2 2 2 3 4]
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand_as'
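A sketch of Case 1 above (assuming the ``paddle.static.nn.sequence_expand_as`` alias):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[4, 1], dtype='float32')
    y = paddle.static.data(name='y', shape=[8, 1], dtype='float32', lod_level=1)
    # row i of x is repeated to the length of sequence i in y
    out = paddle.static.nn.sequence_expand_as(x=x, y=y)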
......@@ -935,16 +914,16 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
This layer pads the sequences in the same batch to a common length (according
to ``maxlen``). The padding value is defined by ``pad_value``, and will be
appended to the tail of sequences. The result is a Python tuple ``(Out, Length)``:
the LodTensor ``Out`` is the padded sequences, and LodTensor ``Length`` is
the length information of input sequences. For removing padding data (unpadding operation), See :ref:`api_fluid_layers_sequence_unpad`.
the Tensor ``Out`` is the padded sequences, and the Tensor ``Length`` is
the length information of the input sequences. For removing padding data (the unpadding operation), see :ref:`api_static_nn_sequence_unpad`.
Note:
Please note that the input ``x`` should be LodTensor.
Please note that the input ``x`` should be a Tensor with LoD information.
.. code-block:: text
Case 1:
Given input 1-level LoDTensor x:
Given input 1-level Tensor x:
x.lod = [[0, 2, 5]]
x.data = [[a],[b],[c],[d],[e]]
pad_value:
......@@ -956,7 +935,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
Length.data = [2, 3] #Original sequences length
Case 2:
Given input 1-level LoDTensor x:
Given input 1-level Tensor x:
x.lod = [[0, 2, 5]]
x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]]
pad_value:
......@@ -968,7 +947,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
Length.data = [2, 3]
Case 3:
Given input 1-level LoDTensor x:
Given input 1-level Tensor x:
x.lod = [[0, 2, 5]]
x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]]
pad_value:
......@@ -982,10 +961,10 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
Args:
x (Variable): Input 1-level LodTensor with dims ``[M, K]``. The batch \
x (Tensor): Input 1-level Tensor with dims ``[M, K]``. The batch \
size is described by the lod info (the number of sequences). \
The data type should be float32, float64, int8, int32 or int64.
pad_value (Variable): Padding value. It can be a scalar or a 1D tensor \
pad_value (Tensor): Padding value. It can be a scalar or a 1D tensor \
with length ``K``. If it's a scalar, it will be automatically broadcasted \
to a Tensor. The data type should be as same as ``x``.
maxlen (int, optional): The length of padded sequences, None by default. \
......@@ -997,9 +976,9 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
None by default.
Returns:
tuple, A Python tuple (Out, Length): the 1st is a 0 level LodTensor \
tuple, A Python tuple (Out, Length): the first is a 0-level Tensor \
``Out``, with the shape ``[batch_size, maxlen, K]``; the second is the original \
sequences length infor ``Length``, which should be a 0-level 1D LodTensor. \
sequences' length info ``Length``, which should be a 0-level 1D Tensor. \
The size of ``Length`` is equal to batch size, and the data type is int64.
Examples:
......@@ -1017,20 +996,20 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_pad', **locals())
check_variable_and_dtype(
x,
'x',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_pad',
'paddle.static.nn.sequence_lod.sequence_pad',
)
check_variable_and_dtype(
pad_value,
'pad_value',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_pad',
'paddle.static.nn.sequence_lod.sequence_pad',
)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
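A sketch of Case 1 above (assuming the ``paddle.static.nn.sequence_pad`` alias; the scalar ``pad_value`` is broadcast):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
    pad_value = paddle.assign(np.array([0.0], dtype=np.float32))
    # Out: [batch_size, maxlen, 5]; Length: the original length of each sequence
    out, length = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value)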
......@@ -1054,39 +1033,39 @@ def sequence_unpad(x, length, name=None):
"""
Note:
The input of this API is Tensor and the output is LoDTensor. For padding operation, See:** :ref:`api_fluid_layers_sequence_pad`
The input of the OP is a padded Tensor and the output is a Tensor with LoD information. For the padding operation, see :ref:`api_static_nn_sequence_pad`
Remove the padding data from the input based on the length information and returns a LoDTensor.
Removes the padding data from the input based on the length information and returns a Tensor.
.. code-block:: text
Case 1:
Given input Variable **x**:
Given input Tensor **x**:
x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0]],
in which there are 3 sequences padded to length 5, and the actual length
specified by input Variable **length**:
specified by input Tensor **length**:
length.data = [2, 3, 4],
after unpadding, the output Variable will be:
after unpadding, the output Tensor will be:
out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
out.lod = [[0, 2, 5, 9]]
Args:
x(Variable): A Tensor which contains padding data, and its shape size can not be less than 2.
x(Tensor): A Tensor which contains padding data, and its number of dimensions can not be less than 2.
Supported data types: float32, float64, int32, int64.
length(Variable): A 1D Tensor that stores the actual length of each sample, and the Tensor
length(Tensor): A 1D Tensor that stores the actual length of each sample, and the Tensor
has the same shape as the 0th dimension of ``x``. Supported data types: int64.
name(str|None, optional): The default value is None. Normally there is no need for user to set this property.
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoDTensor whose recursive sequence length is consistent with the information of the length parameter and it has the same data type with input.
Tensor: A Tensor whose recursive sequence lengths are consistent with the information in the length parameter, and which has the same data type as the input.
Examples:
.. code-block:: python
......@@ -1106,17 +1085,20 @@ def sequence_unpad(x, length, name=None):
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_unpad', **locals())
check_variable_and_dtype(
x,
'x',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_unpad',
'paddle.static.nn.sequence_lod.sequence_unpad',
)
check_variable_and_dtype(
length, 'length', ['int64'], 'fluid.layers.sequence_unpad'
length,
'length',
['int64'],
'paddle.static.nn.sequence_lod.sequence_unpad',
)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
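A round-trip sketch with ``sequence_pad`` (assuming the ``paddle.static.nn.sequence_unpad`` alias):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
    pad_value = paddle.assign(np.array([0.0], dtype=np.float32))
    padded, length = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value)
    # drop the padding again, recovering the original variable-length sequences
    unpadded = paddle.static.nn.sequence_unpad(x=padded, length=length)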
......@@ -1135,18 +1117,18 @@ def sequence_reshape(input, new_dim):
"""
Note:
Only receives LoDTensor as input. If your input is Tensor, please use reshape Op.(fluid.layers.** :ref:`api_fluid_layers_reshape` ).
Only receives a Tensor with LoD information as input. If your input is a Tensor without LoD, please use the reshape Op ( :ref:`api_static_nn_reshape` ).
Only supports LoDTensor as input. Given :attr:`new_dim` ,
Only supports Tensor as input. Given :attr:`new_dim` ,
it will compute new shape according to original length of each sequence,
original dimensions and :attr:`new_dim` . Then it will output a new LoDTensor
containing :attr:`new_dim` . Currently it only supports 1-level LoDTensor.
original dimensions and :attr:`new_dim` . Then it will output a new Tensor
containing :attr:`new_dim` . Currently it only supports 1-level Tensor.
Please make sure that (original length * original dimensions) can be divided
by the :attr:`new_dim` with no remainder for each sequence.
.. code-block:: text
input is a LoDTensor:
input is a Tensor:
input.lod = [[0, 2, 6]]
input.data = [[1, 2], [3, 4],
[5, 6], [7, 8],
......@@ -1154,7 +1136,7 @@ def sequence_reshape(input, new_dim):
input.shape = [6, 2]
set new_dim = 4
out is a LoDTensor:
out is a Tensor:
out.lod = [[0, 1, 3]]
out.data = [[1, 2, 3, 4],
[5, 6, 7, 8],
......@@ -1164,12 +1146,12 @@ def sequence_reshape(input, new_dim):
Args:
input (Variable): 1-level LoDTensor with shape :math:`[M, K]` . The data type should
input (Tensor): 1-level Tensor with shape :math:`[M, K]` . The data type should
be int32, int64, float32 or float64.
new_dim (int): New dimension that the input LoDTensor is reshaped to.
new_dim (int): New dimension that the input Tensor is reshaped to.
Returns:
Variable: Reshaped LoDTensor according to new dimension. The data type is same as input.
Tensor: Reshaped Tensor according to new dimension. The data type is same as input.
Examples:
.. code-block:: python
......@@ -1181,14 +1163,14 @@ def sequence_reshape(input, new_dim):
x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4)
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_reshape', **locals())
check_variable_and_dtype(
input,
'input',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_reshape',
'static.nn.sequence_reshape',
)
out = helper.create_variable_for_type_inference(helper.input_dtype())
helper.append_op(
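A sketch of the example above (assuming the ``paddle.static.nn.sequence_reshape`` alias; per sequence, length * original dimension must be divisible by ``new_dim``):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[None, 2], dtype='float32', lod_level=1)
    # per sequence, (length * 2) elements are regrouped into rows of 4
    out = paddle.static.nn.sequence_reshape(input=x, new_dim=4)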
......@@ -1204,7 +1186,7 @@ def sequence_scatter(input, index, updates, name=None):
"""
Note:
The index and updates parameters of the OP must be LoDTensor.
The index and updates parameters of the OP must be Tensors with LoD information.
Adds the updates data to the corresponding input according to the index.
......@@ -1242,15 +1224,15 @@ def sequence_scatter(input, index, updates, name=None):
out.dims = X.dims = [3, 6]
Args:
input (Variable): A Tensor with shape of :math:`[N, k_1... k_n]`. Supported data types: float32, float64, int32, int64.
index (Variable): A LoDTensor contains index information. Its LoD level must be 1 and its data type can be int32 or int64.
updates (Variable): A LodTensor contains updates information. It has the same LoD level with the index and has the
input (Tensor): A Tensor with shape of :math:`[N, k_1... k_n]`. Supported data types: float32, float64, int32, int64.
index (Tensor): A Tensor that contains index information. Its LoD level must be 1 and its data type can be int32 or int64.
updates (Tensor): A Tensor that contains the update values. It has the same LoD level as the index and the
same data type as the input. Supported data types: float32, float64, int32, int64.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A Tensor which has been updated. It has the same shape and data type with input.
Tensor: A Tensor which has been updated. It has the same shape and data type as the input.
Examples:
......@@ -1266,7 +1248,7 @@ def sequence_scatter(input, index, updates, name=None):
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_scatter', **locals())
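A minimal sketch (assuming the ``paddle.static.nn.sequence_scatter`` alias; the LoD of ``index`` decides which rows of ``input`` receive which updates):

.. code-block:: python

    import paddle

    paddle.enable_static()
    input = paddle.static.data(name='x', shape=[3, 6], dtype='float32')
    # 1-level LoD splits the 12 (position, value) pairs across the 3 rows of input
    index = paddle.static.data(name='index', shape=[12, 1], dtype='int64', lod_level=1)
    updates = paddle.static.data(name='updates', shape=[12, 1], dtype='float32', lod_level=1)
    out = paddle.static.nn.sequence_scatter(input, index, updates)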
......@@ -1300,11 +1282,11 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
r"""
Generate a new sequence for the input index sequence with \
shape ``[d_1, win_size]``, which enumerates all the \
sub-sequences with length ``win_size`` of the input with \
shape ``[d_1, 1]``, and padded by ``pad_value`` if necessary in generation.
Please note that the `input` must be LodTensor.
Please note that the `input` must be a Tensor with LoD information.
.. code-block:: text
......@@ -1324,8 +1306,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
Args:
input (Variable): The input variable which is a index sequence, \
which should be a LodTensor with shape ``[d_1, 1]`` and 1-level lod info. \
input (Tensor): The input variable which is an index sequence, \
which should be a Tensor with shape ``[d_1, 1]`` and 1-level lod info. \
The data type should be int32 or int64.
win_size (int): The window size for enumerating all sub-sequences.
pad_value (int, optional): The padding value, default 0.
......@@ -1334,7 +1316,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
None by default.
Returns:
Tensor, The enumerate sequence variable which is a LoDTensor with \
Tensor, The enumerated sequence variable which is a Tensor with \
shape ``[d_1, win_size]`` and 1-level lod info. \
The data type is same as ``input``.
......@@ -1348,7 +1330,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0)
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
input, 'input', ['int32', 'int64'], 'sequence_enumerate'
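A sketch of the enumeration above (assuming the ``paddle.static.nn.sequence_enumerate`` alias):

.. code-block:: python

    import paddle

    paddle.enable_static()
    # index sequence of shape [d_1, 1] with 1-level lod info
    x = paddle.static.data(name='x', shape=[-1, 1], dtype='int32', lod_level=1)
    # every length-3 window per sequence, padded with 0 near sequence ends
    out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0)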
......@@ -1394,9 +1376,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
[0, 0, 0, 0]]
Args:
x (Variable): Input tensor of sequence_mask layer, \
x (Tensor): Input tensor of sequence_mask layer, \
whose elements are integers less than :code:`maxlen`. \
Tensor or LodTensor with shape [d_1, d_2, ..., d_n].
Tensor with shape [d_1, d_2, ..., d_n].
maxlen (int, optional): Maximum length of the sequence. If :code:`maxlen` \
is None, it would be replaced with :math:`max(x)`.
dtype (np.dtype|paddle.dtype|str, optional): Data type of the output, \
......@@ -1432,15 +1414,15 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
def sequence_reverse(x, name=None):
"""
Note:
Only receives LoDTensor as input. If your input is Tensor, please use reverse Op.(fluid.layers.** :ref:`api_fluid_layers_reverse` ).
Only receives a Tensor with LoD information as input. If your input is a Tensor without LoD, please use the reverse Op ( :ref:`api_static_nn_reverse` ).
Only supports LoDTensor as input. It will reverse each sequence for input LoDTensor.
Currently it only supports 1-level LoDTensor. This operator is very useful when building a
Only supports Tensor as input. It will reverse each sequence of the input Tensor.
Currently it only supports 1-level Tensor. This operator is very useful when building a
reverse :ref:`api_fluid_layers_DynamicRNN` network.
.. code-block:: text
input(x) is a LoDTensor:
input(x) is a Tensor:
x.lod = [[0, 2, 5]]
x.data = [[1, 2, 3, 4],
[5, 6, 7, 8],
......@@ -1449,7 +1431,7 @@ def sequence_reverse(x, name=None):
[17,18, 19, 20]]
x.shape = [5, 4]
output LoDTensor with same shape and LoD info:
output Tensor with same shape and LoD info:
out.lod = [[0, 2, 5]]
out.data = [[5, 6, 7, 8],
[1, 2, 3, 4],
......@@ -1459,13 +1441,13 @@ def sequence_reverse(x, name=None):
out.shape = [5, 4]
Args:
x(Variable): LoDTensor with 1-level LoD info. Currently it only supports 1-level LoDTensor.
x(Tensor): Tensor with 1-level LoD info. Currently it only supports 1-level Tensor.
The data type should be float32, float64, int8, int32 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor reversed from input. The data type is same with input.
Tensor: Tensor reversed from the input. The data type is the same as the input.
Examples:
.. code-block:: python
......@@ -1477,14 +1459,14 @@ def sequence_reverse(x, name=None):
x_reversed = paddle.static.nn.sequence_reverse(x)
"""
assert (
not in_dygraph_mode()
not _non_static_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper("sequence_reverse", **locals())
check_variable_and_dtype(
x,
'x',
['float32', 'float64', 'int8', 'int32', 'int64'],
'fluid.layers.sequence_reverse',
'static.nn.sequence_reverse',
)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
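A minimal sketch (assuming the ``paddle.static.nn.sequence_reverse`` alias):

.. code-block:: python

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[None, 4], dtype='float32', lod_level=1)
    # reverses the rows within each sequence, keeping the LoD boundaries intact
    x_reversed = paddle.static.nn.sequence_reverse(x)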
......