Unverified · Commit b38142bc · authored by mhy-666, committed by GitHub

Move the implementation of fluid.layers.sequence_lod to paddle.static.nn.sequence_lod (#49604)

Parent 2deada9a
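The migration this commit performs is mechanical: every `fluid.layers.sequence_*` call becomes the corresponding `paddle.static.nn.sequence_lod.sequence_*` call, reformatted across multiple lines where the longer name overflows the line limit. A minimal before/after sketch (static graph; the tensor names here are illustrative, not taken from the diff):

    import paddle

    paddle.enable_static()

    # sequence ops consume variable-length (LoD) inputs, hence lod_level=1
    x = paddle.static.data(name='x', shape=[-1, 32], dtype='float32', lod_level=1)

    # before this commit:
    #     pool = fluid.layers.sequence_pool(input=x, pool_type='sum')
    # after this commit:
    pool = paddle.static.nn.sequence_lod.sequence_pool(input=x, pool_type='sum')

    # sequence_mask moves the same way; it takes plain lengths, not LoD data
    seq_len = paddle.static.data(name='seq_len', shape=[-1], dtype='int64')
    mask = paddle.static.nn.sequence_lod.sequence_mask(seq_len, maxlen=10)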
@@ -81,7 +81,9 @@ def bow_net(
     emb = fluid.layers.embedding(
         input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
     )
-    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+    bow = paddle.static.nn.sequence_lod.sequence_pool(
+        input=emb, pool_type='sum'
+    )
     bow_tanh = paddle.tanh(bow)
     fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
     fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
...
@@ -113,7 +113,9 @@ def model():
         ),
         is_sparse=True,
     )
-    dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, pool_type="sum")
+    dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
+        input=dnn_embedding, pool_type="sum"
+    )
     dnn_out = dnn_pool
     for i, dim in enumerate(dnn_layer_dims[1:]):
         fc = paddle.static.nn.fc(
@@ -138,7 +140,9 @@ def model():
         ),
         is_sparse=True,
     )
-    lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
+    lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
+        input=lr_embbding, pool_type="sum"
+    )
     merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
...
@@ -24,7 +24,6 @@ from . import math_op_patch
 from .math_op_patch import *
 from .learning_rate_scheduler import *
 from .collective import *
-from .sequence_lod import *
 
 __all__ = []
 __all__ += nn.__all__
@@ -32,4 +31,3 @@ __all__ += io.__all__
 __all__ += tensor.__all__
 __all__ += control_flow.__all__
 __all__ += learning_rate_scheduler.__all__
-__all__ += sequence_lod.__all__
@@ -296,7 +296,7 @@ def sequence_conv_pool(
     and :ref:`api_fluid_layers_sequence_pool` .
 
     Args:
-        input (Variable): 2-D LoDTensor, the input of sequence_conv,
+        input (Tensor): 2-D LoDTensor, the input of sequence_conv,
             which supports variable-time length input sequence.
             The underlying of input is a matrix with shape
             (T, N), where T is the total time steps in this mini-batch and N is
@@ -320,7 +320,7 @@ def sequence_conv_pool(
         It is a 2-D Tensor, with the same data type as :attr:`input`
 
     Return Type:
-        Variable
+        Tensor
 
     Examples:
         .. code-block:: python
@@ -341,7 +341,7 @@ def sequence_conv_pool(
     """
     check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
-    conv_out = layers.sequence_conv(
+    conv_out = paddle.static.nn.sequence_lod.sequence_conv(
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
@@ -350,7 +350,9 @@ def sequence_conv_pool(
         act=act,
     )
-    pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type)
+    pool_out = paddle.static.nn.sequence_lod.sequence_pool(
+        input=conv_out, pool_type=pool_type
+    )
     return pool_out
...
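The hunks above change only the internals of `sequence_conv_pool`; its public signature is untouched, so callers keep working. A usage sketch, under the assumption that this helper is `paddle.fluid.nets.sequence_conv_pool` (which the docstring matches):

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    # 2-D LoDTensor input of shape (T, N), as the docstring above requires
    words = paddle.static.data(
        name='words', shape=[-1, 10], dtype='float32', lod_level=1
    )
    # convolve each sequence, then pool it to a single vector; internally this
    # now routes through paddle.static.nn.sequence_lod.sequence_conv and
    # .sequence_pool rather than the removed fluid.layers wrappers
    out = fluid.nets.sequence_conv_pool(
        input=words, num_filters=16, filter_size=3, act='tanh', pool_type='max'
    )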
@@ -128,7 +128,7 @@ def get_mov_combined_features():
         input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE
     )
-    mov_categories_hidden = layers.sequence_pool(
+    mov_categories_hidden = paddle.static.nn.sequence_lod.sequence_pool(
         input=mov_categories_emb, pool_type="sum"
     )
...
@@ -64,7 +64,7 @@ class TestDistCTR2x2(TestDistRunnerBase):
             ),
             is_sparse=IS_SPARSE,
         )
-        dnn_pool = fluid.layers.sequence_pool(
+        dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=dnn_embedding, pool_type="sum"
         )
         dnn_out = dnn_pool
@@ -91,7 +91,9 @@ class TestDistCTR2x2(TestDistRunnerBase):
             ),
             is_sparse=IS_SPARSE,
         )
-        lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
+        lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
+            input=lr_embbding, pool_type="sum"
+        )
         merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
...
@@ -112,7 +112,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
             is_sparse=True,
             padding_idx=0,
         )
-        dnn_pool = fluid.layers.sequence_pool(
+        dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=dnn_embedding, pool_type="sum"
         )
         dnn_out = dnn_pool
@@ -140,7 +140,9 @@ class TestDistCTR2x2(FleetDistRunnerBase):
             is_sparse=True,
             padding_idx=0,
         )
-        lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
+        lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
+            input=lr_embbding, pool_type="sum"
+        )
         merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
...
@@ -82,7 +82,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
             ),
             is_sparse=True,
         )
-        dnn_pool = fluid.layers.sequence_pool(
+        dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=dnn_embedding, pool_type="sum"
         )
         dnn_out = dnn_pool
@@ -98,7 +98,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
             ),
             is_sparse=True,
         )
-        lr_pool = fluid.layers.sequence_pool(
+        lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=lr_embbding, pool_type="sum"
         )
...
@@ -131,7 +131,9 @@ def train_network(
     )
     q_emb = paddle.reshape(q_emb, [-1, emb_dim])
     # vsum
-    q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+    q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+        input=q_emb, pool_type='sum'
+    )
     q_ss = paddle.nn.functional.softsign(q_sum)
     # fc layer after conv
     q_fc = paddle.static.nn.fc(
@@ -158,7 +160,9 @@ def train_network(
     )
     pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
     # vsum
-    pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+    pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+        input=pt_emb, pool_type='sum'
+    )
     pt_ss = paddle.nn.functional.softsign(pt_sum)
     # fc layer
     pt_fc = paddle.static.nn.fc(
@@ -184,7 +188,9 @@ def train_network(
     )
     nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
     # vsum
-    nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+    nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+        input=nt_emb, pool_type='sum'
+    )
     nt_ss = paddle.nn.functional.softsign(nt_sum)
     # fc layer
     nt_fc = paddle.static.nn.fc(
...
@@ -103,7 +103,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
            entry=entry,
             param_attr=fluid.ParamAttr(name="deep_embedding", initializer=init),
         )
-        dnn_pool = fluid.layers.sequence_pool(
+        dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=dnn_embedding, pool_type="sum"
         )
         dnn_out = dnn_pool
@@ -131,7 +131,9 @@ class TestDistCTR2x2(FleetDistRunnerBase):
             ),
         )
-        lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
+        lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
+            input=lr_embbding, pool_type="sum"
+        )
         merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
         predict = paddle.static.nn.fc(
             x=merge_layer, size=2, activation='softmax'
...
@@ -235,7 +235,7 @@ class BaseModel(fluid.dygraph.Layer):
         max_seq_len = src_emb.shape[0]
 
-        enc_len_mask = fluid.layers.sequence_mask(
+        enc_len_mask = paddle.static.nn.sequence_lod.sequence_mask(
             src_sequence_length, maxlen=max_seq_len, dtype="float32"
         )
         enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])
@@ -301,7 +301,7 @@ class BaseModel(fluid.dygraph.Layer):
         )
         loss = paddle.squeeze(loss, axes=[2])
         max_tar_seq_len = paddle.shape(tar)[1]
-        tar_mask = fluid.layers.sequence_mask(
+        tar_mask = paddle.static.nn.sequence_lod.sequence_mask(
             tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
         )
         loss = loss * tar_mask
@@ -337,7 +337,7 @@ class BaseModel(fluid.dygraph.Layer):
         max_seq_len = src_emb.shape[0]
 
-        enc_len_mask = fluid.layers.sequence_mask(
+        enc_len_mask = paddle.static.nn.sequence_lod.sequence_mask(
             src_sequence_length, maxlen=max_seq_len, dtype="float32"
         )
         enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])
@@ -754,7 +754,7 @@ class AttentionModel(fluid.dygraph.Layer):
         max_seq_len = src_emb.shape[0]
 
-        enc_len_mask = fluid.layers.sequence_mask(
+        enc_len_mask = paddle.static.nn.sequence_lod.sequence_mask(
             src_sequence_length, maxlen=max_seq_len, dtype="float32"
         )
         enc_padding_mask = enc_len_mask - 1.0
@@ -839,7 +839,7 @@ class AttentionModel(fluid.dygraph.Layer):
         )
         loss = paddle.squeeze(loss, axes=[2])
         max_tar_seq_len = paddle.shape(tar)[1]
-        tar_mask = fluid.layers.sequence_mask(
+        tar_mask = paddle.static.nn.sequence_lod.sequence_mask(
             tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
         )
         loss = loss * tar_mask
...
@@ -74,7 +74,7 @@ def net(batch_size=4, lr=0.01):
         ),
         is_sparse=True,
     )
-    dnn_pool = fluid.layers.sequence_pool(
+    dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
         input=dnn_embedding, pool_type="sum"
     )
     dnn_out = dnn_pool
@@ -90,7 +90,9 @@ def net(batch_size=4, lr=0.01):
         ),
         is_sparse=True,
     )
-    lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum")
+    lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
+        input=lr_embbding, pool_type="sum"
+    )
     with fluid.device_guard("gpu"):
         for i, dim in enumerate(dnn_layer_dims[1:]):
...
@@ -176,7 +176,7 @@ class TestSequenceMaskOpError(unittest.TestCase):
         def test_Variable():
             # the input must be Variable
-            fluid.layers.sequence_mask(input_data, maxlen=4)
+            paddle.static.nn.sequence_lod.sequence_mask(input_data, maxlen=4)
 
         self.assertRaises(TypeError, test_Variable)
...
@@ -23,8 +23,6 @@ import numpy as np
 from convert import convert_params_for_net
 from rnn_numpy import GRU, LSTM, SimpleRNN
 
-from paddle.fluid.layers import sequence_mask
-
 bidirectional_list = ["bidirectional", "bidirect"]
@@ -91,7 +89,9 @@ class TestSimpleRNN(unittest.TestCase):
         y1, h1 = rnn1(x, sequence_length=sequence_length)
 
         seq_len = paddle.to_tensor(sequence_length)
-        mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+        mask = paddle.static.nn.sequence_lod.sequence_mask(
+            seq_len, dtype=paddle.get_default_dtype()
+        )
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
         y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
@@ -174,7 +174,9 @@ class TestGRU(unittest.TestCase):
         y1, h1 = rnn1(x, sequence_length=sequence_length)
 
         seq_len = paddle.to_tensor(sequence_length)
-        mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+        mask = paddle.static.nn.sequence_lod.sequence_mask(
+            seq_len, dtype=paddle.get_default_dtype()
+        )
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
         y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
@@ -263,7 +265,9 @@ class TestLSTM(unittest.TestCase):
         y1, (h1, c1) = rnn1(x, sequence_length=sequence_length)
 
         seq_len = paddle.to_tensor(sequence_length)
-        mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+        mask = paddle.static.nn.sequence_lod.sequence_mask(
+            seq_len, dtype=paddle.get_default_dtype()
+        )
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
         y2, (h2, c2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
@@ -303,7 +307,9 @@ def predict_test_util(place, mode, stop_gradient=True):
     x = paddle.randn((4, 10, 16))
     x.stop_gradient = stop_gradient
     seq_len = paddle.to_tensor(np.array([10, 6, 8, 5]))
-    mask = sequence_mask(seq_len, maxlen=10, dtype=x.dtype)
+    mask = paddle.static.nn.sequence_lod.sequence_mask(
+        seq_len, maxlen=10, dtype=x.dtype
+    )
     mask = paddle.unsqueeze(mask, [2])
     rnn = Net()
     y, _ = rnn(x)
...
@@ -15,7 +15,7 @@
 import paddle
 
 paddle.set_default_dtype("float64")
-from paddle.fluid.layers import sequence_mask
 
 paddle.enable_static()
@@ -161,7 +161,9 @@ class TestSimpleRNN(unittest.TestCase):
                 dtype=paddle.framework.get_default_dtype(),
             )
             seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
-            mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+            mask = paddle.static.nn.sequence_lod.sequence_mask(
+                seq_len, dtype=paddle.get_default_dtype()
+            )
             if self.time_major:
                 mask = paddle.transpose(mask, [1, 0])
             y, h = rnn2(x_data, sequence_length=seq_len)
@@ -316,7 +318,9 @@ class TestGRU(unittest.TestCase):
                 dtype=paddle.framework.get_default_dtype(),
             )
             seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
-            mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+            mask = paddle.static.nn.sequence_lod.sequence_mask(
+                seq_len, dtype=paddle.get_default_dtype()
+            )
             if self.time_major:
                 mask = paddle.transpose(mask, [1, 0])
             y, h = rnn2(x_data, sequence_length=seq_len)
@@ -477,7 +481,9 @@ class TestLSTM(unittest.TestCase):
                 dtype=paddle.framework.get_default_dtype(),
             )
             seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
-            mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+            mask = paddle.static.nn.sequence_lod.sequence_mask(
+                seq_len, dtype=paddle.get_default_dtype()
+            )
             if self.time_major:
                 mask = paddle.transpose(mask, [1, 0])
             y, (h, c) = rnn2(x_data, sequence_length=seq_len)
...
@@ -21,8 +21,6 @@ import numpy as np
 from convert import convert_params_for_cell
 from rnn_numpy import RNN, BiRNN, GRUCell
 
-from paddle.fluid.layers import sequence_mask
-
 
 class TestRNNWrapper(unittest.TestCase):
     def __init__(self, time_major=True, direction="forward", place="cpu"):
@@ -91,7 +89,9 @@ class TestRNNWrapper(unittest.TestCase):
         y1, h1 = rnn1(x, sequence_length=sequence_length)
 
         seq_len = paddle.to_tensor(sequence_length)
-        mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+        mask = paddle.static.nn.sequence_lod.sequence_mask(
+            seq_len, dtype=paddle.get_default_dtype()
+        )
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
         y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
@@ -174,7 +174,9 @@ class TestBiRNNWrapper(unittest.TestCase):
         y1, (fw_h1, bw_h1) = rnn1(x, sequence_length=sequence_length)
 
         seq_len = paddle.to_tensor(sequence_length)
-        mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
+        mask = paddle.static.nn.sequence_lod.sequence_mask(
+            seq_len, dtype=paddle.get_default_dtype()
+        )
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
         y2, (fw_h2, bw_h2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
...
@@ -21,7 +21,6 @@ sys.path.append("../")
 from op_test import OpTest
 
 import paddle
-from paddle import fluid
 
 
 class TestSequenceConcat(OpTest):
@@ -92,24 +91,29 @@ class TestSequenceConcatOpError(unittest.TestCase):
             x_data = paddle.static.data(
                 name='x', shape=[-1, 4], dtype='float32'
             )
-            fluid.layers.sequence_concat(input=x_data)
+            paddle.static.nn.sequence_lod.sequence_concat(input=x_data)
 
         self.assertRaises(TypeError, test_input_list)
 
         def test_variable1():
             # the input element type must be Variable
             x1_data = np.array([[3, 5]]).astype('float32')
             y1_data = paddle.static.data(
                 name='y1', shape=[-1, 4], dtype='float32'
             )
-            fluid.layers.sequence_concat(input=[x1_data, y1_data])
+            paddle.static.nn.sequence_lod.sequence_concat(
+                input=[x1_data, y1_data]
+            )
 
         def test_variable2():
             x2_data = np.array([[3, 5]]).astype('float32')
             y2_data = paddle.static.data(
                 name='y2', shape=[-1, 4], dtype='float32'
             )
-            fluid.layers.sequence_concat(input=[y2_data, x2_data])
+            paddle.static.nn.sequence_lod.sequence_concat(
+                input=[y2_data, x2_data]
+            )
 
         for i in range(2):
             if i == 0:
@@ -126,7 +130,7 @@ class TestSequenceConcatOpError(unittest.TestCase):
                 name="y3", shape=[-1, 3, 5], dtype='int16'
             )
             input_list = [x3_data, y3_data]
-            fluid.layers.sequence_concat(input=input_list)
+            paddle.static.nn.sequence_lod.sequence_concat(input=input_list)
 
         self.assertRaises(TypeError, test_dtype)
...
@@ -286,7 +286,7 @@ class TestSeqConvApi(unittest.TestCase):
         import paddle.fluid as fluid
 
         x = paddle.static.data('x', shape=[-1, 32], lod_level=1)
-        y = fluid.layers.sequence_conv(
+        y = paddle.static.nn.sequence_lod.sequence_conv(
             input=x, num_filters=2, filter_size=3, padding_start=None
         )
...
@@ -20,7 +20,7 @@ import numpy as np
 sys.path.append("../")
 from op_test import OpTest
 
-import paddle.fluid as fluid
+import paddle
 from paddle.fluid import Program, program_guard
@@ -92,16 +92,25 @@ class TestSequenceExpandAsOpError(unittest.TestCase):
         with program_guard(Program(), Program()):
             # the input x must be Variable
             x1 = np.random.random((2, 4)).astype("float32")
-            self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x1)
+            self.assertRaises(
+                TypeError, paddle.static.nn.sequence_lod.sequence_expand_as, x1
+            )
 
             # the dtype of input x must be float32, float64, int32 or int64
-            x2 = fluid.data(name='x2', shape=[None, 4], dtype="bool")
-            self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x2)
+            x2 = paddle.static.data(name='x2', shape=[None, 4], dtype="bool")
+            self.assertRaises(
+                TypeError, paddle.static.nn.sequence_lod.sequence_expand_as, x2
+            )
 
             # the input y must be Variable
-            x3 = fluid.data(name='x3', shape=[None, 4], dtype="float32")
+            x3 = paddle.static.data(name='x3', shape=[None, 4], dtype="float32")
             y = np.random.random((2, 4)).astype("float32")
-            self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x3, y)
+            self.assertRaises(
+                TypeError,
+                paddle.static.nn.sequence_lod.sequence_expand_as,
+                x3,
+                y,
+            )
 
 
 if __name__ == '__main__':
...
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
 from paddle.fluid.framework import Program, program_guard
 
 sys.path.append("../")
@@ -30,7 +29,7 @@ class TestSequenceFirstStepOpError(unittest.TestCase):
         def test_Variable():
             # the input must be Variable
             input_data = np.random.randint(1, 5, [4]).astype("int64")
-            fluid.layers.sequence_last_step(input_data)
+            paddle.static.nn.sequence_lod.sequence_last_step(input_data)
 
         self.assertRaises(TypeError, test_Variable)
@@ -42,7 +41,7 @@ class TestSequenceFirstStepOpError(unittest.TestCase):
                 dtype='int64',
                 lod_level=1,
             )
-            fluid.layers.sequence_last_step(type_data)
+            paddle.static.nn.sequence_lod.sequence_last_step(type_data)
 
         self.assertRaises(TypeError, test_input_dtype)
...
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
 from paddle.fluid.framework import Program, program_guard
 
 sys.path.append("../")
@@ -30,7 +29,7 @@ class TestSequenceLastStepOpError(unittest.TestCase):
         def test_Variable():
             # the input must be Variable
             input_data = np.random.randint(1, 5, [4]).astype("int64")
-            fluid.layers.sequence_last_step(input_data)
+            paddle.static.nn.sequence_lod.sequence_last_step(input_data)
 
         self.assertRaises(TypeError, test_Variable)
@@ -42,7 +41,7 @@ class TestSequenceLastStepOpError(unittest.TestCase):
                 dtype='int64',
                 lod_level=1,
            )
-            fluid.layers.sequence_last_step(type_data)
+            paddle.static.nn.sequence_lod.sequence_last_step(type_data)
 
         self.assertRaises(TypeError, test_input_dtype)
...
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
 from paddle.fluid.framework import (
     Program,
     convert_np_dtype_to_dtype_,
@@ -167,7 +166,9 @@ class TestSequenceMaskOpError(unittest.TestCase):
         def test_Variable():
             # the input must be Variable
-            fluid.layers.sequence_mask(input_data, maxlen=4)
+            paddle.static.nn.sequence_lod.sequence_mask(
+                input_data, maxlen=4
+            )
 
         self.assertRaises(TypeError, test_Variable)
...
@@ -159,7 +159,7 @@ class TestSequencePadOpError(unittest.TestCase):
             pad_value = fluid.layers.assign(
                 input=np.array([0.0], dtype=np.float32)
             )
-            fluid.layers.sequence_pad(x=x, pad_value=pad_value)
+            paddle.static.nn.sequence_lod.sequence_pad(x=x, pad_value=pad_value)
 
         self.assertRaises(TypeError, test_x_variable)
@@ -168,7 +168,9 @@ class TestSequencePadOpError(unittest.TestCase):
                 name='x1', shape=[-1, 10, 5], dtype='float32', lod_level=1
             )
             pad_value1 = np.array([0.0], dtype=np.float32)
-            fluid.layers.sequence_pad(x=x1, pad_value=pad_value1)
+            paddle.static.nn.sequence_lod.sequence_pad(
+                x=x1, pad_value=pad_value1
+            )
 
         self.assertRaises(TypeError, test_pad_value_variable)
@@ -179,14 +181,18 @@ class TestSequencePadOpError(unittest.TestCase):
             pad_value2 = fluid.layers.assign(
                 input=np.array([0.0], dtype=np.int32)
             )
-            fluid.layers.sequence_pad(x=x2, pad_value=pad_value2)
+            paddle.static.nn.sequence_lod.sequence_pad(
+                x=x2, pad_value=pad_value2
+            )
 
         self.assertRaises(TypeError, test_dtype)
 
     def test_length_dtype(self):
         x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
         pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
-        out, length = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
+        out, length = paddle.static.nn.sequence_lod.sequence_pad(
+            x=x, pad_value=pad_value
+        )
         # check if the dtype of length is int64 in compile time
         self.assertEqual(length.dtype, core.VarDesc.VarType.INT64)
...
@@ -22,8 +22,6 @@ import paddle
 sys.path.append("../")
 from op_test import OpTest
 
-import paddle.fluid as fluid
-
 
 class TestSequenceReshape(OpTest):
     def init_data(self):
@@ -90,7 +88,7 @@ class TestSequenceReshapeOpError(unittest.TestCase):
     def test_error(self):
         def test_variable():
             x = np.random.random((2, 4)).astype("float32")
-            fluid.layers.sequence_reshape(x=x, new_dim=4)
+            paddle.static.nn.sequence_lod.sequence_reshape(x=x, new_dim=4)
 
         self.assertRaises(TypeError, test_variable)
@@ -101,7 +99,7 @@ class TestSequenceReshapeOpError(unittest.TestCase):
                 dtype='float16',
                 lod_level=1,
             )
-            fluid.layers.sequence_reshape(x=x1, new_dim=4)
+            paddle.static.nn.sequence_lod.sequence_reshape(x=x1, new_dim=4)
 
         self.assertRaises(TypeError, test_dtype)
...
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
 
 sys.path.append("../")
 from op_test import OpTest
@@ -101,16 +100,17 @@ class TestSequenceReverseOpError(unittest.TestCase):
         def test_variable():
             # the input type must be Variable
             x_data = np.random.random((2, 4)).astype("float32")
-            fluid.layers.sequence_reverse(x=x_data)
+            paddle.static.nn.sequence_lod.sequence_reverse(x=x_data)
 
         self.assertRaises(TypeError, test_variable)
 
         def test_dtype():
             # dtype must be 'float32', 'float64', 'int8', 'int32', 'int64'
             x2_data = paddle.static.data(
                 name='x2', shape=[-1, 4], dtype='float16'
             )
-            fluid.layers.sequence_reverse(x=x2_data)
+            paddle.static.nn.sequence_lod.sequence_reverse(x=x2_data)
 
         self.assertRaises(TypeError, test_dtype)
...
@@ -20,7 +20,7 @@ import numpy as np
 sys.path.append("../")
 from op_test import OpTest
 
-import paddle.fluid as fluid
+import paddle
 
 
 class TestSequenceUnpadOp(OpTest):
@@ -91,29 +91,29 @@ class TestSequenceUnpadOpError(unittest.TestCase):
     def test_error(self):
         def test_x_variable():
             x = np.random.random((10, 5)).astype("float64")
-            len = fluid.data(name='length2', shape=[10], dtype='int64')
-            fluid.layers.sequence_pad(x=x, length=len)
+            len = paddle.static.data(name='length2', shape=[10], dtype='int64')
+            paddle.static.nn.sequence_lod.sequence_pad(x=x, length=len)
 
         self.assertRaises(TypeError, test_x_variable)
 
         def test_length_variable():
-            x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
+            x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32')
             len1 = np.random.random((10)).astype("int64")
-            fluid.layers.sequence_pad(x=x1, length=len1)
+            paddle.static.nn.sequence_lod.sequence_pad(x=x1, length=len1)
 
         self.assertRaises(TypeError, test_length_variable)
 
         def test_x_dtype():
-            x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
-            len2 = fluid.data(name='length2', shape=[10], dtype='int64')
-            fluid.layers.sequence_pad(x=x2, length=len2)
+            x2 = paddle.static.data(name='x2', shape=[10, 5], dtype='float16')
+            len2 = paddle.static.data(name='length2', shape=[10], dtype='int64')
+            paddle.static.nn.sequence_lod.sequence_pad(x=x2, length=len2)
 
         self.assertRaises(TypeError, test_x_dtype)
 
         def test_length_dtype():
-            x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
-            len3 = fluid.data(name='length3', shape=[10], dtype='int32')
-            fluid.layers.sequence_pad(x=x3, length=len3)
+            x3 = paddle.static.data(name='x3', shape=[10, 5], dtype='float64')
+            len3 = paddle.static.data(name='length3', shape=[10], dtype='int32')
+            paddle.static.nn.sequence_lod.sequence_pad(x=x3, length=len3)
 
         self.assertRaises(TypeError, test_length_dtype)
...
@@ -96,7 +96,9 @@ def bow_net(
     emb = fluid.layers.embedding(
         input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
     )
-    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+    bow = paddle.static.nn.sequence_lod.sequence_pool(
+        input=emb, pool_type='sum'
+    )
     bow_tanh = paddle.tanh(bow)
     fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
     fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
...
@@ -46,7 +46,9 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase):
             is_sparse=True,
         )
-        pool = fluid.layers.sequence_pool(input=emb, pool_type="sum")
+        pool = paddle.static.nn.sequence_lod.sequence_pool(
+            input=emb, pool_type="sum"
+        )
         z = fluid.layers.concat(input=[x, pool], axis=1)
         y_predict = paddle.static.nn.fc(x=z, size=1)
         y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
...
@@ -949,7 +949,9 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
                 name=slot, shape=[-1, 1], dtype="int64", lod_level=1
             )
             var = fluid.layers.cast(x=data, dtype='float32')
-            pool = fluid.layers.sequence_pool(input=var, pool_type='AVERAGE')
+            pool = paddle.static.nn.sequence_lod.sequence_pool(
+                input=var, pool_type='AVERAGE'
+            )
 
             slots_vars.append(data)
             poolings.append(pool)
...
@@ -84,7 +84,9 @@ class TestPSMinimize(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -114,7 +116,9 @@ class TestPSMinimize(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -143,7 +147,9 @@ class TestPSMinimize(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -86,7 +86,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -149,7 +153,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -84,7 +84,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -114,7 +116,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -143,7 +147,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -87,7 +87,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -117,7 +119,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -146,7 +150,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -88,7 +88,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -147,7 +151,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -87,7 +87,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         q_ss = paddle.static.nn.data_norm(input=q_ss)
         # fc layer after conv
@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -147,7 +151,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -86,7 +86,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -149,7 +153,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -84,7 +84,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -114,7 +116,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -143,7 +147,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -86,7 +86,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -118,7 +120,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -149,7 +153,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -84,7 +84,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -114,7 +116,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -143,7 +147,9 @@ class TestPSPassWithBow(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -222,7 +222,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
                 name="deep_embedding", initializer=init
             ),
         )
-        dnn_pool = fluid.layers.sequence_pool(
+        dnn_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=dnn_embedding, pool_type="sum"
         )
         dnn_out = dnn_pool
@@ -249,7 +249,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
             ),
         )
-        lr_pool = fluid.layers.sequence_pool(
+        lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
             input=lr_embbding, pool_type="sum"
         )
         merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
...
@@ -82,7 +82,9 @@ class TestSPMT(unittest.TestCase):
         )
         q_emb = paddle.reshape(q_emb, [-1, emb_dim])
         # vsum
-        q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
+        q_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=q_emb, pool_type='sum'
+        )
         q_ss = paddle.nn.functional.softsign(q_sum)
         # fc layer after conv
         q_fc = paddle.static.nn.fc(
@@ -112,7 +114,9 @@ class TestSPMT(unittest.TestCase):
         )
         pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
         # vsum
-        pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
+        pt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=pt_emb, pool_type='sum'
+        )
         pt_ss = paddle.nn.functional.softsign(pt_sum)
         # fc layer
         pt_fc = paddle.static.nn.fc(
@@ -141,7 +145,9 @@ class TestSPMT(unittest.TestCase):
         )
         nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
         # vsum
-        nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
+        nt_sum = paddle.static.nn.sequence_lod.sequence_pool(
+            input=nt_emb, pool_type='sum'
+        )
         nt_ss = paddle.nn.functional.softsign(nt_sum)
         # fc layer
         nt_fc = paddle.static.nn.fc(
...
@@ -716,7 +716,9 @@ class TestDistLookupTableBase(TranspilerTest):
  is_sparse=is_sparse,
  is_distributed=is_distributed,
  )
- pool = fluid.layers.sequence_pool(input=emb, pool_type='average')
+ pool = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type='average'
+ )
  return pool
  title_ids = paddle.static.data(
...
@@ -78,7 +78,10 @@ class EntryAttrChecks(unittest.TestCase):
  entry=prob,
  param_attr=fluid.ParamAttr(name="deep_embedding"),
  )
- pool = fluid.layers.sequence_pool(input=emb, pool_type="sum")
+ pool = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type="sum"
+ )
  predict = paddle.static.nn.fc(
  x=pool, size=2, activation='softmax'
  )
...
@@ -38,7 +38,10 @@ class EntryAttrChecks(unittest.TestCase):
  is_distributed=True,
  param_attr=fluid.ParamAttr(name="deep_embedding"),
  )
- pool = fluid.layers.sequence_pool(input=emb, pool_type="sum")
+ pool = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type="sum"
+ )
  predict = paddle.static.nn.fc(
  x=pool, size=2, activation='softmax'
  )
...
@@ -65,7 +65,9 @@ class TestFleet1(unittest.TestCase):
  is_distributed=True,
  param_attr=fluid.ParamAttr(name="embedding"),
  )
- bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+ bow = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type='sum'
+ )
  bow = paddle.static.nn.data_norm(
  input=bow, epsilon=1e-4, name="norm"
  )
...
@@ -36,7 +36,9 @@ def bow_net(
  emb = fluid.layers.embedding(
  input=data, is_sparse=True, size=[dict_dim, emb_dim]
  )
- bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+ bow = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type='sum'
+ )
  bow_tanh = paddle.tanh(bow)
  fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
  fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
...
@@ -2096,86 +2096,6 @@ class TestBook(LayerTest):
  )
  return out
-
- def test_sequence_expand(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32')
-         y = paddle.static.data(
-             name='y', shape=[-1, 10, 20], dtype='float32', lod_level=2
-         )
-         return layers.sequence_expand(x=x, y=y, ref_level=1)
-
- def test_sequence_reshape(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         x = paddle.static.data(
-             name='x', shape=[-1, 8], dtype='float32', lod_level=1
-         )
-         out = layers.sequence_reshape(input=x, new_dim=16)
-         return out
-
- def test_sequence_unpad(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         x = paddle.static.data(name='x', shape=[-1, 10, 5], dtype='float32')
-         length = paddle.static.data(
-             name='length', shape=[-1], dtype='int64'
-         )
-         return layers.sequence_unpad(x=x, length=length)
-
- def test_sequence_softmax(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         seq_data = paddle.static.data(
-             name='seq_data',
-             shape=[-1, 10, 10],
-             dtype='float32',
-             lod_level=1,
-         )
-         seq = paddle.static.nn.fc(x=seq_data, size=20)
-         return layers.sequence_softmax(seq)
-
- def test_sequence_unsqueeze(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         x = paddle.static.data(name='x', shape=[-1, 8, 2], dtype='float32')
-         out = paddle.unsqueeze(x, axis=[1])
-         return out
-
- def test_sequence_scatter(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         x = paddle.static.data(name='x', shape=[3, 6], dtype='float32')
-         idx = paddle.static.data(
-             name='idx',
-             shape=[12, 1],
-             dtype='int32',
-             lod_level=1,
-         )
-         updates = paddle.static.data(
-             name='updates',
-             shape=[12, 1],
-             dtype='float32',
-             lod_level=1,
-         )
-         out = layers.sequence_scatter(input=x, index=idx, updates=updates)
-         return out
-
- def test_sequence_slice(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         import numpy as np
-
-         seqs = paddle.static.data(
-             name='x', shape=[-1, 10, 5], dtype='float32', lod_level=1
-         )
-         offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
-         length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
-         out = layers.sequence_slice(
-             input=seqs, offset=offset, length=length
-         )
-         return out
-
  def test_shuffle_batch(self):
  # TODO(minqiyang): dygraph do not support lod now
  with self.static_graph():
@@ -2238,14 +2158,6 @@ class TestBook(LayerTest):
  )
  return out
-
- def test_sequence_enumerate(self):
-     # TODO(minqiyang): dygraph do not support lod now
-     with self.static_graph():
-         x = paddle.static.data(
-             name="input", shape=[-1, 1], dtype='int32', lod_level=1
-         )
-         out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
-
  def test_row_conv(self):
  # TODO(minqiyang): dygraph do not support lod now
  with self.static_graph():
...
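The TestBook cases deleted above exercised the sequence_* layers through the old fluid.layers entry points; the same graphs can still be built through the relocated module. A rough re-spelling of the removed test_sequence_expand, with shapes copied from the deleted code (a sketch, not part of this PR):

import paddle

paddle.enable_static()

x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32')
y = paddle.static.data(
    name='y', shape=[-1, 10, 20], dtype='float32', lod_level=2
)
# layers.sequence_expand in the deleted test becomes:
out = paddle.static.nn.sequence_lod.sequence_expand(x=x, y=y, ref_level=1)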
@@ -136,7 +136,9 @@ def bow_net(
  emb = fluid.layers.embedding(
  input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
  )
- bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+ bow = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type='sum'
+ )
  bow_tanh = paddle.tanh(bow)
  fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
  fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
...
@@ -42,7 +42,9 @@ def bow_net(
  emb = fluid.layers.embedding(
  input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
  )
- bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+ bow = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type='sum'
+ )
  bow_tanh = paddle.tanh(bow)
  fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
  fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
...
@@ -150,7 +150,9 @@ class MLE:
  use_softmax=False,
  )
  max_seq_len = paddle.shape(probs)[1]
- mask = layers.sequence_mask(length, maxlen=max_seq_len, dtype="float32")
+ mask = paddle.static.nn.sequence_lod.sequence_mask(
+     length, maxlen=max_seq_len, dtype="float32"
+ )
  loss = loss * mask
  loss = paddle.mean(loss, axis=[0])
  loss = paddle.sum(loss)
...
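The MLE hunk swaps layers.sequence_mask for the relocated sequence_mask. A self-contained sketch of the call (the fixed maxlen is illustrative; the hunk above derives it from paddle.shape(probs)[1]):

import paddle

paddle.enable_static()

length = paddle.static.data(name='length', shape=[-1], dtype='int64')
# Yields a [batch, maxlen] tensor with 1.0 up to each sequence's length
# and 0.0 after it, matching the loss-masking use above.
mask = paddle.static.nn.sequence_lod.sequence_mask(
    length, maxlen=10, dtype='float32'
)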
@@ -58,7 +58,9 @@ def bow_net(
  emb = fluid.layers.embedding(
  input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]
  )
- bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+ bow = paddle.static.nn.sequence_lod.sequence_pool(
+     input=emb, pool_type='sum'
+ )
  bow_tanh = paddle.tanh(bow)
  fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
  fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
...
@@ -434,10 +434,9 @@ class TestSeqConvApi(unittest.TestCase):
  import paddle.fluid as fluid
  x = paddle.static.data('x', shape=[-1, 32], lod_level=1)
- y = fluid.layers.sequence_conv(
+ y = paddle.static.nn.sequence_lod.sequence_conv(
  input=x, num_filters=2, filter_size=3, padding_start=None
  )
  place = fluid.CPUPlace()
  x_tensor = fluid.create_lod_tensor(
  np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place
...
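For context, the TestSeqConvApi program above can be built and fed roughly as follows. This is a sketch assembled from the visible lines plus a hypothetical executor run; the exe lines are not part of the diff:

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()

x = paddle.static.data('x', shape=[-1, 32], lod_level=1)
y = paddle.static.nn.sequence_lod.sequence_conv(
    input=x, num_filters=2, filter_size=3, padding_start=None
)

place = fluid.CPUPlace()
# Four sequences of lengths 2, 3, 1, 4 packed into 10 rows.
x_tensor = fluid.create_lod_tensor(
    np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place
)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
(out,) = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False)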
@@ -27,7 +27,6 @@ from xpu.get_test_cover_info import (
  )
  import paddle
- import paddle.fluid as fluid
  paddle.enable_static()
@@ -105,13 +104,13 @@ class XPUTestSequenceUnpadOp(XPUOpTestWrapper):
  class TestSequenceUnpadOpError(unittest.TestCase):
  def test_error(self):
  """
- The type of 'x' in fluid.layers.sequence_unpad must be <class 'paddle.fluid.framework.Variable'>, but received <class 'numpy.ndarray'>.
+ The type of 'x' in paddle.static.nn.sequence_unpad must be <class 'paddle.fluid.framework.Variable'>, but received <class 'numpy.ndarray'>.
  """
  def test_x_variable():
  x = np.random.random((10, 5)).astype("float64")
- len = fluid.data(name='length2', shape=[10], dtype='int64')
- fluid.layers.sequence_unpad(x=x, length=len)
+ len = paddle.static.data(name='length2', shape=[10], dtype='int64')
+ paddle.static.nn.sequence_lod.sequence_unpad(x=x, length=len)
  self.assertRaises(TypeError, test_x_variable)
  """
@@ -119,9 +118,9 @@ class TestSequenceUnpadOpError(unittest.TestCase):
  """
  def test_length_variable():
- x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
+ x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32')
  len1 = np.random.random((10)).astype("int64")
- fluid.layers.sequence_unpad(x=x1, length=len1)
+ paddle.static.nn.sequence_lod.sequence_unpad(x=x1, length=len1)
  self.assertRaises(TypeError, test_length_variable)
  """
@@ -129,9 +128,9 @@ class TestSequenceUnpadOpError(unittest.TestCase):
  """
  def test_x_dtype():
- x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
- len2 = fluid.data(name='length2', shape=[10], dtype='int64')
- fluid.layers.sequence_unpad(x=x2, length=len2)
+ x2 = paddle.static.data(name='x2', shape=[10, 5], dtype='float16')
+ len2 = paddle.static.data(name='length2', shape=[10], dtype='int64')
+ paddle.static.nn.sequence_lod.sequence_unpad(x=x2, length=len2)
  self.assertRaises(TypeError, test_x_dtype)
  """
@@ -139,9 +138,9 @@ class TestSequenceUnpadOpError(unittest.TestCase):
  """
  def test_length_dtype():
- x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
- len3 = fluid.data(name='length3', shape=[10], dtype='int32')
- fluid.layers.sequence_unpad(x=x3, length=len3)
+ x3 = paddle.static.data(name='x3', shape=[10, 5], dtype='float64')
+ len3 = paddle.static.data(name='length3', shape=[10], dtype='int32')
+ paddle.static.nn.sequence_lod.sequence_unpad(x=x3, length=len3)
  self.assertRaises(TypeError, test_length_dtype)
...
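Each negative case above feeds sequence_unpad one wrong argument (a numpy array, a float16 input, an int32 length). For contrast, a sketch of a combination the checks accept, per the dtypes these cases assert on:

import paddle

paddle.enable_static()

x = paddle.static.data(name='x_pad', shape=[10, 5], dtype='float32')
length = paddle.static.data(name='x_len', shape=[10], dtype='int64')
# Strips the padding from each row according to its true length.
out = paddle.static.nn.sequence_lod.sequence_unpad(x=x, length=length)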
@@ -28,7 +28,7 @@ from paddle.fluid.framework import (
  in_dygraph_mode,
  program_guard,
  )
- from paddle.fluid.layers import control_flow, sequence_lod, utils
+ from paddle.fluid.layers import control_flow, utils
  from paddle.fluid.layers.utils import flatten, map_structure
  from paddle.framework import core
  from paddle.nn import Layer
@@ -171,7 +171,7 @@ def _rnn_dynamic_graph(
  inputs = map_structure(_transpose_batch_time, inputs)
  if sequence_length is not None:
- mask = sequence_lod.sequence_mask(
+ mask = paddle.static.nn.sequence_lod.sequence_mask(
  sequence_length, maxlen=time_steps, dtype=inputs.dtype
  )
  mask = paddle.transpose(mask, [1, 0])
@@ -256,7 +256,7 @@ def _rnn_static_graph(
  max_seq_len = paddle.shape(flatten(inputs)[0])[0]
  if sequence_length:
- mask = sequence_lod.sequence_mask(
+ mask = paddle.static.nn.sequence_lod.sequence_mask(
  sequence_length,
  maxlen=max_seq_len,
  dtype=flatten(initial_states)[0].dtype,
...
@@ -42,21 +42,21 @@ from ...fluid.input import embedding  # noqa: F401
  from ...fluid.contrib.layers import sparse_embedding  # noqa: F401
  from ...fluid.layers import StaticRNN  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_conv  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_softmax  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_pool  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_concat  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_first_step  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_last_step  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_slice  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_expand  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_expand_as  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_pad  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_unpad  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_reshape  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_scatter  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_enumerate  # noqa: F401
- from ...fluid.layers.sequence_lod import sequence_reverse  # noqa: F401
+ from .sequence_lod import sequence_conv  # noqa: F401
+ from .sequence_lod import sequence_softmax  # noqa: F401
+ from .sequence_lod import sequence_pool  # noqa: F401
+ from .sequence_lod import sequence_concat  # noqa: F401
+ from .sequence_lod import sequence_first_step  # noqa: F401
+ from .sequence_lod import sequence_last_step  # noqa: F401
+ from .sequence_lod import sequence_slice  # noqa: F401
+ from .sequence_lod import sequence_expand  # noqa: F401
+ from .sequence_lod import sequence_expand_as  # noqa: F401
+ from .sequence_lod import sequence_pad  # noqa: F401
+ from .sequence_lod import sequence_unpad  # noqa: F401
+ from .sequence_lod import sequence_reshape  # noqa: F401
+ from .sequence_lod import sequence_scatter  # noqa: F401
+ from .sequence_lod import sequence_enumerate  # noqa: F401
+ from .sequence_lod import sequence_reverse  # noqa: F401
  from .control_flow import cond
...
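With the re-exports above in place, the short and module-qualified spellings should resolve to the same function object. A quick sanity sketch, not a snippet from the PR:

import paddle
from paddle.static.nn import sequence_pad
from paddle.static.nn.sequence_lod import sequence_pad as sequence_pad_mod

assert sequence_pad is sequence_pad_mod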