From 3545d3a61d5cdfe0df90b87939056f026fc8e103 Mon Sep 17 00:00:00 2001
From: yangyaming
Date: Thu, 14 Dec 2017 14:11:15 +0800
Subject: [PATCH] Expose seq_expand op.

---
 doc/api/v2/fluid/layers.rst                 |  4 ++
 python/paddle/v2/fluid/layers.py            | 69 ++++++++++++++++++++-
 python/paddle/v2/fluid/tests/test_layers.py |  9 +++
 3 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst
index 89e5fec13b..c3436ca6bc 100644
--- a/doc/api/v2/fluid/layers.rst
+++ b/doc/api/v2/fluid/layers.rst
@@ -300,3 +300,7 @@ conv2d_transpose
 .. autofunction:: paddle.v2.fluid.layers.conv2d_transpose
     :noindex:
 
+seq_expand
+----------
+.. autofunction:: paddle.v2.fluid.layers.seq_expand
+    :noindex:
diff --git a/python/paddle/v2/fluid/layers.py b/python/paddle/v2/fluid/layers.py
index f67d6d08c7..bdb9e1ba8c 100644
--- a/python/paddle/v2/fluid/layers.py
+++ b/python/paddle/v2/fluid/layers.py
@@ -11,7 +11,7 @@ from param_attr import ParamAttr
 __all__ = [
     'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat',
     'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim',
-    'batch_norm', 'accuracy', 'split_lod_tensor', 'While'
+    'batch_norm', 'accuracy', 'split_lod_tensor', 'While', 'seq_expand'
 ]
 
 _REGISTER_LAYER_FROM_OPS = [
@@ -2023,3 +2023,70 @@ class DynamicRNN(object):
         if self.status != DynamicRNN.IN_RNN:
             raise ValueError("{0} can only be invoked inside rnn block.".format(
                 method))
+
+
+def seq_expand(x, y, main_program=None, startup_program=None):
+    """Sequence Expand Layer. This layer expands the input variable **x**
+    according to the LoD information of **y**. The following examples
+    explain how seq_expand works:
+
+    .. code-block:: text
+
+        * Case 1
+            x is a LoDTensor:
+                x.lod = [[0, 2, 3],
+                         [0, 1, 3, 4]]
+                x.data = [a, b, c, d]
+                x.dims = [4, 1]
+
+            y is a LoDTensor:
+                y.lod = [[0, 2, 4],
+                         [0, 3, 6, 7, 8]]
+
+            with condition len(y.lod[-1]) - 1 == x.dims[0]
+
+            then output is a 2-level LoDTensor:
+                out.lod = [[0, 2, 4],
+                           [0, 3, 6, 7, 8]]
+                out.data = [a, a, a, b, b, b, c, d]
+                out.dims = [8, 1]
+
+        * Case 2
+            x is a Tensor:
+                x.data = [a, b, c]
+                x.dims = [3, 1]
+
+            y is a LoDTensor:
+                y.lod = [[0, 2, 3, 6]]
+
+            with condition len(y.lod[-1]) - 1 == x.dims[0]
+
+            then output is a 1-level LoDTensor:
+                out.lod = [[0, 2, 3, 6]]
+                out.data = [a, a, b, c, c, c]
+                out.dims = [6, 1]
+
+    Args:
+        x (Variable): The input variable which is a Tensor or LoDTensor.
+        y (Variable): The input variable which is a LoDTensor.
+        main_program (Program): The main program.
+        startup_program (Program): The startup program.
+
+    Returns:
+        Variable: The expanded variable which is a LoDTensor.
+
+    Examples:
+        .. code-block:: python
+
+            x = fluid.layers.data(name='x', shape=[10], dtype='float32')
+            y = fluid.layers.data(name='y', shape=[10, 20],
+                                  dtype='float32', lod_level=1)
+            out = fluid.layers.seq_expand(x=x, y=y)
+    """
+    helper = LayerHelper('seq_expand', input=x, **locals())
+    dtype = helper.input_dtype()
+    tmp = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type='seq_expand', inputs={'X': x,
+                                   'Y': y}, outputs={'Out': tmp})
+    return tmp
diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py
index 9b88080158..d6f939af23 100644
--- a/python/paddle/v2/fluid/tests/test_layers.py
+++ b/python/paddle/v2/fluid/tests/test_layers.py
@@ -161,6 +161,15 @@ class TestBook(unittest.TestCase):
                 x=dat, label=lbl))
         print(str(program))
 
+    def test_seq_expand(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(name='x', shape=[10], dtype='float32')
+            y = layers.data(
+                name='y', shape=[10, 20], dtype='float32', lod_level=1)
+            self.assertIsNotNone(layers.seq_expand(x=x, y=y))
+        print(str(program))
+
 
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
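
For readers who want to check the docstring's expansion rule by hand: the short pure-Python sketch below repeats row i of x exactly (y.lod[-1][i + 1] - y.lod[-1][i]) times and carries y's LoD over to the output, which reproduces Case 1 and Case 2 above. The helper name seq_expand_reference and the plain-list LoD representation are assumptions made for this illustration only; this is not the op's actual kernel.

def seq_expand_reference(x_data, y_lod):
    # Reference semantics of seq_expand as described in the docstring:
    # row i of x is repeated (y.lod[-1][i + 1] - y.lod[-1][i]) times and
    # the output takes over y's LoD.
    last_level = y_lod[-1]
    assert len(last_level) - 1 == len(x_data)  # condition from the docstring
    out = []
    for i, row in enumerate(x_data):
        out.extend([row] * (last_level[i + 1] - last_level[i]))
    return out, y_lod


# Case 2 from the docstring: x = [a, b, c], y.lod = [[0, 2, 3, 6]]
out, out_lod = seq_expand_reference(['a', 'b', 'c'], [[0, 2, 3, 6]])
assert out == ['a', 'a', 'b', 'c', 'c', 'c']

# Case 1 from the docstring: x has 4 rows, y.lod = [[0, 2, 4], [0, 3, 6, 7, 8]]
out, out_lod = seq_expand_reference(['a', 'b', 'c', 'd'],
                                    [[0, 2, 4], [0, 3, 6, 7, 8]])
assert out == ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'd']
assert out_lod == [[0, 2, 4], [0, 3, 6, 7, 8]]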