diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 73e0c2394a65ff37de6d845e977c75569445af96..0c1388b2e68c1914ae0abf5c7c8ac80c69c698a1 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -207,8 +207,8 @@ set(TEST_OPS_WITH_GC
     test_pad2d_op
     test_scatter_op
     test_sequence_concat
-    test_seq_conv
-    test_seq_pool
+    test_sequence_conv
+    test_sequence_pool
     test_sequence_expand_as
     test_sequence_expand
     test_sequence_pad_op
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index abf15778ea2fe724e2dba46fc25c9a41996fb563..ec2c13522e045e2a9534f28552e8b409a834b96f 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -374,6 +374,24 @@ class OpTest(unittest.TestCase):
         else:
             return fluid.dygraph.base.to_variable(value)

+    def get_sequence_batch_size_1_input(self, lod=None, shape=None):
+        """Get LoD input data whose batch size is 1.
+        All sequence-related OP unittests should call this function to cover the case of batch size = 1.
+        Args:
+            lod (list[list of int], optional): Length-based LoD, length of lod[0] should be 1. Default: [[13]].
+            shape (list, optional): Shape of input, shape[0] should be equal to lod[0][0]. Default: [13, 23].
+        Returns:
+            tuple (ndarray, lod): LoD input data whose batch size is 1.
+        """
+        if lod is None:
+            lod = [[13]]
+        if shape is None:
+            shape = [13, 23]
+        assert len(lod[0]) == 1
+        assert lod[0][0] == shape[0]
+        x = np.random.uniform(0.1, 1, shape).astype('float32')
+        return (x, lod)
+
     def append_input_output_for_dygraph(self, op_proto, np_list, is_input,
                                         if_return_inputs_grad_dict, block):
         def create_var(np_value, name, is_input, if_return_inputs_grad_dict):
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_seq_conv.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py
similarity index 100%
rename from python/paddle/fluid/tests/unittests/sequence/test_seq_conv.py
rename to python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_seq_pool.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py
similarity index 94%
rename from python/paddle/fluid/tests/unittests/sequence/test_seq_pool.py
rename to python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py
index 94eb091bc010510664ae348c726f3548b2357b75..f4c422a9a801caf0df946dbcbbbf770429a513cc 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_seq_pool.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py
@@ -57,26 +57,30 @@ class TestSeqAvgPool(OpTest):
     def set_lod(self):
         return [[11]]

-    def set_data(self):
-        self.op_type = 'sequence_pool'
+    def set_lod_data(self):
         x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
+        return x
+
+    def set_data(self):
+        x = self.set_lod_data()
         lod = self.set_lod()
         level = len(lod) - 1
         self.inputs = {'X': (x, lod)}
         offset = convert_to_offset(lod)
-        out = np.zeros((len(lod[level]), 23)).astype('float32')
+        out = np.zeros((len(lod[level]), x.shape[1])).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out

     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.0, 'pooltype': "AVERAGE"}
         compute_seqpool_avg(x, offset, out, self.attrs["pad_value"])

     def setUp(self):
-        x, offset, out = self.set_data()
+        self.op_type = 'sequence_pool'
+        x, lod, offset, out = self.set_data()
         self.compute(x, offset, out)
         if len(offset) > 1:
-            self.outputs = {'Out': (out, [self.set_lod()[0]])}
+            self.outputs = {'Out': (out, [lod[0]])}

     def test_check_output(self):
         self.check_output(check_dygraph=False)
@@ -90,6 +94,17 @@ class TestSeqAvgPool(OpTest):
         self.check_grad(["X"], "Out", check_dygraph=False)


+class TestSeqAvgPoolBatch1(TestSeqAvgPool):
+    def set_lod(self):
+        return [[11]]
+
+    def set_lod_data(self):
+        lod = self.set_lod()
+        x, _ = self.get_sequence_batch_size_1_input(
+            lod=lod, shape=[lod[0][0], 23])
+        return x
+
+
 class TestSeqAvgPoolLen0(TestSeqAvgPool):
     def set_lod(self):
         return [[0, 4, 0, 7, 0]]
@@ -135,7 +150,7 @@ class TestSeqMaxPool(TestSeqAvgPool):

         out = np.zeros((len(lod[level]), 23)).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out

     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.5, 'pooltype': "MAX"}
@@ -232,7 +247,7 @@ class TestSeqAvgPool2D(TestSeqAvgPool):

         out = np.zeros((len(lod[level]), 3, 17)).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out

     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.0, 'pooltype': "AVERAGE"}
@@ -321,19 +336,19 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D):
     def set_data(self):
         self.op_type = 'sequence_pool'
         x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32')
-        self.lod = self.set_lod()
-        level = len(self.lod) - 1
-        self.inputs = {'X': (x, self.lod)}
-        offset = convert_to_offset(self.lod)
+        lod = self.set_lod()
+        level = len(lod) - 1
+        self.inputs = {'X': (x, lod)}
+        offset = convert_to_offset(lod)
         for i in range(len(offset[level]) - 1):
             l = offset[level][i + 1] - offset[level][i]
             if l == 0:
                 continue
             x[offset[level][i] + np.random.randint(l), :] += 1.0

-        out = np.zeros((len(self.lod[level]), 3, 11)).astype('float32')
+        out = np.zeros((len(lod[level]), 3, 11)).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out

     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.0, 'pooltype': "MAX"}
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py
index bd5e1895aa6efc4c8053769f07b7db5109ec1c0c..b6d643c357140e7086391e328b295fb53ea85607 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 import random
 from op_test import OpTest
-from sequence.test_seq_conv import seqconv
+from sequence.test_sequence_conv import seqconv


 class TestSeqConvEltAddRelu(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py
index d8054a85cb60768837077c56f8425321c190d6f1..fa42f5d09b86ad0ffe03091c2610a663884b6201 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 from test_reorder_lod_tensor import convert_to_offset
-from sequence.test_seq_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
+from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt


 class TestFusionSeqPoolConcatOp(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py
index a4dd2659dcb5617bad79a765206c4b74e2b16b21..eb681b1f167ad24dbc43b257c69b6169fd4eeb33 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py
@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 from test_reorder_lod_tensor import convert_to_offset
-from sequence.test_seq_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
+from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
 from test_cvm_op import cvm_compute
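Usage note (not part of the patch): the sketch below mirrors the behavior and call pattern of the new OpTest.get_sequence_batch_size_1_input helper added in op_test.py above. It is copied as a free function purely so the example runs outside the Paddle test harness; the __main__ demo values are illustrative only.

# Standalone sketch mirroring the helper added to op_test.py in this patch.
# Written as a free function so the example runs without the Paddle test harness.
import numpy as np


def get_sequence_batch_size_1_input(lod=None, shape=None):
    """Return (x, lod) for a length-based LoD input whose batch size is 1."""
    if lod is None:
        lod = [[13]]
    if shape is None:
        shape = [13, 23]
    assert len(lod[0]) == 1       # exactly one sequence => batch size 1
    assert lod[0][0] == shape[0]  # sequence length must match the first dim of x
    x = np.random.uniform(0.1, 1, shape).astype('float32')
    return (x, lod)


if __name__ == '__main__':
    # Same call pattern as TestSeqAvgPoolBatch1.set_lod_data in the diff.
    x, lod = get_sequence_batch_size_1_input(lod=[[11]], shape=[11, 23])
    print(x.shape, lod)  # (11, 23) [[11]]

In the actual tests the method lives on OpTest, so subclasses call self.get_sequence_batch_size_1_input(...) exactly as TestSeqAvgPoolBatch1 does above.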