Commit 0d82baf8 authored by songyouwei, committed by Tao Luo

add batch_size_1_input data function for sequence ops unittests (#22172)

Parent 2f3e2a84
......@@ -207,8 +207,8 @@ set(TEST_OPS_WITH_GC
     test_pad2d_op
     test_scatter_op
     test_sequence_concat
-    test_seq_conv
-    test_seq_pool
+    test_sequence_conv
+    test_sequence_pool
     test_sequence_expand_as
     test_sequence_expand
     test_sequence_pad_op
......
......@@ -374,6 +374,24 @@ class OpTest(unittest.TestCase):
         else:
             return fluid.dygraph.base.to_variable(value)
 
+    def get_sequence_batch_size_1_input(self, lod=None, shape=None):
+        """Get LoD input data whose batch size is 1.
+        All sequence related OP unittests should call this function to cover the case of batch size = 1.
+        Args:
+            lod (list[list of int], optional): Length-based LoD, length of lod[0] should be 1. Default: [[13]].
+            shape (list, optional): Shape of input, shape[0] should be equal to lod[0][0]. Default: [13, 23].
+        Returns:
+            tuple (ndarray, lod): LoD input data whose batch size is 1.
+        """
+        if lod is None:
+            lod = [[13]]
+        if shape is None:
+            shape = [13, 23]
+        assert len(lod[0]) == 1
+        assert lod[0][0] == shape[0]
+        x = np.random.uniform(0.1, 1, shape).astype('float32')
+        return (x, lod)
+
     def append_input_output_for_dygraph(self, op_proto, np_list, is_input,
                                         if_return_inputs_grad_dict, block):
         def create_var(np_value, name, is_input, if_return_inputs_grad_dict):
......
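For reference, the defaults of the new helper boil down to the standalone snippet below: a single sequence of 13 steps, so the batch size is 1. This is only a sketch mirroring the hunk above, not part of the commit.

    import numpy as np

    # Defaults of get_sequence_batch_size_1_input: one sequence of length 13
    # (hence batch size 1) with feature width 23, returned as (data, length-based LoD).
    lod = [[13]]                    # exactly one sequence -> batch size 1
    shape = [13, 23]                # shape[0] must equal lod[0][0]
    x = np.random.uniform(0.1, 1, shape).astype('float32')

    assert len(lod[0]) == 1
    assert lod[0][0] == shape[0]
    print(x.shape, lod)             # (13, 23) [[13]]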
......@@ -57,26 +57,30 @@ class TestSeqAvgPool(OpTest):
     def set_lod(self):
         return [[11]]
 
-    def set_data(self):
-        self.op_type = 'sequence_pool'
+    def set_lod_data(self):
         x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
+        return x
+
+    def set_data(self):
+        x = self.set_lod_data()
         lod = self.set_lod()
         level = len(lod) - 1
         self.inputs = {'X': (x, lod)}
         offset = convert_to_offset(lod)
-        out = np.zeros((len(lod[level]), 23)).astype('float32')
+        out = np.zeros((len(lod[level]), x.shape[1])).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out
 
     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.0, 'pooltype': "AVERAGE"}
         compute_seqpool_avg(x, offset, out, self.attrs["pad_value"])
 
     def setUp(self):
-        x, offset, out = self.set_data()
+        self.op_type = 'sequence_pool'
+        x, lod, offset, out = self.set_data()
         self.compute(x, offset, out)
         if len(offset) > 1:
-            self.outputs = {'Out': (out, [self.set_lod()[0]])}
+            self.outputs = {'Out': (out, [lod[0]])}
 
     def test_check_output(self):
         self.check_output(check_dygraph=False)
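The refactored set_data still builds a length-based LoD, converts it to offsets with convert_to_offset, and hands the offsets to compute_seqpool_avg. The snippet below is a self-contained sketch of that relationship using local stand-ins for the two helpers; the stand-ins only assume the behavior their names suggest and are not the implementations used by the tests.

    import numpy as np


    def lengths_to_offset(lod):
        # Illustrative stand-in for convert_to_offset: [[4, 1, 3, 3]] -> [[0, 4, 5, 8, 11]].
        offset = [[0] for _ in lod]
        for i, level in enumerate(lod):
            for seq_len in level:
                offset[i].append(offset[i][-1] + seq_len)
        return offset


    def seqpool_avg(x, offset, out, pad_value):
        # Illustrative stand-in for compute_seqpool_avg: average the rows of each
        # sequence; a zero-length sequence just receives the pad value.
        level = offset[-1]
        for i in range(len(level) - 1):
            if level[i] == level[i + 1]:
                out[i] = pad_value
            else:
                out[i] = x[level[i]:level[i + 1]].mean(axis=0)


    lod = [[4, 1, 3, 3]]             # four sequences, 11 rows in total
    x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
    offset = lengths_to_offset(lod)  # [[0, 4, 5, 8, 11]]
    out = np.zeros((len(lod[0]), x.shape[1]), dtype='float32')
    seqpool_avg(x, offset, out, pad_value=0.0)
    print(offset, out.shape)         # [[0, 4, 5, 8, 11]] (4, 23)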
......@@ -90,6 +94,17 @@ class TestSeqAvgPool(OpTest):
         self.check_grad(["X"], "Out", check_dygraph=False)
 
 
+class TestSeqAvgPoolBatch1(TestSeqAvgPool):
+    def set_lod(self):
+        return [[11]]
+
+    def set_lod_data(self):
+        lod = self.set_lod()
+        x, _ = self.get_sequence_batch_size_1_input(
+            lod=lod, shape=[lod[0][0], 23])
+        return x
+
+
 class TestSeqAvgPoolLen0(TestSeqAvgPool):
     def set_lod(self):
         return [[0, 4, 0, 7, 0]]
......@@ -135,7 +150,7 @@ class TestSeqMaxPool(TestSeqAvgPool):
         out = np.zeros((len(lod[level]), 23)).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out
 
     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.5, 'pooltype': "MAX"}
......@@ -232,7 +247,7 @@ class TestSeqAvgPool2D(TestSeqAvgPool):
         out = np.zeros((len(lod[level]), 3, 17)).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out
 
     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.0, 'pooltype': "AVERAGE"}
......@@ -321,19 +336,19 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D):
     def set_data(self):
         self.op_type = 'sequence_pool'
         x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32')
-        self.lod = self.set_lod()
-        level = len(self.lod) - 1
-        self.inputs = {'X': (x, self.lod)}
-        offset = convert_to_offset(self.lod)
+        lod = self.set_lod()
+        level = len(lod) - 1
+        self.inputs = {'X': (x, lod)}
+        offset = convert_to_offset(lod)
         for i in range(len(offset[level]) - 1):
             l = offset[level][i + 1] - offset[level][i]
             if l == 0:
                 continue
             x[offset[level][i] + np.random.randint(l), :] += 1.0
-        out = np.zeros((len(self.lod[level]), 3, 11)).astype('float32')
+        out = np.zeros((len(lod[level]), 3, 11)).astype('float32')
         self.outputs = {'Out': out}
-        return x, offset, out
+        return x, lod, offset, out
 
     def compute(self, x, offset, out):
         self.attrs = {"pad_value": 0.0, 'pooltype': "MAX"}
......
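A detail worth noting in the max-pool hunks above: set_data adds 1.0 to one randomly chosen timestep of every non-empty sequence before pooling, which makes each sequence's bumped row strictly larger than its other rows (a common way to keep MAX-pool outputs and gradient checks deterministic). Below is a minimal sketch of that loop in isolation; the concrete offsets are made up for illustration.

    import numpy as np

    np.random.seed(0)
    offset = [0, 4, 5, 8, 13]       # offset form of a length-based LoD [[4, 1, 3, 5]]
    x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32')

    # Same trick as in TestSeqMaxPool2D.set_data: bump one random row per
    # non-empty sequence by 1.0 so it strictly dominates that sequence's rows.
    for i in range(len(offset) - 1):
        l = offset[i + 1] - offset[i]
        if l == 0:
            continue
        x[offset[i] + np.random.randint(l), :] += 1.0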
......@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 import random
 from op_test import OpTest
-from sequence.test_seq_conv import seqconv
+from sequence.test_sequence_conv import seqconv
 class TestSeqConvEltAddRelu(OpTest):
......
......@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 from test_reorder_lod_tensor import convert_to_offset
-from sequence.test_seq_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
+from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
 class TestFusionSeqPoolConcatOp(OpTest):
......
......@@ -18,7 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 from test_reorder_lod_tensor import convert_to_offset
-from sequence.test_seq_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
+from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
 from test_cvm_op import cvm_compute
......