Unverified commit 96d2f337, authored by tanzhipeng, committed by GitHub

modify sequence_conv_xpu op test. test=kunlun (#40347)

Parent 7dad9f70
@@ -21,6 +21,8 @@ import random
import sys
sys.path.append("../")
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types
from xpu.get_test_cover_info import XPUOpTestWrapper
paddle.enable_static()
np.set_printoptions(threshold=np.inf)
@@ -73,10 +75,15 @@ def seqconv(x,
return np.dot(col, filter)
class TestSeqProject(XPUOpTest):
class XPUTestSequenceConv(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'sequence_conv'
class TestSeqProject(XPUOpTest):
def setUp(self):
self.init_test_case()
self.op_type = 'sequence_conv'
self.dtype = self.in_type
self.use_xpu = True
if self.context_length == 1 \
@@ -90,16 +97,17 @@ class TestSeqProject(XPUOpTest):
# one level, batch size
x = np.random.uniform(-6.10907e-05, 0.000104218,
[self.input_size[0],
self.input_size[1]]).astype('float32')
self.input_size[1]]).astype(self.dtype)
w = np.random.uniform(-3.17068e-05, 0.000159822, [
self.context_length * self.input_size[1], self.output_represention
]).astype('float32')
self.context_length * self.input_size[1],
self.output_represention
]).astype(self.dtype)
begin_pad = np.max([0, -self.context_start])
end_pad = np.max([0, self.context_start + self.context_length - 1])
total_pad = begin_pad + end_pad
padding_data = np.random.uniform(
0, 0, [total_pad, self.input_size[1]]).astype('float32')
0, 0, [total_pad, self.input_size[1]]).astype(self.dtype)
self.pad_data = padding_data
self.inputs = {
'X': (x, self.lod),
@@ -121,8 +129,9 @@ class TestSeqProject(XPUOpTest):
'paddingTrainable': self.padding_trainable,
'contextStride': self.context_stride
}
out = seqconv(x, self.lod, w, self.context_length, self.context_start,
self.padding_trainable, self.pad_data)
out = seqconv(x, self.lod, w, self.context_length,
self.context_start, self.padding_trainable,
self.pad_data)
self.outputs = {'Out': out}
def test_check_output(self):
@@ -153,7 +162,8 @@ class TestSeqProject(XPUOpTest):
def test_check_grad_padding_filter(self):
if self.padding_trainable:
self.check_grad(self.inputs_val_no_x, 'Out', no_grad_set=set(['X']))
self.check_grad(
self.inputs_val_no_x, 'Out', no_grad_set=set(['X']))
def init_test_case(self):
self.input_row = 7
@@ -171,8 +181,7 @@ class TestSeqProject(XPUOpTest):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
self.output_represention = 8 # output feature size
class TestSeqProjectCase1(TestSeqProject):
class TestSeqProjectCase1(TestSeqProject):
def init_test_case(self):
self.input_row = 11
self.context_start = -2
@@ -188,8 +197,7 @@ class TestSeqProjectCase1(TestSeqProject):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
self.output_represention = 8 # output feature size
class TestSeqProjectCase2Len0(TestSeqProject):
class TestSeqProjectCase2Len0(TestSeqProject):
def init_test_case(self):
self.input_row = 11
self.context_start = -2
@@ -205,8 +213,7 @@ class TestSeqProjectCase2Len0(TestSeqProject):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
self.output_represention = 8 # output feature size
class TestSeqProjectCase3(TestSeqProject):
class TestSeqProjectCase3(TestSeqProject):
def init_test_case(self):
self.input_row = 25
self.context_start = -2
@@ -225,8 +232,7 @@ class TestSeqProjectCase3(TestSeqProject):
self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
self.output_represention = 8 # output feature size
class TestSeqProjectCase4(TestSeqProject):
class TestSeqProjectCase4(TestSeqProject):
def init_test_case(self):
self.input_row = 7835
self.input_col = 128
@@ -237,18 +243,19 @@ class TestSeqProjectCase4(TestSeqProject):
self.input_size = [self.input_row, self.input_col]
offset_lod = [[
0, 1, 2, 3, 131, 241, 242, 263, 264, 265, 266, 267, 268, 387, 515,
516, 644, 645, 772, 794, 922, 923, 924, 944, 945, 1073, 1074, 1202,
1330, 1458, 1556, 1557, 1558, 1686, 1748, 1876, 1912, 1913, 1914,
2032, 2066, 2194, 2308, 2309, 2347, 2475, 2476, 2477, 2478, 2606,
2607, 2735, 2736, 2737, 2738, 2838, 2966, 2967, 2968, 2969, 3097,
3225, 3353, 3481, 3482, 3520, 3642, 3643, 3754, 3882, 3883, 4010,
4011, 4012, 4140, 4219, 4228, 4356, 4357, 4415, 4475, 4476, 4604,
4605, 4606, 4694, 4695, 4808, 4936, 4961, 4962, 5004, 5132, 5260,
5312, 5440, 5441, 5569, 5570, 5675, 5676, 5750, 5810, 5811, 5939,
6021, 6149, 6277, 6278, 6364, 6425, 6519, 6647, 6648, 6739, 6867,
6995, 6996, 7120, 7223, 7244, 7367, 7407, 7408, 7467, 7595, 7699,
7827, 7835
0, 1, 2, 3, 131, 241, 242, 263, 264, 265, 266, 267, 268, 387,
515, 516, 644, 645, 772, 794, 922, 923, 924, 944, 945, 1073,
1074, 1202, 1330, 1458, 1556, 1557, 1558, 1686, 1748, 1876,
1912, 1913, 1914, 2032, 2066, 2194, 2308, 2309, 2347, 2475,
2476, 2477, 2478, 2606, 2607, 2735, 2736, 2737, 2738, 2838,
2966, 2967, 2968, 2969, 3097, 3225, 3353, 3481, 3482, 3520,
3642, 3643, 3754, 3882, 3883, 4010, 4011, 4012, 4140, 4219,
4228, 4356, 4357, 4415, 4475, 4476, 4604, 4605, 4606, 4694,
4695, 4808, 4936, 4961, 4962, 5004, 5132, 5260, 5312, 5440,
5441, 5569, 5570, 5675, 5676, 5750, 5810, 5811, 5939, 6021,
6149, 6277, 6278, 6364, 6425, 6519, 6647, 6648, 6739, 6867,
6995, 6996, 7120, 7223, 7244, 7367, 7407, 7408, 7467, 7595,
7699, 7827, 7835
]]
self.lod = [[]]
# convert from offset-based lod to length-based lod
@@ -257,6 +264,11 @@ class TestSeqProjectCase4(TestSeqProject):
self.output_represention = 8 # output feature size
support_types = get_xpu_op_support_types('sequence_conv')
for stype in support_types:
create_test_class(globals(), XPUTestSequenceConv, stype)
class TestSeqConvApi(unittest.TestCase):
def test_api(self):
import paddle.fluid as fluid
......
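Note on the pattern introduced by this diff: the single `TestSeqProject(XPUOpTest)` class is refactored into an `XPUOpTestWrapper` subclass (`XPUTestSequenceConv`) whose inner test cases are instantiated once per dtype returned by `get_xpu_op_support_types('sequence_conv')`, via `create_test_class(globals(), XPUTestSequenceConv, stype)`. The following minimal, self-contained sketch illustrates that per-dtype registration flow with plain `unittest`; `DummyOpTestWrapper` and `create_dtype_test_classes` are hypothetical stand-ins, not Paddle APIs.

```python
import unittest


# Hypothetical stand-ins for Paddle's XPUOpTestWrapper / create_test_class,
# shown only to illustrate the per-dtype test registration pattern.
class DummyOpTestWrapper(object):
    def __init__(self):
        self.op_name = 'sequence_conv'

    class TestBase(unittest.TestCase):
        in_type = None  # filled in for each generated class

        def test_dtype_is_set(self):
            # Every generated class carries the dtype it was registered with.
            self.assertIsNotNone(self.in_type)


def create_dtype_test_classes(scope, wrapper_cls, dtype):
    """Register one concrete TestCase per supported dtype into `scope`."""
    name = 'TestSequenceConv_%s' % dtype
    scope[name] = type(name, (wrapper_cls.TestBase,), {'in_type': dtype})


# Stand-in for get_xpu_op_support_types('sequence_conv').
for stype in ['float32', 'float16']:
    create_dtype_test_classes(globals(), DummyOpTestWrapper, stype)

if __name__ == '__main__':
    unittest.main()
```

In the real test file, `self.dtype = self.in_type` inside `setUp` is what lets each generated class build its inputs (`x`, `w`, `padding_data`) with the dtype it was registered for, which is why the `.astype('float32')` calls in the diff become `.astype(self.dtype)`.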