Unverified commit 9a1fdad3, authored by ykkk2333, committed by GitHub

update new unittests of flatten ops and layernorm, *test=kunlun (#43895)

Parent commit: 37f2151f
......@@ -23,67 +23,80 @@ import paddle
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class XPUTestFlatten2Op(XPUOpTestWrapper):
    """Per-dtype test suite for the flatten2 XPU op.

    create_test_class() instantiates the nested cases once per supported
    dtype; the framework injects the dtype as ``self.in_type``
    (presumably — confirm against xpu.get_test_cover_info).
    """

    def __init__(self):
        self.op_name = 'flatten2'
        self.use_dynamic_create_class = False

    class TestFlatten2Op(XPUOpTest):

        def setUp(self):
            self.set_xpu()
            self.op_type = "flatten2"
            # dtype is injected per generated class by create_test_class().
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                # XShape is excluded from checking (no_check_set below),
                # so random filler of the right shape/dtype is sufficient.
                "XShape": np.random.random(self.in_shape).astype(self.dtype)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True

        def test_check_output(self):
            self.check_output_with_place(self.place, no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 4, 5)
            self.axis = 1
            self.new_shape = (3, 40)

        def init_attrs(self):
            self.attrs = {"axis": self.axis}

    class TestFlatten2OpWithCornerAxis(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.axis = 0
            self.new_shape = (1, 120)

    class TestFlatten2OpWithDefaultAxis(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (10, 2, 2, 3)
            self.new_shape = (10, 12)

        def init_attrs(self):
            # No explicit axis: exercises the op's default-axis path.
            self.attrs = {}

    class TestFlatten2OpSixDims(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.axis = 4
            self.new_shape = (36, 16)


# Register one concrete test class per dtype supported by both flatten2
# (forward) and mean (used internally by the gradient checker).
support_types = get_xpu_op_support_types('flatten2')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlatten2Op, stype)

if __name__ == "__main__":
    unittest.main()
......@@ -17,7 +17,6 @@ from __future__ import print_function
import sys
sys.path.append("..")
import numpy as np
import unittest
import sys
......@@ -27,215 +26,214 @@ from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class TestFlattenOp(XPUOpTest):
    """Legacy float32-default test for the flatten_contiguous_range XPU op.

    Pre-refactor suite: dtype variants are expressed as explicit
    subclasses (TestFlattenOp_int32 etc.) instead of being generated
    per supported dtype.
    """

    def setUp(self):
        self.set_xpu()
        self.op_type = "flatten_contiguous_range"
        self.place = paddle.XPUPlace(0)
        self.use_xpu = True
        self.use_mkldnn = False
        # Defaults; init_test_case() may override the axes and dtype.
        self.start_axis = 0
        self.stop_axis = -1
        self.dtype = np.float32
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.in_shape).astype(self.dtype)}
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            # XShape is excluded from checking (no_check_set below).
            "XShape": np.random.random(self.in_shape).astype("float32")
        }

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place, no_check_set=["XShape"])

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ["X"], "Out")

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = (120)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
            'use_xpu': True,
        }


class TestFlattenOp_1(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 1
        self.stop_axis = 2
        self.new_shape = (3, 10, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_2(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_3(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 2
        self.new_shape = (30, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_4(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = -2
        self.stop_axis = -1
        self.new_shape = (3, 2, 20)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_5(TestFlattenOp):

    def init_test_case(self):
        # start == stop: flatten degenerates to identity.
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 2
        self.stop_axis = 2
        self.new_shape = (3, 2, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOpSixDims(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 3, 2, 4, 4)
        self.start_axis = 3
        self.stop_axis = 5
        self.new_shape = (3, 2, 3, 32)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_Float32(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)
        self.dtype = np.float32

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }


class TestFlattenOp_int32(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)
        self.dtype = np.int32

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
            'use_xpu': True
        }

    def test_check_grad(self):
        # Integer dtypes have no gradient kernel; skip the grad check.
        pass


class TestFlattenOp_int8(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)
        self.dtype = np.int8

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }

    def test_check_grad(self):
        # Integer dtypes have no gradient kernel; skip the grad check.
        pass


class TestFlattenOp_int64(TestFlattenOp):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)
        self.dtype = np.int64

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis
        }

    def test_check_grad(self):
        # Integer dtypes have no gradient kernel; skip the grad check.
        pass
class XPUTestFlattenOp(XPUOpTestWrapper):
    """Per-dtype test suite for the flatten_contiguous_range XPU op.

    create_test_class() instantiates the nested cases once per supported
    dtype; the framework injects the dtype as ``self.in_type``
    (presumably — confirm against xpu.get_test_cover_info).
    """

    def __init__(self):
        self.op_name = 'flatten_contiguous_range'
        self.use_dynamic_create_class = False

    class TestFlattenOp(XPUOpTest):

        def setUp(self):
            self.set_xpu()
            self.op_type = "flatten_contiguous_range"
            self.place = paddle.XPUPlace(0)
            self.use_xpu = True
            self.use_mkldnn = False
            # Defaults; init_test_case() may override axes and dtype.
            self.start_axis = 0
            self.stop_axis = -1
            self.dtype = self.in_type
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                # XShape is excluded from checking (no_check_set below).
                "XShape": np.random.random(self.in_shape).astype(self.dtype)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True

        def test_check_output(self):
            self.check_output_with_place(self.place, no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = -1
            self.new_shape = (120)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis,
                'use_xpu': True,
            }

    class TestFlattenOp_1(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 1
            self.stop_axis = 2
            self.new_shape = (3, 10, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_2(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_3(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 2
            self.new_shape = (30, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_4(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = -2
            self.stop_axis = -1
            self.new_shape = (3, 2, 20)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_5(TestFlattenOp):

        def init_test_case(self):
            # start == stop: flatten degenerates to identity.
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 2
            self.stop_axis = 2
            self.new_shape = (3, 2, 5, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOpSixDims(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.start_axis = 3
            self.stop_axis = 5
            self.new_shape = (3, 2, 3, 32)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_Float32(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            # Overrides the injected in_type (init_test_case runs after
            # the dtype assignment in setUp).
            self.dtype = np.float32

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_int32(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int32

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis,
                'use_xpu': True
            }

        def test_check_grad(self):
            # Integer dtypes have no gradient kernel; skip the grad check.
            pass

    class TestFlattenOp_int8(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int8

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

        def test_check_grad(self):
            # Integer dtypes have no gradient kernel; skip the grad check.
            pass

    class TestFlattenOp_int64(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int64

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

        def test_check_grad(self):
            # Integer dtypes have no gradient kernel; skip the grad check.
            pass
class TestFlatten2OpError(unittest.TestCase):
......@@ -338,5 +336,11 @@ class TestFlattenPython(unittest.TestCase):
self.assertTrue((2, 3, 16) == res_shape)
# Register one concrete test class per dtype supported by both
# flatten_contiguous_range (forward) and mean (used by the grad checker).
support_types = get_xpu_op_support_types('flatten_contiguous_range')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlattenOp, stype)

if __name__ == "__main__":
    unittest.main()
......@@ -23,61 +23,74 @@ import paddle
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class XPUTestFlattenOp(XPUOpTestWrapper):
    """Per-dtype test suite for the flatten XPU op.

    create_test_class() instantiates the nested cases once per supported
    dtype; the framework injects the dtype as ``self.in_type``
    (presumably — confirm against xpu.get_test_cover_info).
    """

    def __init__(self):
        self.op_name = 'flatten'
        self.use_dynamic_create_class = False

    class TestFlattenOp(XPUOpTest):

        def setUp(self):
            self.op_type = "flatten"
            self.use_xpu = True
            self.place = paddle.XPUPlace(0)
            self.init_test_case()
            # dtype is injected per generated class by create_test_class().
            self.dtype = self.in_type
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 2, 10)
            self.axis = 1
            self.new_shape = (3, 40)

        def init_attrs(self):
            self.attrs = {"axis": self.axis}

    class TestFlattenOp1(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 2, 10)
            self.axis = 0
            self.new_shape = (1, 120)

    class TestFlattenOpWithDefaultAxis(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (10, 2, 2, 3)
            self.new_shape = (10, 12)

        def init_attrs(self):
            # No explicit axis: exercises the op's default-axis path.
            self.attrs = {}

    class TestFlattenOpSixDims(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.axis = 4
            self.new_shape = (36, 16)
# Register one concrete test class per dtype supported by both flatten
# (forward) and mean (used by the gradient checker).
support_types = get_xpu_op_support_types('flatten')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlattenOp, stype)

if __name__ == "__main__":
    unittest.main()
......@@ -20,7 +20,9 @@ from functools import reduce
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
from operator import mul
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
......@@ -42,77 +44,77 @@ def ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1):
return y, mean, variance
class XPUTestLayerNormOp(XPUOpTestWrapper):
    """Per-dtype test suite for the layer_norm XPU op.

    create_test_class() instantiates the nested cases once per supported
    dtype, injecting the dtype as ``self.in_type``, and also handles the
    is-XPU-compiled gating that the old per-class skipIf decorators did
    (presumably — confirm against xpu.get_test_cover_info).
    """

    def __init__(self):
        self.op_name = 'layer_norm'
        self.use_dynamic_create_class = False

    class TestXPULayerNormOp(XPUOpTest):

        def setUp(self):
            self.op_type = "layer_norm"
            self.dtype = self.in_type
            self.shape = [2, 3, 4, 5]
            self.epsilon = 1e-05
            self.begin_norm_axis = 1
            self.set_attrs()

            # Number of elements in the normalized trailing dimensions:
            # Scale and Bias are 1-D of this length.
            right = reduce(mul,
                           self.shape[self.begin_norm_axis:len(self.shape)], 1)
            np.random.seed(10)
            x_np = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            scale_np = np.random.uniform(0.1, 1, [right]).astype(self.dtype)
            bias_np = np.random.uniform(0.1, 1, [right]).astype(self.dtype)
            # Reference outputs computed by the NumPy implementation above.
            ref_y_np, ref_mean_np, ref_variance_np = ref_layer_norm(
                x_np, scale_np, bias_np, self.epsilon, self.begin_norm_axis)

            self.inputs = {'X': x_np, 'Scale': scale_np, 'Bias': bias_np}
            self.outputs = {
                'Y': ref_y_np,
                'Mean': ref_mean_np,
                'Variance': ref_variance_np
            }
            self.attrs = {
                'begin_norm_axis': self.begin_norm_axis,
                'use_xpu': True
            }

        def set_attrs(self):
            # Hook for subclasses to override shape/axis before data setup.
            pass

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)

        def test_check_grad(self):
            self.check_grad_with_place(paddle.XPUPlace(0), ['X'],
                                       'Y',
                                       max_relative_error=0.02)

    class TestXPULayerNormOpAxis2(TestXPULayerNormOp):

        def set_attrs(self):
            self.begin_norm_axis = 2

    class TestXPULayerNormOpAxis3(TestXPULayerNormOp):

        def set_attrs(self):
            self.begin_norm_axis = 3

    class TestXPULayerNormOp2D(TestXPULayerNormOp):

        def set_attrs(self):
            self.shape = [10, 12]

    class TestXPULayerNormOp3D(TestXPULayerNormOp):

        def set_attrs(self):
            self.shape = [4, 5, 6]
# Register one concrete test class per dtype supported by layer_norm.
support_types = get_xpu_op_support_types('layer_norm')
for stype in support_types:
    create_test_class(globals(), XPUTestLayerNormOp, stype)

if __name__ == "__main__":
    unittest.main()
Markdown is supported.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.