Unverified commit c5179772, authored by z8hanghuan, committed by GitHub

new way of unit test , *test=kunlun (#39650)

* new way of unit test , *test=kunlun

* new way of ut, *test=kunlun
Parent: dc39eb18
@@ -23,37 +23,29 @@ from paddle.fluid.op import Operator
import paddle.fluid as fluid
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


class XPUTestAdamOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'adam'
        self.use_dynamic_create_class = False

    class TestAdamOp(XPUOpTest):
        '''Test Adam Op with supplied attributes
        '''

        def setUp(self):
            self.init_dtype()
            self.set_xpu()
            self.op_type = "adam"
            self.place = paddle.XPUPlace(0)
            self.set_data()
            self.set_attrs()
            self.set_shape()
            self.set_inputs()
            self.set_steps()
            param_out, moment1_out, \
                moment2_out = adam_step(self.inputs, self.attrs)
@@ -61,94 +53,113 @@ class TestAdamOp1(OpTest):
                'Moment1Out': moment1_out,
                'Moment2Out': moment2_out,
                'ParamOut': param_out,
                'Beta1PowOut':
                np.array([self.beta1_pow]).astype("float32") * self.beta1,
                'Beta2PowOut':
                np.array([self.beta2_pow]).astype("float32") * self.beta2
            }

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True
            self.__class__.op_type = self.in_type

        def init_dtype(self):
            self.dtype = self.in_type

        def set_attrs(self):
            self.attrs = {
                'epsilon': self.epsilon,
                'beta1': self.beta1,
                'beta2': self.beta2
            }

        def set_data(self):
            self.beta1 = 0.78
            self.beta2 = 0.836
            self.learning_rate = 0.004
            self.epsilon = 1e-4

        def set_steps(self):
            self.num_steps = 1

        def set_shape(self):
            self.shape = (102, 105)

        def set_inputs(self):
            param = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            grad = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            moment1 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The second moment is positive
            moment2 = np.random.random(self.shape).astype(self.dtype)
            self.beta1_pow = self.beta1**10
            self.beta2_pow = self.beta2**10
            self.inputs = {
                'Param': param,
                'Grad': grad,
                'Moment1': moment1,
                'Moment2': moment2,
                'LearningRate':
                np.array([self.learning_rate]).astype("float32"),
                'Beta1Pow': np.array([self.beta1_pow]).astype("float32"),
                'Beta2Pow': np.array([self.beta2_pow]).astype("float32")
            }

        def test_check_output(self):
            self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)

    class TestAdamOp2(TestAdamOp):
        '''Test Adam Op with supplied attributes
        '''

        def set_data(self):
            self.beta1 = 0.9
            self.beta2 = 0.999
            self.learning_rate = 0.001
            self.epsilon = 1e-8

    class TestAdamOp3(TestAdamOp2):
        '''Test Adam Op with supplied attributes
        '''

        def set_shape(self):
            self.shape = (101, 47)

    class TestAdamOp4(TestAdamOp2):
        '''Test Adam Op with supplied attributes
        '''

        def set_shape(self):
            self.shape = (512, 26)

    class TestAdamOp5(TestAdamOp2):
        '''Test Adam Op with supplied attributes
        '''

        def set_shape(self):
            self.shape = (11, 1)

    class TestAdamOp6(TestAdamOp2):
        '''Test Adam Op with beta as Variable
        '''

        def set_shape(self):
            self.shape = (10, 10)

        def set_data(self):
            self.beta1 = 0.85
            self.beta2 = 0.95
            self.learning_rate = 0.001
            self.epsilon = 1e-8

    class TestAdamOpMultipleSteps(TestAdamOp2):
        '''Test Adam Operator with supplied attributes
        '''

        def set_steps(self):
            self.num_steps = 10

        def test_check_output(self):
            for _ in range(self.num_steps):
@@ -166,7 +177,8 @@ class TestAdamOpMultipleSteps(OpTest):
                }

                # Verify output for this step
                self.check_output_with_place(
                    place=paddle.XPUPlace(0), atol=1e-2)

                # Output of this step becomes input for next step
                self.inputs['Param'] = param_out
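The expected outputs in these tests are produced by a NumPy reference helper, adam_step(inputs, attributes), which is defined elsewhere in the test file and not shown in this diff. As a rough sketch, assuming the standard bias-corrected Adam update, such a helper would compute approximately the following (illustrative only, not the exact helper from the file):

import numpy as np

def adam_step_sketch(inputs, attributes):
    # Unpack the tensors the test feeds to the op.
    param = inputs['Param']
    grad = inputs['Grad']
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']
    beta1 = attributes['beta1']
    beta2 = attributes['beta2']
    epsilon = attributes['epsilon']

    # Standard Adam update: new biased moment estimates, a bias-corrected
    # learning rate, then the parameter step.
    moment1_out = beta1 * moment1 + (1 - beta1) * grad
    moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
    return param_out, moment1_out, moment2_out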
@@ -354,52 +366,9 @@ class TestSparseAdamOp(unittest.TestCase):
        self.check_with_place(paddle.XPUPlace(0), False)


support_types = get_xpu_op_support_types('adam')
for stype in support_types:
    create_test_class(globals(), XPUTestAdamOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
......
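Both converted files follow the same two-part pattern: an XPUOpTestWrapper subclass that holds the nested XPUOpTest cases, and a registration loop that creates one concrete test class per dtype reported by get_xpu_op_support_types. A condensed sketch of the pattern for a hypothetical operator 'my_op' (the wrapper, class, and op names below are illustrative and not part of this commit):

import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (create_test_class,
                                     get_xpu_op_support_types,
                                     XPUOpTestWrapper)


class XPUTestMyOp(XPUOpTestWrapper):  # hypothetical wrapper for 'my_op'
    def __init__(self):
        self.op_name = 'my_op'
        self.use_dynamic_create_class = False

    class TestMyOpBase(XPUOpTest):
        def setUp(self):
            self.op_type = 'my_op'
            self.place = paddle.XPUPlace(0)
            self.dtype = self.in_type  # dtype is injected per registered class
            # build self.inputs, self.attrs and self.outputs here

    class TestMyOpVariant(TestMyOpBase):  # variants only override hooks
        pass


support_types = get_xpu_op_support_types('my_op')
for stype in support_types:
    create_test_class(globals(), XPUTestMyOp, stype)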
@@ -20,21 +20,32 @@ sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard, core
import paddle
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestConcatOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'concat'
        self.use_dynamic_create_class = False

    class TestConcatOp(XPUOpTest):
        def setUp(self):
            self.set_xpu()
            self.op_type = "concat"
            self.place = paddle.XPUPlace(0)
            self.init_dtype()
            self.init_axis()
            self.set_inputs()
            self.inputs = {
                'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]
            }
            self.attrs = {'axis': self.axis}
            if self.axis < 0:
                self.actual_axis = self.axis + len(self.x0.shape)
@@ -47,13 +58,23 @@ class TestConcatOp(XPUOpTest):
                (self.x0, self.x1, self.x2), axis=self.actual_axis)
            }

        def set_inputs(self):
            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True

        def init_dtype(self):
            self.dtype = self.in_type

        def init_axis(self):
            self.axis = -1

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
@@ -62,25 +83,43 @@ class TestConcatOp(XPUOpTest):
                self.check_grad_with_place(place, ['x1'], 'Out')
                self.check_grad_with_place(place, ['x2'], 'Out')

    class TestConcatOpAxis0XPU(TestConcatOp):
        def init_axis(self):
            self.axis = 0

    class TestConcatOpAxis1XPU(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)

        def init_axis(self):
            self.axis = 1

    class TestConcatOpAxis2XPU(TestConcatOp):
        def init_axis(self):
            self.axis = 2

    class TestConcatOpAxis3XPU(TestConcatOp):
        def init_axis(self):
            self.axis = 3

    class TestConcatOpAxisNeg1XPU(TestConcatOp):
        def init_axis(self):
            self.axis = -1

    class TestConcatOpAxisNeg2XPU(TestConcatOp):
        def init_axis(self):
            self.axis = -2

    class TestConcatOpAxisNeg3XPU(TestConcatOp):
        def init_axis(self):
            self.axis = -3

    @skip_check_grad_ci(
        reason="The function 'check_grad' for large inputs is too slow.")
    class TestConcatOp3(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
            self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
@@ -89,12 +128,11 @@ class TestConcatOp3(TestConcatOp):
        def test_check_grad(self):
            pass

    @skip_check_grad_ci(
        reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
    )
    class TestConcatOp4(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
@@ -104,52 +142,9 @@ class TestConcatOp4(TestConcatOp):
            pass


support_types = get_xpu_op_support_types('concat')
for stype in support_types:
    create_test_class(globals(), XPUTestConcatOp, stype)

if __name__ == '__main__':
    paddle.enable_static()
......
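For the concat cases, setUp normalizes a negative axis against the rank of x0 before building the expected output with np.concatenate. A small standalone sketch of that normalization, using the default shapes and axis from TestConcatOp above (values chosen only for illustration):

import numpy as np

# Same normalization as in TestConcatOp.setUp: a negative axis is shifted
# by the tensor rank, then the expected result is plain np.concatenate.
x0 = np.random.random((2, 3, 4, 5)).astype("float32")
x1 = np.random.random((2, 3, 4, 5)).astype("float32")
x2 = np.random.random((2, 3, 4, 5)).astype("float32")

axis = -1  # default used by TestConcatOp.init_axis
actual_axis = axis + len(x0.shape) if axis < 0 else axis
actual_axis = actual_axis if actual_axis > 0 else 0

expected = np.concatenate((x0, x1, x2), axis=actual_axis)
print(expected.shape)  # (2, 3, 4, 15) for axis=-1 on these shapes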