Unverified commit c5179772, authored by z8hanghuan, committed by GitHub

new way of unit test , *test=kunlun (#39650)

* new way of unit test , *test=kunlun

* new way of ut, *test=kunlun
Parent dc39eb18
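Both files below move from hand-rolled OpTest subclasses to the XPU test-wrapper pattern: each operator's cases are nested inside an XPUOpTestWrapper subclass, and create_test_class() generates one concrete test class for every data type that get_xpu_op_support_types() reports the XPU kernel as supporting. A minimal sketch of that registration flow, using a hypothetical example_op and assuming the Paddle test harness (op_test_xpu, xpu.get_test_cover_info) is importable:

    # Minimal sketch of the registration pattern introduced by this commit.
    # XPUOpTestWrapper, get_xpu_op_support_types and create_test_class come from
    # xpu.get_test_cover_info in the Paddle test suite (assumed available here).
    from xpu.get_test_cover_info import (create_test_class,
                                         get_xpu_op_support_types,
                                         XPUOpTestWrapper)
    from op_test_xpu import XPUOpTest


    class XPUTestExampleOp(XPUOpTestWrapper):  # hypothetical wrapper for some op
        def __init__(self):
            self.op_name = 'example_op'        # hypothetical operator name
            self.use_dynamic_create_class = False

        class TestExampleOp(XPUOpTest):
            def setUp(self):
                self.op_type = 'example_op'
                self.dtype = self.in_type      # in_type is injected per generated class


    # One concrete test class is generated for every dtype the XPU kernel supports.
    support_types = get_xpu_op_support_types('example_op')
    for stype in support_types:
        create_test_class(globals(), XPUTestExampleOp, stype)

The generated classes receive the selected dtype as self.in_type, which is why the rewritten tests read their dtype from self.in_type instead of hard-coding "float32".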
@@ -23,163 +23,175 @@ from paddle.fluid.op import Operator
import paddle.fluid as fluid
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


class XPUTestAdamOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'adam'
        self.use_dynamic_create_class = False

    class TestAdamOp(XPUOpTest):
        '''Test Adam Op with supplied attributes
        '''

        def setUp(self):
            self.init_dtype()
            self.set_xpu()
            self.op_type = "adam"
            self.place = paddle.XPUPlace(0)
            self.set_data()
            self.set_attrs()
            self.set_shape()
            self.set_inputs()
            self.set_steps()
            param_out, moment1_out, \
                moment2_out = adam_step(self.inputs, self.attrs)

            self.outputs = {
                'Moment1Out': moment1_out,
                'Moment2Out': moment2_out,
                'ParamOut': param_out,
                'Beta1PowOut':
                np.array([self.beta1_pow]).astype("float32") * self.beta1,
                'Beta2PowOut':
                np.array([self.beta2_pow]).astype("float32") * self.beta2
            }

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True
            self.__class__.op_type = self.in_type

        def init_dtype(self):
            self.dtype = self.in_type

        def set_attrs(self):
            self.attrs = {
                'epsilon': self.epsilon,
                'beta1': self.beta1,
                'beta2': self.beta2
            }

        def set_data(self):
            self.beta1 = 0.78
            self.beta2 = 0.836
            self.learning_rate = 0.004
            self.epsilon = 1e-4

        def set_steps(self):
            self.num_steps = 1

        def set_shape(self):
            self.shape = (102, 105)

        def set_inputs(self):
            param = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            grad = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            moment1 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The second moment is positive
            moment2 = np.random.random(self.shape).astype(self.dtype)
            self.beta1_pow = self.beta1**10
            self.beta2_pow = self.beta2**10
            self.inputs = {
                'Param': param,
                'Grad': grad,
                'Moment1': moment1,
                'Moment2': moment2,
                'LearningRate':
                np.array([self.learning_rate]).astype("float32"),
                'Beta1Pow': np.array([self.beta1_pow]).astype("float32"),
                'Beta2Pow': np.array([self.beta2_pow]).astype("float32")
            }

        def test_check_output(self):
            self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)

    class TestAdamOp2(TestAdamOp):
        '''Test Adam Op with supplied attributes
        '''

        def set_data(self):
            self.beta1 = 0.9
            self.beta2 = 0.999
            self.learning_rate = 0.001
            self.epsilon = 1e-8

    class TestAdamOp3(TestAdamOp2):
        '''Test Adam Op with supplied attributes
        '''

        def set_shape(self):
            self.shape = (101, 47)

    class TestAdamOp4(TestAdamOp2):
        '''Test Adam Op with supplied attributes
        '''

        def set_shape(self):
            self.shape = (512, 26)

    class TestAdamOp5(TestAdamOp2):
        '''Test Adam Op with supplied attributes
        '''

        def set_shape(self):
            self.shape = (11, 1)

    class TestAdamOp6(TestAdamOp2):
        '''Test Adam Op with beta as Variable
        '''

        def set_shape(self):
            self.shape = (10, 10)

        def set_data(self):
            self.beta1 = 0.85
            self.beta2 = 0.95
            self.learning_rate = 0.001
            self.epsilon = 1e-8

    class TestAdamOpMultipleSteps(TestAdamOp2):
        '''Test Adam Operator with supplied attributes
        '''

        def set_steps(self):
            self.num_steps = 10

        def test_check_output(self):
            for _ in range(self.num_steps):
                param_out, moment1_out, \
                    moment2_out = adam_step(self.inputs, self.attrs)

                beta1_pow_out = self.inputs['Beta1Pow'] * self.beta1
                beta2_pow_out = self.inputs['Beta2Pow'] * self.beta2
                self.outputs = {
                    'Moment1Out': moment1_out,
                    'Moment2Out': moment2_out,
                    'ParamOut': param_out,
                    'Beta1PowOut': beta1_pow_out,
                    'Beta2PowOut': beta2_pow_out
                }

                # Verify output for this step
                self.check_output_with_place(
                    place=paddle.XPUPlace(0), atol=1e-2)

                # Output of this step becomes input for next step
                self.inputs['Param'] = param_out
                self.inputs['Moment1'] = moment1_out
                self.inputs['Moment2'] = moment2_out

                # Update powers of Beta1 and Beta2 for next time step
                self.inputs['Beta1Pow'] = beta1_pow_out
                self.inputs['Beta2Pow'] = beta2_pow_out

                # Randomize gradient for next step
                self.inputs['Grad'] = np.random.uniform(
                    -1, 1, (102, 105)).astype("float32")


def adam_step(inputs, attributes):
@@ -354,52 +366,9 @@ class TestSparseAdamOp(unittest.TestCase):
        self.check_with_place(paddle.XPUPlace(0), False)


support_types = get_xpu_op_support_types('adam')
for stype in support_types:
    create_test_class(globals(), XPUTestAdamOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
...
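The adam_step helper these cases call sits outside the hunks shown above. As a point of reference only (an assumption based on the standard Adam update, not the file's actual body), one step computes:

    import numpy as np


    def adam_step_reference(inputs, attributes):
        # Hypothetical reference for one Adam step (standard formulation);
        # the real adam_step in this file is not part of the diff shown.
        param, grad = inputs['Param'], inputs['Grad']
        moment1, moment2 = inputs['Moment1'], inputs['Moment2']
        lr = inputs['LearningRate']
        beta1_pow, beta2_pow = inputs['Beta1Pow'], inputs['Beta2Pow']
        beta1, beta2 = attributes['beta1'], attributes['beta2']
        epsilon = attributes['epsilon']

        moment1_out = beta1 * moment1 + (1 - beta1) * grad
        moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
        lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
        param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
        return param_out, moment1_out, moment2_out

The Beta1PowOut and Beta2PowOut values asserted by the tests are simply Beta1Pow * beta1 and Beta2Pow * beta2, matching how TestAdamOpMultipleSteps advances them between steps.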
@@ -20,136 +20,131 @@ sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard, core
import paddle
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestConcatOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'concat'
        self.use_dynamic_create_class = False

    class TestConcatOp(XPUOpTest):
        def setUp(self):
            self.set_xpu()
            self.op_type = "concat"
            self.place = paddle.XPUPlace(0)
            self.init_dtype()
            self.init_axis()
            self.set_inputs()
            self.inputs = {
                'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]
            }
            self.attrs = {'axis': self.axis}
            if self.axis < 0:
                self.actual_axis = self.axis + len(self.x0.shape)
                self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
            else:
                self.actual_axis = self.axis

            self.outputs = {
                'Out': np.concatenate(
                    (self.x0, self.x1, self.x2), axis=self.actual_axis)
            }

        def set_inputs(self):
            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True

        def init_dtype(self):
            self.dtype = self.in_type

        def init_axis(self):
            self.axis = -1

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(place, ['x0'], 'Out')
                self.check_grad_with_place(place, ['x1'], 'Out')
                self.check_grad_with_place(place, ['x2'], 'Out')

    class TestConcatOpAxis0XPU(TestConcatOp):
        def init_axis(self):
            self.axis = 0

    class TestConcatOpAxis1XPU(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)

        def init_axis(self):
            self.axis = 1

    class TestConcatOpAxis2XPU(TestConcatOp):
        def init_axis(self):
            self.axis = 2

    class TestConcatOpAxis3XPU(TestConcatOp):
        def init_axis(self):
            self.axis = 3

    class TestConcatOpAxisNeg1XPU(TestConcatOp):
        def init_axis(self):
            self.axis = -1

    class TestConcatOpAxisNeg2XPU(TestConcatOp):
        def init_axis(self):
            self.axis = -2

    class TestConcatOpAxisNeg3XPU(TestConcatOp):
        def init_axis(self):
            self.axis = -3

    @skip_check_grad_ci(
        reason="The function 'check_grad' for large inputs is too slow.")
    class TestConcatOp3(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
            self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.axis = 1

        def test_check_grad(self):
            pass

    @skip_check_grad_ci(
        reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
    )
    class TestConcatOp4(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
            self.axis = 0

        def test_check_grad(self):
            pass


support_types = get_xpu_op_support_types('concat')
for stype in support_types:
    create_test_class(globals(), XPUTestConcatOp, stype)

if __name__ == '__main__':
    paddle.enable_static()
...
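The actual_axis arithmetic in TestConcatOp.setUp mirrors how numpy resolves negative axes; a small standalone check of that equivalence (illustrative only, not part of the test file):

    import numpy as np

    x0 = np.random.random((2, 3, 4, 5))
    x1 = np.random.random((2, 3, 4, 5))
    # For 4-D inputs, axis=-1 resolves to axis=-1 + 4 = 3.
    assert np.array_equal(np.concatenate((x0, x1), axis=-1),
                          np.concatenate((x0, x1), axis=3))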