Unverified · Commit ac495981 authored by Charles-hit, committed by GitHub

add reduce_mean and gelu test (#51447)

Parent 54331f1a
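At a glance, the diff below does two things: it renames the per-test hook `if_skip_cinn` to `if_enable_cinn` across the activation, elementwise, fill_any_like, full_like, and reduce_mean tests, and it switches the gelu and reduce_mean tests to composite (prim) checking (`prim_op_type = "comp"`, `check_prim=True`), also registering `test_mean_op` in `TEST_CINN_OPS`. A minimal sketch of the convention the tests converge on (illustrative only; the `OpTest` import path is omitted and this class is not taken verbatim from the diff):

import numpy as np
import paddle
# OpTest comes from the Paddle op-test framework; import it as the surrounding test files do.

class TestGeluLike(OpTest):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"            # also check the composite (prim) decomposition
        self.python_api = paddle.nn.functional.gelu
        self.if_enable_cinn()                 # renamed from if_skip_cinn

    def if_enable_cinn(self):
        pass                                  # subclasses set self.enable_cinn = False where CINN is unsupported

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)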
......@@ -1220,7 +1220,8 @@ set(TEST_CINN_OPS
test_gather_nd_op
test_elementwise_pow_op
test_transpose_op
test_reshape_op)
test_reshape_op
test_mean_op)
foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
if(WITH_CINN)
......
......@@ -109,7 +109,7 @@ class TestExpFp32_Prim(OpTest):
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
self.if_skip_cinn()
self.if_enable_cinn()
def test_check_output(self):
self.check_output()
......@@ -123,7 +123,7 @@ class TestExpFp32_Prim(OpTest):
def init_shape(self):
self.shape = [12, 17]
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = True
......@@ -136,7 +136,7 @@ class TestExpPrim_ZeroDim(TestExpFp32_Prim):
def init_shape(self):
self.shape = []
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -319,7 +319,7 @@ class TestSilu(TestActivation):
self.python_api = paddle.nn.functional.silu
self.init_dtype()
self.init_shape()
self.if_skip_cinn()
self.if_enable_cinn()
np.random.seed(1024)
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
......@@ -331,7 +331,7 @@ class TestSilu(TestActivation):
def init_dtype(self):
self.dtype = np.float32
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
def test_check_grad(self):
......@@ -342,7 +342,7 @@ class TestSilu_ZeroDim(TestSilu):
def init_shape(self):
self.shape = []
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -1935,47 +1935,65 @@ def gelu(x, approximate):
class TestGeluApproximate(TestActivation):
def setUp(self):
self.op_type = "gelu"
self.prim_op_type = "comp"
self.python_api = paddle.nn.functional.gelu
self.init_dtype()
self.init_shape()
approximate = True
np.random.seed(1024)
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
out = gelu(x, approximate)
self.enable_cinn = False
self.inputs = {'X': x}
self.outputs = {'Out': out}
self.attrs = {"approximate": approximate}
def test_check_output(self):
self.check_output(check_prim=True)
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_prim=True)
class TestGelu(TestActivation):
def setUp(self):
self.op_type = "gelu"
self.prim_op_type = "comp"
self.python_api = paddle.nn.functional.gelu
self.init_dtype()
self.init_shape()
approximate = False
np.random.seed(2048)
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
out = gelu(x, approximate)
self.if_enable_cinn()
self.inputs = {'X': x}
self.outputs = {'Out': out}
self.attrs = {"approximate": approximate}
def if_enable_cinn(self):
self.enable_cinn = False
def test_check_output(self):
self.check_output(check_prim=True)
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_prim=True)
class TestGelu_ZeroDim(TestGelu):
def init_shape(self):
self.shape = []
def if_enable_cinn(self):
self.enable_cinn = False
class TestGELUAPI(unittest.TestCase):
# test paddle.nn.GELU, paddle.nn.functional.gelu
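The `gelu(x, approximate)` reference helper named in the hunk header above produces the expected outputs in NumPy. As a reminder of what the checks compare against, here is a sketch of the textbook gelu formulas (standard definitions, not copied from the test file):

import numpy as np
from scipy.special import erf

def gelu_reference(x, approximate):
    if approximate:
        # tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))
    # exact form: x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))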
......@@ -3760,7 +3778,7 @@ def create_test_act_fp16_class(
def init_dtype(self):
self.dtype = np.float16
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = enable_cinn
def test_check_output(self):
......@@ -3814,7 +3832,7 @@ create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestGelu, check_prim=True)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
......
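The `create_test_act_fp16_class(...)` calls above come from a class factory; only its `init_dtype` and `if_enable_cinn` bodies are visible in this hunk. A hedged sketch of the usual pattern such a factory follows (parameter defaults and the registration step are assumptions, not shown in the diff):

def create_test_act_fp16_class(parent, check_prim=False, enable_cinn=False, grad_atol=0.80):
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

    # register the generated fp16 variant under a derived name
    cls_name = "{}_fp16".format(parent.__name__)
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16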
......@@ -38,7 +38,7 @@ class TestElementwiseAddOp(OpTest):
self.init_kernel_type()
self.init_axis()
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
......@@ -105,7 +105,7 @@ class TestElementwiseAddOp(OpTest):
def if_check_prim(self):
self.check_prim = self.axis == -1
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -115,7 +115,7 @@ class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
self.out = np.add(self.x, self.y)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -182,7 +182,7 @@ class TestBF16ElementwiseAddOp(OpTest):
}
self.attrs = {'axis': self.axis, 'use_mkldnn': False}
self.outputs = {'Out': convert_float_to_uint16(self.out)}
self.if_skip_cinn()
self.if_enable_cinn()
def test_check_output(self):
place = core.CUDAPlace(0)
......@@ -204,7 +204,7 @@ class TestBF16ElementwiseAddOp(OpTest):
place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -481,7 +481,7 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
self.y = np.random.rand(100, 1).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 100, 1)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......
......@@ -38,7 +38,7 @@ class ElementwiseDivOp(OpTest):
self.init_dtype()
self.init_shape()
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
x = self.gen_data(self.x_shape).astype(self.val_dtype)
y = self.gen_data(self.y_shape).astype(self.val_dtype)
......@@ -64,7 +64,7 @@ class ElementwiseDivOp(OpTest):
self.grad_x = grad_x
self.grad_y = grad_y
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
def init_args(self):
......@@ -136,7 +136,7 @@ class TestElementwiseDivPrimOpFp32(ElementwiseDivOp):
self.dtype = np.float32
self.val_dtype = np.float32
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -145,7 +145,7 @@ class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
self.x_shape = []
self.y_shape = []
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -163,7 +163,7 @@ class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
def compute_gradient_y(self, grad_out, out, y):
return np.sum(-1 * grad_out * out / y.reshape([1, 1]))
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -181,7 +181,7 @@ class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
def compute_gradient_y(self, grad_out, out, y):
return -1 * grad_out * out / y
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -375,7 +375,7 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
def compute_gradient_x(self, grad_out, y):
return np.sum(grad_out / y, axis=(0, 1))
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -399,7 +399,7 @@ class TestElementwiseDivOpFp16(ElementwiseDivOp):
self.dtype = np.float16
self.val_dtype = np.float16
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......
......@@ -35,7 +35,7 @@ class ElementwiseMulOp(OpTest):
self.init_input_output()
self.init_kernel_type()
self.init_axis()
self.if_skip_cinn()
self.if_enable_cinn()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
......@@ -88,7 +88,7 @@ class ElementwiseMulOp(OpTest):
def init_axis(self):
pass
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -98,7 +98,7 @@ class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp):
self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -108,7 +108,7 @@ class TestElementwiseMulOp_ZeroDim2(ElementwiseMulOp):
self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -118,7 +118,7 @@ class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -145,7 +145,7 @@ class TestBF16ElementwiseMulOp(OpTest):
}
self.outputs = {'Out': convert_float_to_uint16(self.out)}
self.attrs = {'axis': self.axis, 'use_mkldnn': False}
self.if_skip_cinn()
self.if_enable_cinn()
def test_check_output(self):
self.check_output()
......@@ -159,7 +159,7 @@ class TestBF16ElementwiseMulOp(OpTest):
def test_check_grad_ingore_y(self):
self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_prim=True)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -349,7 +349,7 @@ class TestElementwiseMulOpFp16(ElementwiseMulOp):
def init_dtype(self):
self.dtype = np.float16
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......
......@@ -35,7 +35,7 @@ class TestElementwiseOp(OpTest):
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
def test_check_output(self):
self.check_output()
......@@ -64,7 +64,7 @@ class TestElementwiseOp(OpTest):
def if_check_prim(self):
self.check_prim = True
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -79,12 +79,12 @@ class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
def if_check_prim(self):
self.check_prim = True
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -99,12 +99,12 @@ class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
def if_check_prim(self):
self.check_prim = True
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -119,12 +119,12 @@ class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
def if_check_prim(self):
self.check_prim = True
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -144,7 +144,7 @@ class TestBF16ElementwiseOp(OpTest):
}
self.outputs = {'Out': convert_float_to_uint16(out)}
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
def test_check_output(self):
self.check_output()
......@@ -165,7 +165,7 @@ class TestBF16ElementwiseOp(OpTest):
def if_check_prim(self):
self.check_prim = True
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -371,7 +371,7 @@ class TestComplexElementwiseSubOp(OpTest):
self.attrs = {'axis': -1, 'use_mkldnn': False}
self.outputs = {'Out': self.out}
self.if_check_prim()
self.if_skip_cinn()
self.if_enable_cinn()
def init_base_dtype(self):
self.dtype = np.float64
......@@ -424,7 +424,7 @@ class TestComplexElementwiseSubOp(OpTest):
check_prim=self.check_prim,
)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
def if_check_prim(self):
......@@ -446,7 +446,7 @@ class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp):
self.grad_x = np.real(self.grad_out)
self.grad_y = -self.grad_out
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
def if_check_prim(self):
......
......@@ -41,7 +41,7 @@ class TestFillAnyLikeOp(OpTest):
self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
self.attrs = {'value': self.value}
self.outputs = {'Out': self.value * np.ones_like(self.inputs["X"])}
self.if_skip_cinn()
self.if_enable_cinn()
def init(self):
pass
......@@ -49,7 +49,7 @@ class TestFillAnyLikeOp(OpTest):
def test_check_output(self):
self.check_output(check_prim=True)
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -58,7 +58,7 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
self.dtype = np.float32
self.value = 0.0
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -79,13 +79,13 @@ class TestFillAnyLikeOpBfloat16(OpTest):
self.value * np.ones_like(self.inputs["X"])
)
}
self.if_skip_cinn()
self.if_enable_cinn()
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_prim=True)
def if_skip_cinn(self):
def if_enable_cinn(self):
self.enable_cinn = False
......@@ -93,7 +93,7 @@ class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
def init(self):
self.value = 1.0
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -101,7 +101,7 @@ class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
def init(self):
self.value = 1e-10
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -109,7 +109,7 @@ class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
def init(self):
self.value = 1e-100
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -131,9 +131,9 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp):
* np.ones_like(self.inputs["X"]).astype(np.float32)
}
self.if_skip_cinn()
self.if_enable_cinn()
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -141,7 +141,7 @@ class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
def init(self):
self.dtype = np.float16
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......
......@@ -112,7 +112,7 @@ class TestFullLikeOp1(OpTest):
self.prim_op_type = "comp"
self.python_api = fill_any_like_wrapper
self.init_data()
self.if_skip_cinn()
self.if_enable_cinn()
x = np.zeros(self.shape)
out = np.full_like(x, self.fill_value, self.dtype)
......@@ -132,7 +132,7 @@ class TestFullLikeOp1(OpTest):
def test_check_output(self):
self.check_output(check_eager=True, check_prim=True)
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -142,7 +142,7 @@ class TestFullLikeOp2(TestFullLikeOp1):
self.shape = [1024, 1024]
self.dtype = np.float64
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......@@ -152,7 +152,7 @@ class TestFullLikeOp3(TestFullLikeOp1):
self.shape = [5000, 5000]
self.dtype = np.int64
def if_skip_cinn(self):
def if_enable_cinn(self):
pass
......
......@@ -150,11 +150,13 @@ class TestReduceMeanOp(OpTest):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.prim_op_type = "comp"
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
self.axis = [0]
self.keepdim = False
self.set_attrs()
self.if_enable_cinn()
np.random.seed(10)
x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
......@@ -173,20 +175,23 @@ class TestReduceMeanOp(OpTest):
def set_attrs(self):
pass
def if_enable_cinn(self):
pass
def test_check_output(self):
if self.dtype != 'float16':
self.check_output(check_eager=True)
self.check_output(check_eager=True, check_prim=True)
else:
place = paddle.CUDAPlace(0)
self.check_output_with_place(place=place)
self.check_output_with_place(place=place, check_prim=True)
def test_check_grad(self):
if self.dtype != 'float16':
self.check_grad(['X'], ['Out'], check_eager=True)
self.check_grad(['X'], ['Out'], check_eager=True, check_prim=True)
else:
place = paddle.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], ['Out'], numeric_grad_delta=0.5
place, ['X'], ['Out'], numeric_grad_delta=0.5, check_prim=True
)
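The expected outputs for these reduce_mean tests are the NumPy mean over the configured axes. A sketch of what a `reduce_mean_wrapper`-style reference computes (the wrapper name comes from the diff; its exact signature in the test file is not shown here):

import numpy as np

def reduce_mean_reference(x, axis=None, keepdim=False):
    # np.mean over the requested axes mirrors reduce_mean;
    # axis=[0] and keepdim=False match the defaults set in setUp above
    return np.mean(x, axis=tuple(axis) if axis else None, keepdims=keepdim)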
......@@ -199,11 +204,13 @@ class TestReduceMeanBF16Op(OpTest):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.prim_op_type = "comp"
self.dtype = np.uint16
self.shape = [2, 3, 4, 5]
self.axis = [0]
self.keepdim = False
self.set_attrs()
self.enable_cinn = False
np.random.seed(10)
x_np = np.random.uniform(-1, 1, self.shape).astype(np.float32)
......@@ -224,12 +231,12 @@ class TestReduceMeanBF16Op(OpTest):
def test_check_output(self):
place = paddle.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_prim=True)
def test_check_grad(self):
place = paddle.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], ['Out'], numeric_grad_delta=0.05
place, ['X'], ['Out'], numeric_grad_delta=0.05, check_prim=True
)
......@@ -237,6 +244,7 @@ class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.prim_op_type = "comp"
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
......@@ -282,6 +290,9 @@ class TestReduceMeanOpShape6DFP16(TestReduceMeanOp):
self.shape = [2, 3, 4, 5, 6, 7]
self.dtype = 'float16'
def if_enable_cinn(self):
self.enable_cinn = False
class TestReduceMeanOpAxisAll(TestReduceMeanOp):
def set_attrs(self):
......