From 14e0ce71d7b435c0964a252662d610ace569e8f2 Mon Sep 17 00:00:00 2001
From: Charles-hit <56987902+Charles-hit@users.noreply.github.com>
Date: Fri, 19 May 2023 11:25:17 +0800
Subject: [PATCH] fix meshgrid and expand_as test (#53951)

---
 .../tests/unittests/test_expand_as_v2_op.py   | 48 ++-----------------
 .../fluid/tests/unittests/test_meshgrid_op.py | 10 ++--
 2 files changed, 8 insertions(+), 50 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py
index ee472130f64..990ea9be131 100755
--- a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py
@@ -103,10 +103,7 @@ class TestExpandAsOpRank2(TestExpandAsBasic):
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
     "core is not compiled with CUDA or not support the bfloat16",
 )
-class TestExpandAsOpRank2BFP16OP(TestExpandAsOpRank2):
-    def init_dtype(self):
-        self.dtype = np.uint16
-
+class TestExpandAsOpRank2BFP16OP(TestExpandAsBasicBFP16OP):
     def init_inputs_and_outputs(self):
         x = np.random.rand(10, 12).astype(np.float32)
         target_tensor = np.random.rand(10, 12).astype(np.float32)
@@ -119,17 +116,6 @@ class TestExpandAsOpRank2BFP16OP(TestExpandAsOpRank2):
         output = np.tile(x, bcast_dims)
         self.outputs = {'Out': convert_float_to_uint16(output)}
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
-    def test_check_output(self):
-        self.check_output_with_place(place=paddle.CUDAPlace(0))
-
-    def test_check_grad(self):
-        self.check_grad_with_place(
-            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
-        )
-
 
 class TestExpandAsOpRank3(TestExpandAsBasic):
     def init_inputs_and_outputs(self):
@@ -147,10 +133,7 @@ class TestExpandAsOpRank3(TestExpandAsBasic):
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
     "core is not compiled with CUDA or not support the bfloat16",
 )
-class TestExpandAsOpRank3BFP16OP(TestExpandAsOpRank3):
-    def init_dtype(self):
-        self.dtype = np.uint16
-
+class TestExpandAsOpRank3BFP16OP(TestExpandAsBasicBFP16OP):
     def init_inputs_and_outputs(self):
         x = np.random.rand(2, 3, 20).astype(np.float32)
         target_tensor = np.random.rand(2, 3, 20).astype(np.float32)
@@ -163,17 +146,6 @@ class TestExpandAsOpRank3BFP16OP(TestExpandAsOpRank3):
         output = np.tile(x, bcast_dims)
         self.outputs = {'Out': convert_float_to_uint16(output)}
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
-    def test_check_output(self):
-        self.check_output_with_place(place=paddle.CUDAPlace(0))
-
-    def test_check_grad(self):
-        self.check_grad_with_place(
-            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
-        )
-
 
 class TestExpandAsOpRank4(TestExpandAsBasic):
     def init_inputs_and_outputs(self):
@@ -191,10 +163,7 @@ class TestExpandAsOpRank4(TestExpandAsBasic):
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
     "core is not compiled with CUDA or not support the bfloat16",
 )
-class TestExpandAsOpRank4BFP16OP(TestExpandAsOpRank3):
-    def init_dtype(self):
-        self.dtype = np.uint16
-
+class TestExpandAsOpRank4BFP16OP(TestExpandAsBasicBFP16OP):
     def init_inputs_and_outputs(self):
         x = np.random.rand(1, 1, 7, 16).astype(np.float32)
         target_tensor = np.random.rand(4, 6, 7, 16).astype(np.float32)
@@ -207,17 +176,6 @@ class TestExpandAsOpRank4BFP16OP(TestExpandAsOpRank3):
         output = np.tile(x, bcast_dims)
         self.outputs = {'Out': convert_float_to_uint16(output)}
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
-    def test_check_output(self):
-        self.check_output_with_place(place=paddle.CUDAPlace(0))
-
-    def test_check_grad(self):
-        self.check_grad_with_place(
-            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
-        )
-
 
 class TestExpandAsOpRank5(TestExpandAsBasic):
     no_need_check_grad = True
diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
index 1928108bfae..377699e3855 100644
--- a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
+++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
@@ -32,13 +32,13 @@ class TestMeshgridOp(OpTest):
         self.prim_op_type = "comp"
         self.python_api = meshgrid_wrapper
         self.public_python_api = meshgrid_wrapper
-        self.dtype = self.get_dtype()
+        self.init_data_type()
         self.init_inputs_and_outputs()
         self.python_out_sig = ['out0', 'out1']
         self.if_enable_cinn()
 
-    def get_dtype(self):
-        return "float64"
+    def init_data_type(self):
+        self.dtype = np.float64
 
     def test_check_output(self):
         self.check_output(check_prim=True)
@@ -80,8 +80,8 @@ class TestMeshgridOp2Fp16(TestMeshgridOp):
     def get_x_shape(self):
         return [100, 300]
 
-    def get_dtype(self):
-        return np.float16
+    def init_data_type(self):
+        self.dtype = np.float16
 
 
 @unittest.skipIf(
--
GitLab
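
For context: each "+class ...BFP16OP(TestExpandAsBasicBFP16OP):" line above reparents a rank-specific bfloat16 test onto a shared base class, and the methods each hunk deletes (init_dtype, if_enable_cinn, test_check_output, test_check_grad) are exactly the duplicates such a base would absorb. TestExpandAsBasicBFP16OP itself is defined outside these hunks, so the sketch below is a plausible reconstruction assembled from the deleted lines, not code shown by the patch; the decorator condition and the assumption that it lives inside test_expand_as_v2_op.py (where TestExpandAsBasic and the imports already exist) are inferred.

    import unittest

    import numpy as np
    import paddle
    from paddle.fluid import core

    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or not support the bfloat16",
    )
    class TestExpandAsBasicBFP16OP(TestExpandAsBasic):
        # TestExpandAsBasic is assumed to come from the same test module.

        def init_dtype(self):
            # OpTest carries bfloat16 tensors as uint16 arrays.
            self.dtype = np.uint16

        def if_enable_cinn(self):
            # The deleted per-class overrides all disabled CINN for bf16.
            self.enable_cinn = False

        def test_check_output(self):
            # bfloat16 requires a CUDA place, so check on GPU only.
            self.check_output_with_place(place=paddle.CUDAPlace(0))

        def test_check_grad(self):
            self.check_grad_with_place(
                paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
            )

Incidentally, the refactor also fixes TestExpandAsOpRank4BFP16OP, which previously inherited from TestExpandAsOpRank3 instead of a Rank4 class. The meshgrid half of the patch needs no outside context: get_dtype(), which returned a dtype value (the string "float64" in the base class but np.float16 in the fp16 subclass), becomes init_data_type(), which assigns self.dtype directly, so both classes now configure the dtype through one consistent hook and the string-versus-numpy-dtype mismatch disappears.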