From 18de0c9496d670ecb3bb6174e0cfa9ee6aed60ed Mon Sep 17 00:00:00 2001
From: HongyuJia
Date: Wed, 26 Jul 2023 19:06:25 +0800
Subject: [PATCH] [0D-Tensor] Fix test_elementwise_max_op unittest of FP16
 (#55683)

---
 test/legacy_test/test_elementwise_max_op.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/test/legacy_test/test_elementwise_max_op.py b/test/legacy_test/test_elementwise_max_op.py
index 2d202f1c084..1283cfe2872 100644
--- a/test/legacy_test/test_elementwise_max_op.py
+++ b/test/legacy_test/test_elementwise_max_op.py
@@ -130,9 +130,6 @@ class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseFP16Op):
         self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
         self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
 
-    def if_enbale_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
     def init_data(self):
@@ -145,9 +142,6 @@ class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseFP16Op):
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
         self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
 
-    def if_enbale_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
     def init_data(self):
@@ -160,9 +154,6 @@ class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseFP16Op):
         self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
         self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
 
-    def if_enbale_cinn(self):
-        self.enable_cinn = False
-
 
 @unittest.skipIf(
     core.is_compiled_with_cuda()
--
GitLab
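
For context, the deleted "if_enbale_cinn" overrides appear to be dead code: the name looks like a misspelling of an "if_enable_cinn" hook, which the test's setUp presumably invokes by its correct spelling, so the misspelled override would never run and the tests pass without it. Below is a minimal, self-contained sketch of that pitfall, under stated assumptions: FakeOpTest and ZeroDimFP16Case are hypothetical stand-ins, not Paddle's real OpTest classes, and the hook-dispatch behavior shown is an assumption about the harness, not taken from this patch.

# Minimal sketch (hypothetical class names) of the misspelled-hook pitfall.
import numpy as np


class FakeOpTest:
    # Stand-in for the real test base class (assumption, not Paddle's OpTest).
    enable_cinn = True

    def setUp(self):
        self.init_data()
        self.if_enable_cinn()  # the harness calls the correctly spelled hook

    def init_data(self):
        pass

    def if_enable_cinn(self):
        pass


class ZeroDimFP16Case(FakeOpTest):
    def init_data(self):
        # 0-D (scalar) FP16 inputs, mirroring the patched tests.
        self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
        self.y = np.random.uniform(0.1, 1, []).astype(np.float16)

    def if_enbale_cinn(self):  # misspelled, so setUp never calls it
        self.enable_cinn = False


case = ZeroDimFP16Case()
case.setUp()
print(case.enable_cinn)  # prints True: the misspelled override had no effect

Running the sketch prints True, showing that the override never disabled CINN, which is consistent with the patch simply deleting the three copies rather than renaming them.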