From a90711c7a23a3ce456f6e9f7057e7e3a735d6127 Mon Sep 17 00:00:00 2001
From: "joanna.wozna.intel"
Date: Thu, 1 Oct 2020 15:35:14 +0200
Subject: [PATCH] Add avx512 core instructions check 2 (#27750)

* Add test skip from cmake

* Remove print
---
 .../tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
index 4b7b4b5811a..6f0b4f9076e 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
@@ -35,6 +35,8 @@ def conv2d_residual_naive(out, residual):
     return out
 
 
+@unittest.skipIf(not core.supports_bfloat16(),
+                 "place does not support BF16 evaluation")
 class TestConv2dBf16Op(TestConv2dOp):
     def setUp(self):
         self.op_type = "conv2d"
@@ -42,9 +44,9 @@ class TestConv2dBf16Op(TestConv2dOp):
         self.exhaustive_search = False
         self.use_cuda = False
         self.use_mkldnn = True
+        self._cpu_only = True
         self.weight_type = np.float32
         self.input_type = np.float32
-        self.use_mkldnn = True
         self.mkldnn_data_type = "bfloat16"
         self.force_fp32_output = False
         self.init_group()
@@ -205,5 +207,4 @@ class TestWithInput1x1Filter1x1(TestConv2dBf16Op):
 
 
 if __name__ == '__main__':
-    if core.supports_bfloat16():
-        unittest.main()
+    unittest.main()
-- 
GitLab
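
For context, the pattern this patch adopts: moving the core.supports_bfloat16() check out of the __main__ guard and onto the class as a unittest.skipIf decorator makes the outcome visible in test reports. With the old guard, an unsupported host exited without running or reporting anything; with the decorator, each test is recorded as skipped along with the given reason. Below is a minimal runnable sketch of the same pattern, where supports_bfloat16() is a hypothetical stand-in for paddle.fluid.core.supports_bfloat16():

    import unittest

    def supports_bfloat16():
        # Hypothetical stand-in for paddle.fluid.core.supports_bfloat16(),
        # which probes the CPU's bfloat16 (AVX512 BF16) support at runtime.
        return False

    @unittest.skipIf(not supports_bfloat16(),
                     "place does not support BF16 evaluation")
    class TestBf16Gated(unittest.TestCase):
        # The decorator is evaluated once at class definition time; on an
        # unsupported host every test in the class is reported as skipped,
        # whereas a guarded unittest.main() would run (and report) nothing.
        def test_requires_bf16(self):
            self.assertTrue(supports_bfloat16())

    if __name__ == '__main__':
        unittest.main()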