From 70120c7f98229df0697657c336c83654db5c185e Mon Sep 17 00:00:00 2001
From: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com>
Date: Thu, 5 May 2022 12:02:14 +0800
Subject: [PATCH] fix unittest of conv2d due to V100 do not support bfloat16
 (#42483)

---
 python/paddle/fluid/tests/unittests/test_conv2d_op.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
index 6a9f7a47f66..fdb93e1f1af 100644
--- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -172,9 +172,9 @@ def create_test_cudnn_fp16_class(parent, grad_check=True):
 
 def create_test_cudnn_bf16_class(parent):
     @unittest.skipIf(
-        not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
-        "core is not compiled with CUDA and cudnn version need larger than 8.1.0"
-    )
+        not core.is_compiled_with_cuda() or
+        not core.is_bfloat16_supported(core.CUDAPlace(0)),
+        "core is not compiled with CUDA and do not support bfloat16")
     class TestConv2DCUDNNBF16(parent):
         def get_numeric_grad(self, place, check_name):
             scope = core.Scope()
-- 
GitLab
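
For reference, the skip guard introduced by this patch can be exercised outside the conv2d test file. The sketch below is a hypothetical, minimal unittest that reuses the same condition shown in the diff (`core.is_compiled_with_cuda()` and `core.is_bfloat16_supported(core.CUDAPlace(0))`); the `TestBF16SkipGuard` class, its test method, and the `import paddle.fluid.core as core` import are illustrative assumptions, not part of the patch.

    import unittest

    import paddle.fluid.core as core


    # Hypothetical standalone illustration of the guard from the patch:
    # run the bf16 test only when Paddle is built with CUDA *and* the
    # current GPU reports bfloat16 support (a V100, for example, does not).
    # The `or` short-circuits, so CUDAPlace(0) is never constructed on a
    # CPU-only build.
    @unittest.skipIf(
        not core.is_compiled_with_cuda() or
        not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or the device does not support bfloat16")
    class TestBF16SkipGuard(unittest.TestCase):
        def test_device_reports_bf16(self):
            # If the guard let this test run, the device must accept bfloat16.
            self.assertTrue(core.is_bfloat16_supported(core.CUDAPlace(0)))


    if __name__ == '__main__':
        unittest.main()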