diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 3a242fe2582a5051a050375062f57bf019ab13c3..dc554a9c5ae1a594cd054d8a5efd4f3094887768 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -3022,6 +3022,10 @@ All parameter, weight, gradient are variables in Paddle.
     // Only GPUs with Compute Capability >= 53 support float16
     return platform::GetGPUComputeCapability(place.device) >= 53;
   });
+  m.def("is_bfloat16_supported", [](const platform::CUDAPlace &place) -> bool {
+    // Only GPUs with Compute Capability >= 80 support bfloat16
+    return platform::GetGPUComputeCapability(place.device) >= 80;
+  });
 #endif
 
   m.def("set_feed_variable",
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
index d50241e58dea3a80673f7bcbcbcc1097f6dac190..27dbd3752b5502153594c848d0e42c573de0e3b1 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -60,9 +60,9 @@ class ElementwiseDivOp(OpTest):
         pass
 
 
-@unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
-    "core is not compiled with CUDA and cudnn version need larger than 8.1.0")
+@unittest.skipIf(not core.is_compiled_with_cuda() or
+                 not core.is_bfloat16_supported(core.CUDAPlace(0)),
+                 "core is not compiled with CUDA and not support the bfloat16")
 class TestElementwiseDivOpBF16(OpTest):
     def setUp(self):
         self.op_type = "elementwise_div"
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
index 492f300e3b8481cb2d39266c359b916ada346981..3e06b69278d347c98ccc8e30fd53a8dfea211db7 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -385,9 +385,9 @@ create_test_fp16_class(TestMatMulOp17)
 
 def create_test_bf16_class(parent, atol=0.01):
     @unittest.skipIf(
-        not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
-        "core is not compiled with CUDA and cudnn version need larger than 8.1.0"
-    )
+        not core.is_compiled_with_cuda() or
+        not core.is_bfloat16_supported(core.CUDAPlace(0)),
+        "core is not compiled with CUDA and not support the bfloat16")
     class TestMatMulOpBf16Case(parent):
         def get_numeric_grad(self, place, check_name):
             scope = core.Scope()
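
A minimal usage sketch of the new binding, assuming it is exposed through paddle.fluid.core as in the updated tests; the helper name gpu_supports_bfloat16 is hypothetical and only illustrates the short-circuit guard:

    from paddle.fluid import core

    def gpu_supports_bfloat16():
        # Short-circuit so core.CUDAPlace(0) is only constructed when Paddle
        # was built with CUDA; the binding returns True only for GPUs with
        # Compute Capability >= 80.
        return core.is_compiled_with_cuda() and core.is_bfloat16_supported(
            core.CUDAPlace(0))

    if __name__ == "__main__":
        print("bfloat16 GPU kernels available:", gpu_supports_bfloat16())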