diff --git a/paddle/fluid/pybind/place.cc b/paddle/fluid/pybind/place.cc
index d1d336b5bb0095cdb429133f252a614bb1c4d033..aec21c6b0f6292bc3df441036c9aff64d864b4c5 100644
--- a/paddle/fluid/pybind/place.cc
+++ b/paddle/fluid/pybind/place.cc
@@ -373,7 +373,16 @@ void BindPlace(pybind11::module &m) { // NOLINT
 #endif
       .def("__repr__", string::to_string)
       .def("__str__", string::to_string);
-
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
+    // Only GPUs with Compute Capability >= 53 support float16
+    return platform::GetGPUComputeCapability(place.device) >= 53;
+  });
+  m.def("is_bfloat16_supported", [](const platform::CUDAPlace &place) -> bool {
+    // Only GPUs with Compute Capability >= 80 support bfloat16
+    return platform::GetGPUComputeCapability(place.device) >= 80;
+  });
+#endif
   py::class_<platform::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
     **Note**:
     Examples:
@@ -492,7 +501,18 @@ void BindPlace(pybind11::module &m) { // NOLINT
           &IsSamePlace)
       .def("__repr__", string::to_string)
      .def("__str__", string::to_string);
-
+  m.def("is_float16_supported",
+        [](const platform::CPUPlace &place) -> bool { return false; });
+  m.def("is_bfloat16_supported", [](const platform::CPUPlace &place) -> bool {
+#ifndef PADDLE_WITH_MKLDNN
+    return false;
+#else
+    if (phi::backends::cpu::MayIUse(phi::backends::cpu::cpu_isa_t::avx512_core))
+      return true;
+    else
+      return false;
+#endif
+  });
   py::class_<platform::CUDAPinnedPlace> cudapinnedplace(
       m, "CUDAPinnedPlace", R"DOC(
     CUDAPinnedPlace is a descriptor of a device.
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 65aa609e34fde1a05a505d7e5b270442c74f7c2d..bde6357ccbe2f9ca63034323dd32eebbdf0e852a 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1960,17 +1960,6 @@ All parameter, weight, gradient are variables in Paddle.
         py::arg("sleep_inter") = 0,
         py::arg("redirect_stderr") = false);
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
-    // Only GPUs with Compute Capability >= 53 support float16
-    return platform::GetGPUComputeCapability(place.device) >= 53;
-  });
-  m.def("is_bfloat16_supported", [](const platform::CUDAPlace &place) -> bool {
-    // Only GPUs with Compute Capability >= 80 support bfloat16
-    return platform::GetGPUComputeCapability(place.device) >= 80;
-  });
-#endif
-
   m.def("set_feed_variable", static_cast

+    @unittest.skipIf(
+        not core.is_compiled_with_cuda()
+        or paddle.device.cuda.get_device_capability()[0] >= 8.0,
+        "run test when maximum gpu's compute capability is 8.0.",
+    )
+    def test_unsupported_bfloat16(self):
+        self.verify_trans_dtype(
+            test_type='bfloat16',
+            corrected_dtype=paddle.float32,
+        )
+
+    @unittest.skipIf(
+        not core.is_compiled_with_cuda()
+        or paddle.device.cuda.get_device_capability()[0] < 8.0,
+        "run test when gpu's compute capability is at least 8.0.",
+    )
+    def test_supported_bfloat16(self):
+        self.verify_trans_dtype(
+            test_type='bfloat16',
+            corrected_dtype=paddle.bfloat16,
+        )
+
+    def test_float32(self):
+        paddle.set_default_dtype('float16')
+        self.verify_trans_dtype(
+            test_type='float32',
+            corrected_dtype=paddle.float32,
+        )
+        paddle.set_default_dtype('float32')
+
+    def test_excluded_layers_type_error(self):
+        self.assertRaises(
+            TypeError, self.verify_trans_dtype, excluded_layers=111
+        )
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda(), "Require compiled with CUDA."
+)
+class TestSupportedTypeInfo(unittest.TestCase):
+    def test_cpu(self):
+        res = paddle.amp.is_float16_supported('cpu')
+        self.assertEqual(res, False)
+        res = paddle.amp.is_bfloat16_supported('cpu')
+        self.assertEqual(res, True)
+
+    def test_gpu_fp16_supported(self):
+        res = paddle.amp.is_float16_supported()
+        self.assertEqual(res, True)
+        res = paddle.amp.is_float16_supported('gpu')
+        self.assertEqual(res, True)
+        res = paddle.amp.is_float16_supported('gpu:0')
+        self.assertEqual(res, True)
+
+    @unittest.skipIf(
+        not core.is_compiled_with_cuda()
+        or paddle.device.cuda.get_device_capability()[0] >= 8.0,
+        "run test when maximum gpu's compute capability is 8.0.",
+    )
+    def test_gpu_bf16_unsupported(self):
+        res = paddle.amp.is_bfloat16_supported()
+        self.assertEqual(res, False)
+        res = paddle.amp.is_bfloat16_supported('gpu')
+        self.assertEqual(res, False)
+
+    @unittest.skipIf(
+        not core.is_compiled_with_cuda()
+        or paddle.device.cuda.get_device_capability()[0] < 8.0,
+        "run test when gpu's compute capability is at least 8.0.",
+    )
+    def test_gpu_bf16_supported(self):
+        res = paddle.amp.is_bfloat16_supported()
+        self.assertEqual(res, True)
+        res = paddle.amp.is_bfloat16_supported('gpu')
+        self.assertEqual(res, True)
+
+    def test_device_value_error(self):
+        self.assertRaises(
+            ValueError, paddle.amp.is_float16_supported, device='xxx'
+        )
+        self.assertRaises(
+            ValueError, paddle.amp.is_float16_supported, device=111
+        )
+
+
+if __name__ == '__main__':
+    unittest.main()
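
For reference, a minimal usage sketch (not part of the patch) of the Python-side queries exercised by the tests above, paddle.amp.is_float16_supported and paddle.amp.is_bfloat16_supported. The dtype-selection logic and the paddle.is_compiled_with_cuda() check are illustrative assumptions, not code from this PR:

import paddle

# Illustrative only: pick the widest reduced-precision dtype the target device
# supports, falling back to float32 when neither fp16 nor bf16 is available.
device = 'gpu' if paddle.is_compiled_with_cuda() else 'cpu'

if paddle.amp.is_bfloat16_supported(device):    # GPU: compute capability >= 80
    amp_dtype = 'bfloat16'
elif paddle.amp.is_float16_supported(device):   # GPU: compute capability >= 53
    amp_dtype = 'float16'
else:
    amp_dtype = 'float32'                       # neither fp16 nor bf16 available

print(f"AMP dtype selected for {device}: {amp_dtype}")

The selected dtype would then typically be passed to paddle.amp.auto_cast(dtype=...) or paddle.amp.decorate(dtype=...), which these support checks are intended to guard.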