From a64d50b720b9cf4354a1dd45bdbdfcc639c15b18 Mon Sep 17 00:00:00 2001
From: liuruyan <44316842+liuruyan@users.noreply.github.com>
Date: Wed, 12 Apr 2023 15:48:22 +0800
Subject: [PATCH] Add layer func: float(), half(), bfloat16(). (#51635)

---
 paddle/fluid/pybind/place.cc         |  24 +++-
 paddle/fluid/pybind/pybind.cc        |  11 --
 python/paddle/amp/__init__.py        |  66 +++++++++-
 python/paddle/nn/layer/layers.py     | 176 ++++++++++++++++++++++++++-
 test/amp/test_layer_convert_dtype.py | 172 ++++++++++++++++++++++++++
 5 files changed, 434 insertions(+), 15 deletions(-)
 create mode 100644 test/amp/test_layer_convert_dtype.py

diff --git a/paddle/fluid/pybind/place.cc b/paddle/fluid/pybind/place.cc
index d1d336b5bb0..aec21c6b0f6 100644
--- a/paddle/fluid/pybind/place.cc
+++ b/paddle/fluid/pybind/place.cc
@@ -373,7 +373,16 @@ void BindPlace(pybind11::module &m) {  // NOLINT
 #endif
       .def("__repr__", string::to_string)
       .def("__str__", string::to_string);
-
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
+    // Only GPUs with Compute Capability >= 53 support float16
+    return platform::GetGPUComputeCapability(place.device) >= 53;
+  });
+  m.def("is_bfloat16_supported", [](const platform::CUDAPlace &place) -> bool {
+    // Only GPUs with Compute Capability >= 80 support bfloat16
+    return platform::GetGPUComputeCapability(place.device) >= 80;
+  });
+#endif
   py::class_<platform::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
     **Note**:
     Examples:
@@ -492,7 +501,18 @@ void BindPlace(pybind11::module &m) {  // NOLINT
            &IsSamePlace)
       .def("__repr__", string::to_string)
       .def("__str__", string::to_string);
-
+  m.def("is_float16_supported",
+        [](const platform::CPUPlace &place) -> bool { return false; });
+  m.def("is_bfloat16_supported", [](const platform::CPUPlace &place) -> bool {
+#ifndef PADDLE_WITH_MKLDNN
+    return false;
+#else
+    if (phi::backends::cpu::MayIUse(phi::backends::cpu::cpu_isa_t::avx512_core))
+      return true;
+    else
+      return false;
+#endif
+  });
   py::class_<platform::CUDAPinnedPlace> cudapinnedplace(
       m, "CUDAPinnedPlace", R"DOC(
     CUDAPinnedPlace is a descriptor of a device.
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 65aa609e34f..bde6357ccbe 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1960,17 +1960,6 @@ All parameter, weight, gradient are variables in Paddle.
py::arg("sleep_inter") = 0, py::arg("redirect_stderr") = false); -#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool { - // Only GPUs with Compute Capability >= 53 support float16 - return platform::GetGPUComputeCapability(place.device) >= 53; - }); - m.def("is_bfloat16_supported", [](const platform::CUDAPlace &place) -> bool { - // Only GPUs with Compute Capability >= 80 support bfloat16 - return platform::GetGPUComputeCapability(place.device) >= 80; - }); -#endif - m.def("set_feed_variable", static_cast= 8.0, + "run test when maximum gpu's compute capability is 8.0.", + ) + def test_unsupported_bfloat16(self): + self.verify_trans_dtype( + test_type='bfloat16', + corrected_dtype=paddle.float32, + ) + + @unittest.skipIf( + not core.is_compiled_with_cuda() + or paddle.device.cuda.get_device_capability()[0] < 8.0, + "run test when gpu's compute capability is at least 8.0.", + ) + def test_supported_bfloat16(self): + self.verify_trans_dtype( + test_type='bfloat16', + corrected_dtype=paddle.bfloat16, + ) + + def test_float32(self): + paddle.set_default_dtype('float16') + self.verify_trans_dtype( + test_type='float32', + corrected_dtype=paddle.float32, + ) + paddle.set_default_dtype('float32') + + def test_excluded_layers_type_error(self): + self.assertRaises( + TypeError, self.verify_trans_dtype, excluded_layers=111 + ) + + +@unittest.skipIf( + not core.is_compiled_with_cuda(), "Require compiled with CUDA." +) +class TestSupportedTypeInfo(unittest.TestCase): + def test_cpu(self): + res = paddle.amp.is_float16_supported('cpu') + self.assertEqual(res, False) + res = paddle.amp.is_bfloat16_supported('cpu') + self.assertEqual(res, True) + + def test_gpu_fp16_supported(self): + res = paddle.amp.is_float16_supported() + self.assertEqual(res, True) + res = paddle.amp.is_float16_supported('gpu') + self.assertEqual(res, True) + res = paddle.amp.is_float16_supported('gpu:0') + self.assertEqual(res, True) + + @unittest.skipIf( + not core.is_compiled_with_cuda() + or paddle.device.cuda.get_device_capability()[0] >= 8.0, + "run test when maximum gpu's compute capability is 8.0.", + ) + def test_gpu_bf16_unsupported(self): + res = paddle.amp.is_bfloat16_supported() + self.assertEqual(res, False) + res = paddle.amp.is_bfloat16_supported('gpu') + self.assertEqual(res, False) + + @unittest.skipIf( + not core.is_compiled_with_cuda() + or paddle.device.cuda.get_device_capability()[0] < 8.0, + "run test when gpu's compute capability is at least 8.0.", + ) + def test_gpu_bf16_supported(self): + res = paddle.amp.is_bfloat16_supported() + self.assertEqual(res, True) + res = paddle.amp.is_bfloat16_supported('gpu') + self.assertEqual(res, True) + + def test_device_value_error(self): + self.assertRaises( + ValueError, paddle.amp.is_float16_supported, device='xxx' + ) + self.assertRaises( + ValueError, paddle.amp.is_float16_supported, device=111 + ) + + +if __name__ == '__main__': + unittest.main() -- GitLab