Unverified commit 69b5d74d, authored by Leo Chen, committed by GitHub

skip bf16 test if not supported (#42503)

Parent 09a13294
```diff
@@ -1254,18 +1254,17 @@ class TestBf16(unittest.TestCase):
     def test_bf16(self):
         def func_isinstance():
-            if fluid.core.is_compiled_with_cuda():
-                cudnn_version = paddle.device.get_cudnn_version()
-                if cudnn_version is not None and cudnn_version >= 8100:
-                    out_fp32 = self.train(enable_amp=False)
-                    out_bf16_O1 = self.train(enable_amp=True, amp_level='O1')
-                    out_bf16_O2 = self.train(enable_amp=True, amp_level='O2')
-                    self.assertTrue(
-                        np.allclose(
-                            out_fp32, out_bf16_O1, rtol=1.e-3, atol=1.e-1))
-                    self.assertTrue(
-                        np.allclose(
-                            out_fp32, out_bf16_O2, rtol=1.e-3, atol=1.e-1))
+            if fluid.core.is_compiled_with_cuda(
+            ) and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)):
+                out_fp32 = self.train(enable_amp=False)
+                out_bf16_O1 = self.train(enable_amp=True, amp_level='O1')
+                out_bf16_O2 = self.train(enable_amp=True, amp_level='O2')
+                self.assertTrue(
+                    np.allclose(
+                        out_fp32, out_bf16_O1, rtol=1.e-3, atol=1.e-1))
+                self.assertTrue(
+                    np.allclose(
+                        out_fp32, out_bf16_O2, rtol=1.e-3, atol=1.e-1))
 
         with _test_eager_guard():
             func_isinstance()
```
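In short, the patch drops the cuDNN-version heuristic (version >= 8100 as a proxy for bfloat16 support) and instead asks the runtime directly whether the current device supports bf16. The sketch below expresses the same guard as a `unittest` skip decorator; it is a minimal illustration, not the commit's code. The `bf16_supported` helper and `TestBf16Guard` class are hypothetical names, and it assumes the `fluid.core.is_bfloat16_supported` API shown in the diff plus `paddle.amp.auto_cast` with `dtype='bfloat16'` from Paddle's AMP interface.

```python
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid


def bf16_supported():
    # Hypothetical helper mirroring the check the commit introduces:
    # a CUDA build AND a device that reports bfloat16 support.
    # `and` short-circuits, so CUDAPlace(0) is never built on CPU-only installs.
    return (fluid.core.is_compiled_with_cuda()
            and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)))


class TestBf16Guard(unittest.TestCase):
    @unittest.skipUnless(bf16_supported(), "bfloat16 not supported on this device")
    def test_bf16_matmul_close_to_fp32(self):
        x = paddle.randn([4, 4])
        out_fp32 = paddle.matmul(x, x)
        # Run the same op under bf16 autocast and compare with loose tolerances,
        # as the patched test does for its fp32 vs bf16 training outputs.
        with paddle.amp.auto_cast(enable=True, level='O2', dtype='bfloat16'):
            out_bf16 = paddle.matmul(x, x)
        np.testing.assert_allclose(
            out_fp32.numpy(),
            out_bf16.astype('float32').numpy(),
            rtol=1e-2,
            atol=1e-1)


if __name__ == '__main__':
    unittest.main()
```

One design note: a decorator-based guard is reported as "skipped" in the test output, whereas the in-function `if` in the patched test silently passes on unsupported devices. Both avoid false failures; the decorator additionally makes the coverage gap visible.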