diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py
index 714ef764a9262fb85211cfb24ff53a1ec01aee67..0de8ec22b4896ea9914bc3d15da4929a54b9bdbb 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py
@@ -108,8 +108,10 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
 
 
 @unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
-    "core is not compiled with CUDA and cudnn version need larger than 8.1.0")
+    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "only supported when compiled with CUDA, cudnn version is at least 8.1.0, and the device's compute capability is at least 8.0"
+)
 class TestBF16ElementwiseAddOp(OpTest):
     def setUp(self):
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 1ab1bf07b0d5c7d2dbba5967b57bbe9175cd74ad..38bdff98c3bf9a93535744c88e0d87826fa5a1e1 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -62,9 +62,11 @@ class TestElementwiseOp(OpTest):
             no_grad_set=set('Y'))
 
 
-@unittest.skipIf(
-    core.is_compiled_with_cuda() and core.cudnn_version() < 8100,
-    "run test when gpu is availble and the minimum cudnn version is 8.1.0.")
+@unittest.skipIf(core.is_compiled_with_cuda() and (
+    core.cudnn_version() < 8100
+    or paddle.device.cuda.get_device_capability()[0] < 8
+), "run test when gpu is available, the minimum cudnn version is 8.1.0, and gpu's compute capability is at least 8.0."
+                 )
 class TestElementwiseBF16Op(OpTest):
     def setUp(self):
diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py
index 6ddf90748950b05410c86707fb4dcf00d419b557..646b2fcbb288baa81bafed88468e28defd8b19dd 100644
--- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py
@@ -417,7 +417,9 @@ class TestBF16ScaleBiasLayerNorm(unittest.TestCase):
         return y_np, x_g_np, w_g_np, b_g_np
 
     def test_main(self):
-        if (not core.is_compiled_with_cuda()) or (core.cudnn_version() < 8100):
+        if (not core.is_compiled_with_cuda()) or (
+                core.cudnn_version() <
+                8100) or (paddle.device.cuda.get_device_capability()[0] < 8):
             return
         x_np = np.random.random([10, 20]).astype('float32')
         weight_np = np.random.random([20]).astype('float32')
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 49c43c0b2bc9134be5fa1c3b971e1f50c3a5dfea..c5a34e4f6197cb8f0c3f76ee14776c5ca20cf26a 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -360,8 +360,10 @@
 
 
 @unittest.skipIf(
-    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
-    "core is not compiled with CUDA and cudnn version need larger than 8.1.0")
+    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "only supported when compiled with CUDA, cudnn version is at least 8.1.0, and the device's compute capability is at least 8.0"
+)
 class TestSoftmaxBF16CUDNNOp(TestSoftmaxBF16Op):
     def init_cudnn(self):
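
Note: all four hunks add the same three-part guard (compiled with CUDA, cuDNN at least 8.1.0, GPU compute capability at least 8.0). Below is a minimal sketch, not part of this diff, of how that repeated condition could be shared across the test files; it uses only the Paddle APIs already appearing in the hunks, and the helper names bf16_supported and skip_unless_bf16_supported are hypothetical, not identifiers from the Paddle codebase.

# Sketch only: a shared skip condition for BF16 cuDNN tests.
# The helper names below are hypothetical and do not exist in Paddle.
import unittest

import paddle
from paddle.fluid import core


def bf16_supported():
    # `and` short-circuits, so the CUDA-only queries are never evaluated
    # on a CPU-only build.
    return (core.is_compiled_with_cuda() and core.cudnn_version() >= 8100
            and paddle.device.cuda.get_device_capability()[0] >= 8)


skip_unless_bf16_supported = unittest.skipIf(
    not bf16_supported(),
    "requires CUDA, cuDNN >= 8.1.0 and a GPU with compute capability >= 8.0")


@skip_unless_bf16_supported
class TestSomeBF16Op(unittest.TestCase):
    def test_placeholder(self):
        pass

With such a helper, each @unittest.skipIf in the diff could be replaced by one decorator, keeping the cuDNN version and compute capability thresholds in a single place.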