Unverified commit db700d10, authored by xuxinyi389 and committed by GitHub

Fix bugs of Windows CI: skip the unit tests related to devices (#55889)

Parent 5d26d79f
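The patch applies the same `unittest.skipIf` guard above each device-dependent test class. As a minimal sketch of the pattern (the test class and method names here are hypothetical, for illustration only):

import unittest

import paddle
from paddle.fluid import core


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.device.cuda.get_device_capability()[0] < 7.0,
    "run test when gpu's compute capability is at least 7.0.",
)
class TestNeedsModernGpu(unittest.TestCase):  # hypothetical class name
    def test_runs_only_on_capable_devices(self):
        # Only reached on a CUDA build whose GPU reports compute
        # capability major version >= 7 (Volta or newer). Note that
        # `or` short-circuits, so the capability query is never made
        # on CPU-only builds.
        major, _minor = paddle.device.cuda.get_device_capability()
        self.assertGreaterEqual(major, 7)


if __name__ == '__main__':
    unittest.main()

On a Windows CI machine without a suitable GPU the condition is true when the module is imported, so the class is collected but skipped rather than failing.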
@@ -20,9 +20,15 @@ from amp_base_models import AmpTestBase
 import paddle
 import paddle.nn.functional as F
 from paddle import nn
+from paddle.fluid import core
 from paddle.static import amp


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestAutoCast(AmpTestBase):
     def setUp(self):
         self._conv = paddle.nn.Conv2D(
@@ -56,6 +62,11 @@ class SimpleConvNet(nn.Layer):
         return out3


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestStaticDecorate(AmpTestBase):
     def check_results(
         self, use_amp, dtype, level, use_promote, expected_op_calls
@@ -127,6 +138,11 @@ class TestStaticDecorate(AmpTestBase):
         paddle.disable_static()


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestGradScaler(AmpTestBase):
     def test_amp_grad_scaler(self):
         model = paddle.nn.Conv2D(3, 2, 3)
@@ -154,6 +170,11 @@ class TestGradScaler(AmpTestBase):
         self.assertTrue('check_finite_and_unscale' not in op_list)


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestFp16Guard(AmpTestBase):
     def test_fp16_gurad(self):
         paddle.enable_static()
...
@@ -16,6 +16,7 @@ import unittest
 import paddle
 import paddle.nn.functional as F
+from paddle.fluid import core


 class ConvBNLayer(paddle.nn.Layer):
@@ -77,6 +78,11 @@ class Model(paddle.nn.Layer):
         return x


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestAMPDecorate(unittest.TestCase):
     def check_results(self, fp32_layers=[], fp16_layers=[]):
         for idx in range(len(fp32_layers)):
...
@@ -19,6 +19,11 @@ from paddle.fluid import core
 from paddle.static.amp import AutoMixedPrecisionLists, fp16_lists


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestAMPList(unittest.TestCase):
     def setUp(self):
         self.default_black_list = [
...
@@ -35,6 +35,11 @@ class SimpleNet(paddle.nn.Layer):
     or not core.is_float16_supported(core.CUDAPlace(0)),
     "core is not complied with CUDA and not support the float16",
 )
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestMasterGrad(unittest.TestCase):
     def check_results(
         self, fp32_grads, op_list, total_steps, accumulate_batchs_num
...
@@ -18,9 +18,15 @@ import numpy as np
 from amp_base_models import AmpTestBase, build_conv_model

 import paddle
+from paddle.fluid import core
 from paddle.static import amp


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestStaticAmpPromoteStats(AmpTestBase):
     def check_promote_results(
         self, use_amp, dtype, level, use_promote, expected_op_calls, debug_info
@@ -103,6 +109,11 @@ class TestStaticAmpPromoteStats(AmpTestBase):
         )


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestEagerAmpPromoteStats(AmpTestBase):
     def check_promote_results(
         self, dtype, level, use_promote, expected_op_calls, debug_info
@@ -172,6 +183,11 @@ class TestEagerAmpPromoteStats(AmpTestBase):
         )


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestEagerAmpPromoteSimple(AmpTestBase):
     def setUp(self):
         self._conv = paddle.nn.Conv2D(
...
@@ -1254,6 +1254,11 @@ class TestOptimizerDtype(unittest.TestCase):
         self.check_with_dtype('float32')


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestMasterWeightSaveForFP16(unittest.TestCase):
     '''
     For Amp-O2, some optimizer(Momentum, Adam ...) will create master weights for parameters to improve the accuracy.
...