Unverified commit cdf31dc1, authored by Nyakku Shigure and committed by GitHub

[CodeStyle][F811] fix some test cases shadowed by the same name (#48745)

* [CodeStyle][F811] fix some unittests

* fix setup.py

* remove ignore from flake8 config

* remove repeated TestAbsDoubleGradCheck

* fix rrelu test

* fix fft ut

* add noqa in fluid.lstm ut

* add rtol and atol in test_matmul_v2_op

* update rtol

* empty commit

* empty commit

* revert changes in matmul ut and add noqa

* rename test case name
Parent: da8e15e6
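For context: flake8's F811 check ("redefinition of unused name") fires when a name is defined twice without the first definition ever being used. In a unittest module this usually means a later test class or method silently shadows an earlier one, so the earlier test is never collected or run. A minimal illustrative sketch of the problem (the names below are hypothetical, not taken from this PR):

import unittest

class ExampleTest(unittest.TestCase):
    def test_add(self):
        # This definition is shadowed by the one below and never runs.
        self.assertEqual(1 + 1, 2)

    def test_add(self):  # flake8 reports F811 on this line
        self.assertEqual(2 + 2, 4)

# `python -m unittest` collects only the second test_add. The fixes in this PR
# either rename the later definition, delete the duplicate, or mark a deliberate
# redefinition with `# noqa: F811`.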
@@ -37,9 +37,3 @@ per-file-ignores =
     .cmake-format.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
-    # These files will be fixed in the future
-    python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py: F811
-    python/paddle/fluid/tests/unittests/test_activation_nn_grad.py: F811
-    python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py: F811
-    python/paddle/fluid/tests/unittests/test_matmul_v2_op.py: F811
-    python/paddle/fluid/tests/unittests/test_rrelu_op.py: F811
@@ -28,7 +28,7 @@ class TestDygraphGroupSharded(TestMultipleGpus):
         self.run_mnist_2gpu('dygraph_group_sharded_api_eager.py')
 
     # check stage3 for some functions.
-    def test_dygraph_group_sharded(self):
+    def test_dygraph_group_sharded_stage3(self):
         self.run_mnist_2gpu('dygraph_group_sharded_stage3_eager.py')
...
@@ -266,14 +266,6 @@ class TestFftn(unittest.TestCase):
 @parameterize(
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
-        (
-            'test_x_complex',
-            rand_x(4, complex=True),
-            None,
-            None,
-            'backward',
-            TypeError,
-        ),
         (
             'test_n_nagative',
             rand_x(4),
@@ -295,11 +287,11 @@ class TestFftn(unittest.TestCase):
         ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError),
     ],
 )
-class TestRfftnException(unittest.TestCase):
-    def test_static_rfftn(self):
+class TestFftnException(unittest.TestCase):
+    def test_static_fftn(self):
         with self.assertRaises(self.expect_exception):
             with stgraph(
-                paddle.fft.rfftn,
+                paddle.fft.fftn,
                 self.place,
                 self.x,
                 self.n,
...
@@ -407,36 +407,6 @@ class TestSquareDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
-class TestAbsDoubleGradCheck(unittest.TestCase):
-    @prog_scope()
-    def func(self, place):
-        # the shape of input variable should be clearly specified, not inlcude -1.
-        shape = [2, 3, 7, 9]
-        eps = 1e-6
-        dtype = np.float64
-        x = layers.data('x', shape, False, dtype)
-        x.persistable = True
-        y = paddle.abs(x)
-        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
-        # Because we set delta = 0.005 in calculating numeric gradient,
-        # if x is too small, the numeric gradient is inaccurate.
-        # we should avoid this
-        x_arr[np.abs(x_arr) < 0.005] = 0.02
-        gradient_checker.double_grad_check(
-            [x], y, x_init=x_arr, place=place, eps=eps
-        )
-
-    def test_grad(self):
-        paddle.enable_static()
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for p in places:
-            self.func(p)
-
-
 class TestLogDoubleGradCheck(unittest.TestCase):
     def log_wrapper(self, x):
         return paddle.log(x[0])
...
@@ -584,7 +584,7 @@ class TestCUDNNlstmAPI(unittest.TestCase):
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
-class TestCUDNNlstmAPI(unittest.TestCase):
+class TestCUDNNlstmAPI(unittest.TestCase):  # noqa: F811
     def test_lstm(self):
         seq_len = 20
         batch_size = 5
...
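A note on the `# noqa: F811` marker added above: it suppresses flake8's F811 warning for that single line only, which is how a known, deliberate redefinition can be kept once the blanket per-file ignore has been dropped from `.flake8`. A simplified sketch of the pattern (class and method names are illustrative, not the actual Paddle test):

import unittest

class LegacyApiTest(unittest.TestCase):
    def test_run(self):
        self.assertEqual(sum([1, 2, 3]), 6)

# The redefinition below shadows the class above, so unittest collects only this
# version. The noqa comment documents that the shadowing is known and silences
# flake8 for exactly this line instead of ignoring F811 for the whole file.
class LegacyApiTest(unittest.TestCase):  # noqa: F811
    def test_run(self):
        self.assertEqual(sum([4, 5, 6]), 15)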
@@ -732,7 +732,7 @@ class TestMatmulop(unittest.TestCase):
         paddle.enable_static()
 
-    def func_dygraph_matmul(self):
+    def func_dygraph_matmul(self):  # noqa: F811
         with _test_eager_guard():
             self.func_dygraph_matmul()
...
@@ -317,9 +317,9 @@ class RReluTest(OpTest):
         self.lower = 0.1
         self.upper = 0.3
         self.is_test = True
-        self.init_prams()
+        self.init_params()
 
-    def init_prams(self):
+    def init_params(self):
         self.dtype = "float64"
         self.x_shape = [2, 3, 4, 5]
@@ -343,22 +343,13 @@ class RReluTest(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class RReluTrainingTest(OpTest):
+class RReluTrainingTest(RReluTest):
     def setUp(self):
         self.op_type = "rrelu"
         self.lower = 0.3
-        self.upper = 0.3000009
+        self.upper = 0.300000009
         self.is_test = False
-        self.init_prams()
-
-
-class RReluTrainingTest(OpTest):
-    def setUp(self):
-        self.op_type = "rrelu"
-        self.lower = 0.3
-        self.upper = 0.3000009
-        self.is_test = False
-        self.init_prams()
+        self.init_params()
 
 
 if __name__ == "__main__":
...
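The rrelu change above also shows the de-duplication approach used elsewhere in this PR: rather than repeating an entire test class under the same name, the training variant now subclasses RReluTest and overrides only the parameters that differ. A simplified, hypothetical sketch of that pattern (plain unittest here, not the actual Paddle OpTest code):

import unittest

class RReluLikeTest(unittest.TestCase):
    def setUp(self):
        self.lower = 0.1
        self.upper = 0.3
        self.is_test = True

    def test_bounds(self):
        # Shared check, inherited by every subclass.
        self.assertLessEqual(self.lower, self.upper)

class RReluLikeTrainingTest(RReluLikeTest):
    def setUp(self):
        # Override only the configuration; test_bounds is inherited and re-run.
        self.lower = 0.3
        self.upper = 0.300000009
        self.is_test = False

if __name__ == "__main__":
    unittest.main()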
@@ -30,7 +30,6 @@ from setuptools import Command, Extension, setup
 from setuptools.command.egg_info import egg_info
 from setuptools.command.install import install as InstallCommandBase
 from setuptools.command.install_lib import install_lib
-from setuptools.dist import Distribution
 
 if sys.version_info < (3, 7):
     raise RuntimeError(
...