diff --git a/.flake8 b/.flake8
index 2d284df082e8aabc5dea987b11d1315167766a42..853e887f5e40ac9aa2d61fa88d4e810a50c6cc81 100644
--- a/.flake8
+++ b/.flake8
@@ -37,9 +37,3 @@ per-file-ignores =
     .cmake-format.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
-    # These files will be fixed in the future
-    python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py: F811
-    python/paddle/fluid/tests/unittests/test_activation_nn_grad.py: F811
-    python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py: F811
-    python/paddle/fluid/tests/unittests/test_matmul_v2_op.py: F811
-    python/paddle/fluid/tests/unittests/test_rrelu_op.py: F811
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py
index ecf864cf806f67da118c27f4b9bcd77d3cb19876..331974edfbc0d7ca38bf6b919ca441d85e33c54a 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py
@@ -28,7 +28,7 @@ class TestDygraphGroupSharded(TestMultipleGpus):
         self.run_mnist_2gpu('dygraph_group_sharded_api_eager.py')
 
     # check stage3 for some functions.
-    def test_dygraph_group_sharded(self):
+    def test_dygraph_group_sharded_stage3(self):
         self.run_mnist_2gpu('dygraph_group_sharded_stage3_eager.py')
 
 
diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
index 79b8fb27982525043bfdb347a06071a846055777..38ccb9b6470ab2cc4d183bdedd13ab010efed053 100644
--- a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
+++ b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
@@ -266,14 +266,6 @@ class TestFftn(unittest.TestCase):
 @parameterize(
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
-        (
-            'test_x_complex',
-            rand_x(4, complex=True),
-            None,
-            None,
-            'backward',
-            TypeError,
-        ),
         (
             'test_n_nagative',
             rand_x(4),
@@ -295,11 +287,11 @@
         ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError),
     ],
 )
-class TestRfftnException(unittest.TestCase):
-    def test_static_rfftn(self):
+class TestFftnException(unittest.TestCase):
+    def test_static_fftn(self):
         with self.assertRaises(self.expect_exception):
             with stgraph(
-                paddle.fft.rfftn,
+                paddle.fft.fftn,
                 self.place,
                 self.x,
                 self.n,
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index 4b3311120467df23d32b21bc8d2f5004d24680bb..f10232cf02bce9efafea5f7ca30868277e5a811c 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -407,36 +407,6 @@ class TestSquareDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
-class TestAbsDoubleGradCheck(unittest.TestCase):
-    @prog_scope()
-    def func(self, place):
-        # the shape of input variable should be clearly specified, not inlcude -1.
-        shape = [2, 3, 7, 9]
-        eps = 1e-6
-        dtype = np.float64
-
-        x = layers.data('x', shape, False, dtype)
-        x.persistable = True
-        y = paddle.abs(x)
-        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
-        # Because we set delta = 0.005 in calculating numeric gradient,
-        # if x is too small, the numeric gradient is inaccurate.
-        # we should avoid this
-        x_arr[np.abs(x_arr) < 0.005] = 0.02
-
-        gradient_checker.double_grad_check(
-            [x], y, x_init=x_arr, place=place, eps=eps
-        )
-
-    def test_grad(self):
-        paddle.enable_static()
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for p in places:
-            self.func(p)
-
-
 class TestLogDoubleGradCheck(unittest.TestCase):
     def log_wrapper(self, x):
         return paddle.log(x[0])
diff --git a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
index cbc7450bbc6d218c108c7463bb6a46bbdafb7325..536fc59f42ed88b2558a6035d1e1fbfc2132598f 100644
--- a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
@@ -584,7 +584,7 @@
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
-class TestCUDNNlstmAPI(unittest.TestCase):
+class TestCUDNNlstmAPI(unittest.TestCase):  # noqa: F811
     def test_lstm(self):
         seq_len = 20
         batch_size = 5
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
index 868cec1d592b72019398904824eb96b14e8b39a7..c452958ead8414fcac8cb21b750a01bfc58a1ad9 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -732,7 +732,7 @@ class TestMatmulop(unittest.TestCase):
 
         paddle.enable_static()
 
-    def func_dygraph_matmul(self):
+    def func_dygraph_matmul(self):  # noqa: F811
         with _test_eager_guard():
             self.func_dygraph_matmul()
 
diff --git a/python/paddle/fluid/tests/unittests/test_rrelu_op.py b/python/paddle/fluid/tests/unittests/test_rrelu_op.py
index 847675ee6f58f8751cb84f11cf4802e0388b6d63..96bccf8120257eb022322a31e8bf480729a3dd81 100644
--- a/python/paddle/fluid/tests/unittests/test_rrelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rrelu_op.py
@@ -317,9 +317,9 @@ class RReluTest(OpTest):
         self.lower = 0.1
         self.upper = 0.3
         self.is_test = True
-        self.init_prams()
+        self.init_params()
 
-    def init_prams(self):
+    def init_params(self):
         self.dtype = "float64"
         self.x_shape = [2, 3, 4, 5]
 
@@ -343,22 +343,13 @@
         self.check_grad(['X'], 'Out')
 
 
-class RReluTrainingTest(OpTest):
+class RReluTrainingTest(RReluTest):
     def setUp(self):
         self.op_type = "rrelu"
         self.lower = 0.3
-        self.upper = 0.3000009
+        self.upper = 0.300000009
         self.is_test = False
-        self.init_prams()
-
-
-class RReluTrainingTest(OpTest):
-    def setUp(self):
-        self.op_type = "rrelu"
-        self.lower = 0.3
-        self.upper = 0.3000009
-        self.is_test = False
-        self.init_prams()
+        self.init_params()
 
 
 if __name__ == "__main__":
diff --git a/setup.py b/setup.py
index 6d088750a60b00c35fd1ee1f6a5ca639e41833dd..6e77373acf54034d42a971018300b0d6925ccf48 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,6 @@ from setuptools import Command, Extension, setup
 from setuptools.command.egg_info import egg_info
 from setuptools.command.install import install as InstallCommandBase
 from setuptools.command.install_lib import install_lib
-from setuptools.dist import Distribution
 
 if sys.version_info < (3, 7):
     raise RuntimeError(