From 4ee9e1f8d74eea5449464e32e224ffd588869d6e Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Thu, 8 Dec 2022 10:25:10 +0800
Subject: [PATCH] [CodeStyle][F811] fix some test cases shadowed by the same
 name (#48745)

* [CodeStyle][F811] fix some unittests

* fix setup.py

* remove ignore from flake8 config

* remove repeat TestAbsDoubleGradCheck

* fix rrelu test

* fix fft ut

* add noqa in fluid.lstm ut

* add rtol and atol in test_matmul_v2_op

* update rtol

* empty commit

* empty commit

* revert changes in matmul ut and add noqa

* rename test case name
---
 .flake8                                       |  6 ----
 ...est_dygraph_group_sharded_api_for_eager.py |  2 +-
 .../fft/test_fft_with_static_graph.py         | 14 ++-------
 .../unittests/test_activation_nn_grad.py      | 30 -------------------
 .../tests/unittests/test_lstm_cudnn_op.py     |  2 +-
 .../tests/unittests/test_matmul_v2_op.py      |  2 +-
 .../fluid/tests/unittests/test_rrelu_op.py    | 19 ++++--------
 setup.py                                      |  1 -
 8 files changed, 11 insertions(+), 65 deletions(-)

diff --git a/.flake8 b/.flake8
index 2d284df082e8a..853e887f5e40a 100644
--- a/.flake8
+++ b/.flake8
@@ -37,9 +37,3 @@ per-file-ignores =
     .cmake-format.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
-    # These files will be fixed in the future
-    python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py: F811
-    python/paddle/fluid/tests/unittests/test_activation_nn_grad.py: F811
-    python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py: F811
-    python/paddle/fluid/tests/unittests/test_matmul_v2_op.py: F811
-    python/paddle/fluid/tests/unittests/test_rrelu_op.py: F811
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py
index ecf864cf806f6..331974edfbc0d 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_group_sharded_api_for_eager.py
@@ -28,7 +28,7 @@ def test_dygraph_group_sharded(self):
         self.run_mnist_2gpu('dygraph_group_sharded_api_eager.py')
 
     # check stage3 for some functions.
-    def test_dygraph_group_sharded(self):
+    def test_dygraph_group_sharded_stage3(self):
         self.run_mnist_2gpu('dygraph_group_sharded_stage3_eager.py')
 
 
diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
index 79b8fb2798252..38ccb9b6470ab 100644
--- a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
+++ b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
@@ -266,14 +266,6 @@ def test_static_fftn(self):
 @parameterize(
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
-        (
-            'test_x_complex',
-            rand_x(4, complex=True),
-            None,
-            None,
-            'backward',
-            TypeError,
-        ),
         (
             'test_n_nagative',
             rand_x(4),
@@ -295,11 +287,11 @@
         ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError),
     ],
 )
-class TestRfftnException(unittest.TestCase):
-    def test_static_rfftn(self):
+class TestFftnException(unittest.TestCase):
+    def test_static_fftn(self):
         with self.assertRaises(self.expect_exception):
             with stgraph(
-                paddle.fft.rfftn,
+                paddle.fft.fftn,
                 self.place,
                 self.x,
                 self.n,
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index 4b3311120467d..f10232cf02bce 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -407,36 +407,6 @@ def test_grad(self):
             self.func(p)
 
 
-class TestAbsDoubleGradCheck(unittest.TestCase):
-    @prog_scope()
-    def func(self, place):
-        # the shape of input variable should be clearly specified, not inlcude -1.
-        shape = [2, 3, 7, 9]
-        eps = 1e-6
-        dtype = np.float64
-
-        x = layers.data('x', shape, False, dtype)
-        x.persistable = True
-        y = paddle.abs(x)
-        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
-        # Because we set delta = 0.005 in calculating numeric gradient,
-        # if x is too small, the numeric gradient is inaccurate.
-        # we should avoid this
-        x_arr[np.abs(x_arr) < 0.005] = 0.02
-
-        gradient_checker.double_grad_check(
-            [x], y, x_init=x_arr, place=place, eps=eps
-        )
-
-    def test_grad(self):
-        paddle.enable_static()
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for p in places:
-            self.func(p)
-
-
 class TestLogDoubleGradCheck(unittest.TestCase):
     def log_wrapper(self, x):
         return paddle.log(x[0])
diff --git a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
index cbc7450bbc6d2..536fc59f42ed8 100644
--- a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
@@ -584,7 +584,7 @@ def test_lstm(self):
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
-class TestCUDNNlstmAPI(unittest.TestCase):
+class TestCUDNNlstmAPI(unittest.TestCase):  # noqa: F811
     def test_lstm(self):
         seq_len = 20
         batch_size = 5
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
index 868cec1d592b7..c452958ead841 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -732,7 +732,7 @@ def func_dygraph_matmul(self):
 
         paddle.enable_static()
 
-    def func_dygraph_matmul(self):
+    def func_dygraph_matmul(self):  # noqa: F811
         with _test_eager_guard():
             self.func_dygraph_matmul()
 
diff --git a/python/paddle/fluid/tests/unittests/test_rrelu_op.py b/python/paddle/fluid/tests/unittests/test_rrelu_op.py
index 847675ee6f58f..96bccf8120257 100644
--- a/python/paddle/fluid/tests/unittests/test_rrelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rrelu_op.py
@@ -317,9 +317,9 @@ def setUp(self):
         self.lower = 0.1
         self.upper = 0.3
         self.is_test = True
-        self.init_prams()
+        self.init_params()
 
-    def init_prams(self):
+    def init_params(self):
         self.dtype = "float64"
         self.x_shape = [2, 3, 4, 5]
 
@@ -343,22 +343,13 @@ def test_check_grad(self):
         self.check_grad(['X'], 'Out')
 
 
-class RReluTrainingTest(OpTest):
+class RReluTrainingTest(RReluTest):
     def setUp(self):
         self.op_type = "rrelu"
         self.lower = 0.3
-        self.upper = 0.3000009
+        self.upper = 0.300000009
         self.is_test = False
-        self.init_prams()
-
-
-class RReluTrainingTest(OpTest):
-    def setUp(self):
-        self.op_type = "rrelu"
-        self.lower = 0.3
-        self.upper = 0.3000009
-        self.is_test = False
-        self.init_prams()
+        self.init_params()
 
 
 if __name__ == "__main__":
diff --git a/setup.py b/setup.py
index 6d088750a60b0..6e77373acf540 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,6 @@
 from setuptools.command.egg_info import egg_info
 from setuptools.command.install import install as InstallCommandBase
 from setuptools.command.install_lib import install_lib
-from setuptools.dist import Distribution
 
 if sys.version_info < (3, 7):
     raise RuntimeError(
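
Background for reviewers on the rule this patch fixes: flake8's F811 ("redefinition of unused name") fires when a scope rebinds a name whose earlier binding was never used. In a unittest module this is more than style noise, because the later def or class statement silently replaces the earlier one, so the shadowed test never runs. Below is a minimal, self-contained sketch of the failure mode and of the rename fix applied above; the TestExample* names and assertions are hypothetical, not from the Paddle codebase.

    import unittest


    class TestExampleBroken(unittest.TestCase):
        def test_add(self):
            # This test never runs: the def below rebinds test_add
            # in the class scope, discarding this method object.
            self.assertEqual(1 + 1, 2)

        def test_add(self):  # flake8 reports F811 on this redefinition
            self.assertEqual(2 + 2, 4)


    class TestExampleRenamed(unittest.TestCase):
        # The fix preferred in this patch (cf. test_dygraph_group_sharded_stage3
        # and TestFftnException above): give each test a distinct name so both run.
        def test_add(self):
            self.assertEqual(1 + 1, 2)

        def test_add_again(self):
            self.assertEqual(2 + 2, 4)


    if __name__ == "__main__":
        unittest.main()

The alternative used in test_lstm_cudnn_op.py and test_matmul_v2_op.py is to append "# noqa: F811" to the redefinition so flake8 skips the check on that line; the earlier definition remains dead code, which is why the patch renames or deletes true duplicates (TestAbsDoubleGradCheck, RReluTrainingTest) wherever possible.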