From f665a6a54aa1991ce4b6c3a9c6cbb0aa27cfe4b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A7=9C=E6=B0=B8=E4=B9=85?= <34344716+yjjiang11@users.noreply.github.com> Date: Wed, 14 Dec 2022 20:52:00 -0800 Subject: [PATCH] rm unittests eager guard test part16 meshgrid2normalize (#48839) * rm unittests eager guard test part16 meshgrid2normalize * modify --- .../fluid/tests/unittests/test_meshgrid_op.py | 52 ++++--------- .../fluid/tests/unittests/test_min_op.py | 5 -- .../fluid/tests/unittests/test_momentum_op.py | 10 --- .../tests/unittests/test_multi_dot_op.py | 5 -- .../tests/unittests/test_multiplex_op.py | 40 ++++------ .../fluid/tests/unittests/test_multiply.py | 15 +--- .../test_multiprocess_dataloader_dataset.py | 78 +++---------------- .../fluid/tests/unittests/test_nll_loss.py | 48 ++++++------ .../test_nn_functional_embedding_dygraph.py | 15 +--- .../fluid/tests/unittests/test_normalize.py | 16 ---- 10 files changed, 71 insertions(+), 213 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py index 521fcf90b89..71ed7ad4aa5 100644 --- a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py +++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py @@ -19,7 +19,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard class TestMeshgridOp(OpTest): @@ -200,10 +199,6 @@ class TestMeshgridOp6(unittest.TestCase): assert np.array_equal(res_3.shape, [100, 200]) assert np.array_equal(res_4.shape, [100, 200]) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_api_with_dygraph() - class TestMeshgridOp7(unittest.TestCase): def test_api_with_dygraph_list_input(self): @@ -230,10 +225,6 @@ class TestMeshgridOp7(unittest.TestCase): assert np.array_equal(res_3.shape, [100, 200]) assert np.array_equal(res_4.shape, [100, 200]) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_api_with_dygraph_list_input() - class TestMeshgridOp8(unittest.TestCase): def test_api_with_dygraph_tuple_input(self): @@ -260,10 +251,6 @@ class TestMeshgridOp8(unittest.TestCase): assert np.array_equal(res_3.shape, [100, 200]) assert np.array_equal(res_4.shape, [100, 200]) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_api_with_dygraph_tuple_input() - class TestMeshgridEager(unittest.TestCase): def test_dygraph_api(self): @@ -290,28 +277,23 @@ class TestMeshgridEager(unittest.TestCase): res_1, res_2 = paddle.tensor.meshgrid((tensor_1, tensor_2)) sum = paddle.add_n([res_1, res_2]) sum.backward() - with _test_eager_guard(): - tensor_eager_1 = fluid.dygraph.to_variable(input_1) - tensor_eager_2 = fluid.dygraph.to_variable(input_2) - tensor_eager_1.stop_gradient = False - tensor_eager_2.stop_gradient = False - res_eager_1, res_eager_2 = paddle.tensor.meshgrid( - (tensor_eager_1, tensor_eager_2) - ) - sum_eager = paddle.add_n([res_eager_1, res_eager_2]) - sum_eager.backward() - self.assertEqual( - ( - tensor_1.grad.numpy() == tensor_eager_1.grad.numpy() - ).all(), - True, - ) - self.assertEqual( - ( - tensor_2.grad.numpy() == tensor_eager_2.grad.numpy() - ).all(), - True, - ) + tensor_eager_1 = fluid.dygraph.to_variable(input_1) + tensor_eager_2 = fluid.dygraph.to_variable(input_2) + tensor_eager_1.stop_gradient = False + tensor_eager_2.stop_gradient = False + res_eager_1, res_eager_2 = paddle.tensor.meshgrid( + (tensor_eager_1, tensor_eager_2) + ) + sum_eager = paddle.add_n([res_eager_1, 
res_eager_2]) + sum_eager.backward() + self.assertEqual( + (tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(), + True, + ) + self.assertEqual( + (tensor_2.grad.numpy() == tensor_eager_2.grad.numpy()).all(), + True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_min_op.py b/python/paddle/fluid/tests/unittests/test_min_op.py index 083da6e96ac..c0e9803b140 100644 --- a/python/paddle/fluid/tests/unittests/test_min_op.py +++ b/python/paddle/fluid/tests/unittests/test_min_op.py @@ -20,7 +20,6 @@ from test_sum_op import TestReduceOPTensorAxisBase import paddle import paddle.fluid.core as core -from paddle.fluid.framework import _test_eager_guard class ApiMinTest(unittest.TestCase): @@ -83,10 +82,6 @@ class ApiMinTest(unittest.TestCase): z_expected = np.array(np.min(np_x, axis=0)) self.assertEqual((np_z == z_expected).all(), True) - def test_eager_api(self): - with _test_eager_guard(): - self.test_imperative_api() - class TestOutDtype(unittest.TestCase): def test_min(self): diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index 8c9ec6d4295..00088ab276b 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -21,7 +21,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid.framework import _test_eager_guard from paddle.fluid.op import Operator @@ -558,11 +557,6 @@ class TestMomentumV2(unittest.TestCase): ) self.assertRaises(ValueError, paddle.optimizer.Momentum, momentum=None) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_momentum_dygraph() - self.test_raise_error() - class TestMomentumOpWithDecay(OpTest): def setUp(self): @@ -996,10 +990,6 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): self._check_with_param_arrt(place, use_amp) self._check_with_param_group(place, use_amp) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_main() - class TestMultiTensorMomentumStatic(unittest.TestCase): def _momentum_optimize_static( diff --git a/python/paddle/fluid/tests/unittests/test_multi_dot_op.py b/python/paddle/fluid/tests/unittests/test_multi_dot_op.py index e5afa760bd9..cdba6d5f7f0 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_dot_op.py +++ b/python/paddle/fluid/tests/unittests/test_multi_dot_op.py @@ -19,7 +19,6 @@ from numpy.linalg import multi_dot from op_test import OpTest import paddle -from paddle.fluid.framework import _test_eager_guard paddle.enable_static() @@ -287,10 +286,6 @@ class APITestMultiDot(unittest.TestCase): expected_result = np.linalg.multi_dot([input_array1, input_array2]) np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05) - def test_dygraph_api(self): - with _test_eager_guard(): - self.test_dygraph_without_out() - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_multiplex_op.py b/python/paddle/fluid/tests/unittests/test_multiplex_op.py index 3afd01427d5..563a9fdb34b 100644 --- a/python/paddle/fluid/tests/unittests/test_multiplex_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiplex_op.py @@ -19,7 +19,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard class TestMultiplexOp(OpTest): @@ -114,28 +113,23 @@ class TestMultiplexODygrap(unittest.TestCase): inputs[1].stop_gradient = False 
res = paddle.multiplex(inputs, index) res.backward() - with _test_eager_guard(): - inputs_eager = [paddle.to_tensor(img1), paddle.to_tensor(img2)] - index_eager = paddle.to_tensor( - np.array([[1], [0]]).astype(np.int32) - ) - inputs_eager[0].stop_gradient = False - inputs_eager[1].stop_gradient = False - res_eager = paddle.multiplex(inputs_eager, index_eager) - res_eager.backward() - self.assertEqual((res.numpy() == res_eager.numpy()).all(), True) - self.assertEqual( - ( - inputs[0].grad.numpy() == inputs_eager[0].grad.numpy() - ).all(), - True, - ) - self.assertEqual( - ( - inputs[1].grad.numpy() == inputs_eager[1].grad.numpy() - ).all(), - True, - ) + inputs_eager = [paddle.to_tensor(img1), paddle.to_tensor(img2)] + index_eager = paddle.to_tensor( + np.array([[1], [0]]).astype(np.int32) + ) + inputs_eager[0].stop_gradient = False + inputs_eager[1].stop_gradient = False + res_eager = paddle.multiplex(inputs_eager, index_eager) + res_eager.backward() + self.assertEqual((res.numpy() == res_eager.numpy()).all(), True) + self.assertEqual( + (inputs[0].grad.numpy() == inputs_eager[0].grad.numpy()).all(), + True, + ) + self.assertEqual( + (inputs[1].grad.numpy() == inputs_eager[1].grad.numpy()).all(), + True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py index ef9cfb2b1dc..44ca96432ec 100755 --- a/python/paddle/fluid/tests/unittests/test_multiply.py +++ b/python/paddle/fluid/tests/unittests/test_multiply.py @@ -18,7 +18,6 @@ import numpy as np import paddle import paddle.tensor as tensor -from paddle.fluid.framework import _test_eager_guard from paddle.static import Program, program_guard @@ -55,7 +54,7 @@ class TestMultiplyApi(unittest.TestCase): res = paddle.multiply(x, y) return res.numpy() - def func_test_multiply(self): + def test_multiply(self): np.random.seed(7) # test static computation graph: 1-d array @@ -106,14 +105,9 @@ class TestMultiplyApi(unittest.TestCase): res = self._run_dynamic_graph_case(x_data, y_data) np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) - def test_multiply(self): - with _test_eager_guard(): - self.func_test_multiply() - self.func_test_multiply() - class TestMultiplyError(unittest.TestCase): - def func_test_errors(self): + def test_errors(self): # test static computation graph: dtype can not be int8 paddle.enable_static() with program_guard(Program(), Program()): @@ -186,11 +180,6 @@ class TestMultiplyError(unittest.TestCase): y_data = np.random.randn(200).astype(np.float32) self.assertRaises(ValueError, paddle.multiply, x_data, y_data) - def test_errors(self): - with _test_eager_guard(): - self.func_test_errors() - self.func_test_errors() - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py index a09b613a85c..4d11fb73556 100755 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py @@ -18,7 +18,6 @@ import numpy as np import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard from paddle.io import ( ChainDataset, ComposeDataset, @@ -92,21 +91,16 @@ class TestTensorDataset(unittest.TestCase): assert np.allclose(input.numpy(), input_np[i]) assert np.allclose(label.numpy(), label_np[i]) - def func_test_main(self): + def 
test_main(self): places = [paddle.CPUPlace()] if paddle.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for p in places: self.run_main(num_workers=0, places=p) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class TestComposeDataset(unittest.TestCase): - def func_test_main(self): + def test_main(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -124,14 +118,9 @@ class TestComposeDataset(unittest.TestCase): assert np.allclose(input2, input2_t) assert np.allclose(label2, label2_t) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class TestRandomSplitApi(unittest.TestCase): - def func_test_main(self): + def test_main(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -150,14 +139,9 @@ class TestRandomSplitApi(unittest.TestCase): self.assertTrue(len(elements_list) == 0) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class TestRandomSplitError(unittest.TestCase): - def func_test_errors(self): + def test_errors(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -165,11 +149,6 @@ class TestRandomSplitError(unittest.TestCase): self.assertRaises(ValueError, paddle.io.random_split, range(5), [8]) self.assertRaises(ValueError, paddle.io.random_split, range(5), []) - def test_errors(self): - with _test_eager_guard(): - self.func_test_errors() - self.func_test_errors() - class TestSubsetDataset(unittest.TestCase): def run_main(self, num_workers, places): @@ -228,7 +207,7 @@ class TestSubsetDataset(unittest.TestCase): self.assertEqual(odd_list, elements_list) - def func_test_main(self): + def test_main(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -238,11 +217,6 @@ class TestSubsetDataset(unittest.TestCase): for p in places: self.run_main(num_workers=0, places=p) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class TestChainDataset(unittest.TestCase): def run_main(self, num_workers, places): @@ -268,18 +242,13 @@ class TestChainDataset(unittest.TestCase): assert np.allclose(label, samples[idx][1]) idx += 1 - def func_test_main(self): + def test_main(self): places = [paddle.CPUPlace()] if paddle.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for p in places: self.run_main(num_workers=0, places=p) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class NumpyMixTensorDataset(Dataset): def __init__(self, sample_num): @@ -380,15 +349,10 @@ class TestComplextDataset(unittest.TestCase): assert data[4]['a'].shape == [2] assert data[4]['b'].shape == [2, 2] - def func_test_main(self): + def test_main(self): for num_workers in [0, 2]: self.run_main(num_workers) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class SingleFieldDataset(Dataset): def __init__(self, sample_num): @@ -426,15 +390,10 @@ class TestSingleFieldDataset(unittest.TestCase): ) assert data.shape == [2, 2, 3] - def func_test_main(self): + def test_main(self): for num_workers in [0, 2]: self.run_main(num_workers) - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class 
SingleFieldIterableDataset(IterableDataset): def __init__(self, sample_num): @@ -460,18 +419,13 @@ class TestDataLoaderGenerateStates(unittest.TestCase): [457190280, 2660306227, 859341110, 354512857], ] - def func_test_main(self): + def test_main(self): from paddle.fluid.dataloader.worker import _generate_states for inp, outp in zip(self.inputs, self.outputs): out = _generate_states(*inp) assert out == outp - def test_main(self): - with _test_eager_guard(): - self.func_test_main() - self.func_test_main() - class TestDatasetWithDropLast(unittest.TestCase): def run_main(self, dataset, num_samples, batch_size): @@ -491,24 +445,14 @@ class TestDatasetWithDropLast(unittest.TestCase): datas.append(data) assert len(datas) == steps - def func_test_map_dataset(self): + def test_map_dataset(self): dataset = RandomDataset(10) self.run_main(dataset, 10, 3) - def test_map_dataset(self): - with _test_eager_guard(): - self.func_test_map_dataset() - self.func_test_map_dataset() - - def func_test_iterable_dataset(self): + def test_iterable_dataset(self): dataset = RandomIterableDataset(10) self.run_main(dataset, 10, 3) - def test_iterable_dataset(self): - with _test_eager_guard(): - self.func_test_iterable_dataset() - self.func_test_iterable_dataset() - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_nll_loss.py b/python/paddle/fluid/tests/unittests/test_nll_loss.py index 159c682ce27..f21b3ea9b48 100644 --- a/python/paddle/fluid/tests/unittests/test_nll_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nll_loss.py @@ -19,7 +19,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard def nll_loss_1d( @@ -112,12 +111,11 @@ class TestNLLLoss(unittest.TestCase): dy_result = dy_res.numpy() with fluid.dygraph.guard(): - with _test_eager_guard(): - nll_loss = paddle.nn.loss.NLLLoss() - eager_res = nll_loss( - paddle.to_tensor(input_np), paddle.to_tensor(label_np) - ) - eager_result = eager_res.numpy() + nll_loss = paddle.nn.loss.NLLLoss() + eager_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) + eager_result = eager_res.numpy() expected = nll_loss_1d(input_np, label_np)[0] np.testing.assert_allclose(static_result, expected, rtol=1e-05) @@ -158,15 +156,14 @@ class TestNLLLoss(unittest.TestCase): ) dy_result = dy_res.numpy() - with _test_eager_guard(): - nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') - in_t = paddle.to_tensor(input_np) - label = paddle.to_tensor(label_np) - in_t.stop_gradient = False - eager_res = nll_loss(in_t, label) - eager_result = eager_res.numpy() - loss = eager_res.sum() - loss.backward() + nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') + in_t = paddle.to_tensor(input_np) + label = paddle.to_tensor(label_np) + in_t.stop_gradient = False + eager_res = nll_loss(in_t, label) + eager_result = eager_res.numpy() + loss = eager_res.sum() + loss.backward() expected = nll_loss_1d(input_np, label_np, reduction='sum')[0] np.testing.assert_allclose(static_result, expected, rtol=1e-05) @@ -215,16 +212,15 @@ class TestNLLLoss(unittest.TestCase): ) dy_result = dy_res.numpy() - with _test_eager_guard(): - nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np) - ) - eager_res = nll_loss( - paddle.to_tensor(input_np), paddle.to_tensor(label_np) - ) - loss = eager_res.sum() - loss.backward() - eager_result = eager_res.numpy() + nll_loss = paddle.nn.loss.NLLLoss( + weight=paddle.to_tensor(weight_np) + ) + eager_res = nll_loss( + 
paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) + loss = eager_res.sum() + loss.backward() + eager_result = eager_res.numpy() expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0] diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py index a584324c6eb..833a036e7d1 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py +++ b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py @@ -17,13 +17,12 @@ import unittest import numpy as np import paddle -from paddle.fluid.framework import _test_eager_guard paddle.disable_static() class EmbeddingDygraph(unittest.TestCase): - def func_1(self): + def test_1(self): x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64) paddle.disable_static(paddle.CPUPlace()) x = paddle.to_tensor(x_data, stop_gradient=False) @@ -42,12 +41,7 @@ class EmbeddingDygraph(unittest.TestCase): out.backward() adam.step() - def test_1(self): - with _test_eager_guard(): - self.func_1() - self.func_1() - - def func_2(self): + def test_2(self): x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64) y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32) paddle.disable_static(paddle.CPUPlace()) @@ -63,11 +57,6 @@ class EmbeddingDygraph(unittest.TestCase): with self.assertRaises(ValueError): embedding = paddle.nn.Embedding(10, -3, sparse=True) - def test_2(self): - with _test_eager_guard(): - self.func_2() - self.func_2() - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py index bd35cbd998e..908360d29e6 100644 --- a/python/paddle/fluid/tests/unittests/test_normalize.py +++ b/python/paddle/fluid/tests/unittests/test_normalize.py @@ -19,7 +19,6 @@ import numpy as np import paddle import paddle.fluid as fluid import paddle.nn.functional as F -from paddle.fluid.framework import _test_eager_guard def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True): @@ -87,12 +86,6 @@ class TestNNFunctionalNormalize(unittest.TestCase): with fluid.program_guard(fluid.Program()): self.run_static() - def test_cpu_eager(self): - with _test_eager_guard(): - paddle.disable_static(place=paddle.fluid.CPUPlace()) - self.run_imperative() - paddle.enable_static() - def test_gpu(self): if not fluid.core.is_compiled_with_cuda(): return @@ -104,15 +97,6 @@ class TestNNFunctionalNormalize(unittest.TestCase): with fluid.program_guard(fluid.Program()): self.run_static(use_gpu=True) - def test_gpu_eager(self): - with _test_eager_guard(): - if not fluid.core.is_compiled_with_cuda(): - return - - paddle.disable_static(place=paddle.fluid.CUDAPlace(0)) - self.run_imperative() - paddle.enable_static() - if __name__ == "__main__": unittest.main() -- GitLab
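
For reference, a minimal sketch of the test pattern this patch removes across all ten files, assuming Paddle 2.x where dygraph already runs in eager mode by default. The class and method names below (SomeApiTest, func_test_api) are hypothetical and only illustrate the shape of the change, not any specific test touched in the diff above.

    import unittest

    import numpy as np
    import paddle


    class SomeApiTest(unittest.TestCase):
        # Old style: the real check lived in a func_test_* helper, and a
        # test_* wrapper ran it twice, once inside _test_eager_guard() and
        # once outside it:
        #
        #     def test_api(self):
        #         with _test_eager_guard():
        #             self.func_test_api()
        #         self.func_test_api()
        #
        # New style: eager mode is the default, so the check runs directly
        # and the guard plus its wrapper method are dropped.
        def test_api(self):
            x = paddle.to_tensor(np.ones([2, 2], dtype=np.float32))
            y = paddle.multiply(x, x)
            np.testing.assert_allclose(y.numpy(), np.ones([2, 2]), rtol=1e-05)


    if __name__ == '__main__':
        unittest.main()

The same simplification is applied two ways in the hunks above: either the test_* wrapper is deleted and func_test_* is renamed back to test_* (as in test_multiply.py and test_multiprocess_dataloader_dataset.py), or the duplicated eager branch under the guard is kept and simply dedented (as in test_meshgrid_op.py and test_multiplex_op.py).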