diff --git a/python/paddle/fluid/tests/unittests/test_number_count_op.py b/python/paddle/fluid/tests/unittests/test_number_count_op.py
index 032d582035dc09c13e9cd5a9f8b943c88928bb18..c2781b98e00b0c1fb18f78a209c9194a1d21dc60 100644
--- a/python/paddle/fluid/tests/unittests/test_number_count_op.py
+++ b/python/paddle/fluid/tests/unittests/test_number_count_op.py
@@ -20,7 +20,6 @@ import op_test
 import paddle
 import paddle.fluid.core as core
 from paddle.distributed.models.moe import utils
-from paddle.fluid.framework import _test_eager_guard
 
 
 def count(x, upper_num):
@@ -68,17 +67,12 @@ class TestNumberCountAPI(unittest.TestCase):
         res = exe.run(feed={'x': self.x}, fetch_list=[out])
         assert np.allclose(res, self.out)
 
-    def func_api_dygraph(self):
+    def test_api_dygraph(self):
         paddle.disable_static()
         x = paddle.to_tensor(self.x)
         out = utils._number_count(x, self.upper_num)
         assert np.allclose(out.numpy(), self.out)
 
-    def test_api_dygraph(self):
-        with _test_eager_guard():
-            self.func_api_dygraph()
-        self.func_api_dygraph()
-
 
 if __name__ == '__main__':
     paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
index 0be5ee13a1b27d524c1a2a72305f0fe3ceb94e04..53e34ae70e6739c8c6228bd73f1e02e777a7c6fc 100644
--- a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
@@ -20,7 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import Program, _test_eager_guard, program_guard
+from paddle.fluid.framework import Program, program_guard
 
 
 class TestOneHotOp(OpTest):
@@ -182,10 +182,9 @@ class TestOneHotOpApi(unittest.TestCase):
         one_hot_label = paddle.nn.functional.one_hot(
             fluid.dygraph.to_variable(label), depth
         )
-        with _test_eager_guard():
-            one_hot_label = paddle.nn.functional.one_hot(
-                paddle.to_tensor(label), depth
-            )
+        one_hot_label = paddle.nn.functional.one_hot(
+            paddle.to_tensor(label), depth
+        )
 
     def _run(self, depth):
         label = fluid.layers.data(name="label", shape=[1], dtype="int64")
diff --git a/python/paddle/fluid/tests/unittests/test_onnx_export.py b/python/paddle/fluid/tests/unittests/test_onnx_export.py
index e4e461bdf025fc191246677a46211c056d07339b..4d5e09a2ea9e0e97ab09aa101a794c21c3cbf687 100644
--- a/python/paddle/fluid/tests/unittests/test_onnx_export.py
+++ b/python/paddle/fluid/tests/unittests/test_onnx_export.py
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 
 class LinearNet(paddle.nn.Layer):
@@ -41,33 +40,23 @@ class Logic(paddle.nn.Layer):
 
 
 class TestExportWithTensor(unittest.TestCase):
-    def func_with_tensor(self):
+    def test_with_tensor(self):
         self.x_spec = paddle.static.InputSpec(
             shape=[None, 128], dtype='float32'
         )
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])
 
-    def test_with_tensor(self):
-        with _test_eager_guard():
-            self.func_with_tensor()
-        self.func_with_tensor()
-
 
 class TestExportWithTensor1(unittest.TestCase):
-    def func_with_tensor(self):
+    def test_with_tensor(self):
         self.x = paddle.to_tensor(np.random.random((1, 128)))
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x])
 
-    def test_with_tensor(self):
-        with _test_eager_guard():
-            self.func_with_tensor()
-        self.func_with_tensor()
-
 
 class TestExportPrunedGraph(unittest.TestCase):
-    def func_prune_graph(self):
+    def test_prune_graph(self):
         model = Logic()
         self.x = paddle.to_tensor(np.array([1]))
         self.y = paddle.to_tensor(np.array([-1]))
@@ -77,12 +66,6 @@ class TestExportPrunedGraph(unittest.TestCase):
             model, 'pruned', input_spec=[self.x], output_spec=[out]
         )
 
-    def test_prune_graph(self):
-        # test eager
-        with _test_eager_guard():
-            self.func_prune_graph()
-        self.func_prune_graph()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py
index ddbf0a8f2d761ab150628d63fa2b581fee55427c..e4eaeee2b594f1b8410ea73ef2fe69d285d77c58 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -27,7 +27,6 @@ import paddle.fluid.optimizer as optimizer
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import (
     Program,
-    _test_eager_guard,
     convert_np_dtype_to_dtype_,
     program_guard,
 )
@@ -1387,11 +1386,6 @@ class TestOptimizerDtype(unittest.TestCase):
     def test_float32(self):
         self.check_with_dtype('float32')
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_float64()
-            self.test_float32()
-
 
 class TestMasterWeightSaveForFP16(unittest.TestCase):
     '''
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
index e53ac6bbd9ed3d2f07bdf8ebed2597d3ca861819..78a1f43547f27b73d339b2a4a1d0e4827ef27719 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
@@ -18,7 +18,7 @@ import numpy as np
 
 import paddle
 import paddle.optimizer as optimizer
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph
 
 
 class TestOptimizerForVarBase(unittest.TestCase):
@@ -59,71 +59,36 @@ class TestOptimizerForVarBase(unittest.TestCase):
             x.numpy(), np.full([2, 3], -self.lr), rtol=1e-05
         )
 
-    def func_test_adam_with_varbase_list_input(self):
+    def test_adam_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adam)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adam)
 
-    def test_adam_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adam_with_varbase_list_input()
-        self.func_test_adam_with_varbase_list_input()
-
-    def func_test_sgd_with_varbase_list_input(self):
+    def test_sgd_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.SGD)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.SGD)
 
-    def test_sgd_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_sgd_with_varbase_list_input()
-        self.func_test_sgd_with_varbase_list_input()
-
-    def func_test_adagrad_with_varbase_list_input(self):
+    def test_adagrad_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adagrad)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adagrad)
 
-    def test_adagrad_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adagrad_with_varbase_list_input()
-        self.func_test_adagrad_with_varbase_list_input()
-
-    def func_test_adamw_with_varbase_list_input(self):
+    def test_adamw_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.AdamW)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.AdamW)
 
-    def test_adamw_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adamw_with_varbase_list_input()
-        self.func_test_adamw_with_varbase_list_input()
-
-    def func_test_adamax_with_varbase_list_input(self):
+    def test_adamax_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adamax)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adamax)
 
-    def test_adamax_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adamax_with_varbase_list_input()
-        self.func_test_adamax_with_varbase_list_input()
-
-    def func_test_momentum_with_varbase_list_input(self):
+    def test_momentum_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Momentum)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Momentum)
 
-    def test_momentum_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_momentum_with_varbase_list_input()
-        self.func_test_momentum_with_varbase_list_input()
-
-    def func_test_optimizer_with_varbase_input(self):
+    def test_optimizer_with_varbase_input(self):
         x = paddle.zeros([2, 3])
         with self.assertRaises(TypeError):
             optimizer.Adam(learning_rate=self.lr, parameters=x)
 
-    def test_optimizer_with_varbase_input(self):
-        with _test_eager_guard():
-            self.func_test_optimizer_with_varbase_input()
-        self.func_test_optimizer_with_varbase_input()
-
-    def func_test_create_param_lr_with_1_for_coverage(self):
+    def test_create_param_lr_with_1_for_coverage(self):
         if _in_legacy_dygraph():
             x = paddle.fluid.framework.ParamBase(
                 dtype="float32",
@@ -151,12 +116,7 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()
 
-    def test_create_param_lr_with_1_for_coverage(self):
-        with _test_eager_guard():
-            self.func_test_create_param_lr_with_1_for_coverage()
-        self.func_test_create_param_lr_with_1_for_coverage()
-
-    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+    def test_create_param_lr_with_no_1_value_for_coverage(self):
         if _in_legacy_dygraph():
             x = paddle.fluid.framework.ParamBase(
                 dtype="float32",
@@ -184,11 +144,6 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()
 
-    def test_create_param_lr_with_no_1_value_for_coverage(self):
-        with _test_eager_guard():
-            self.func_test_create_param_lr_with_1_for_coverage()
-        self.func_test_create_param_lr_with_1_for_coverage()
-
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_outer.py b/python/paddle/fluid/tests/unittests/test_outer.py
index dfd185433a430d972a91f7b0721844d7f11754bd..3bbe20b7b5b573a2b5907ffec2c01b5cebf0a9c0 100644
--- a/python/paddle/fluid/tests/unittests/test_outer.py
+++ b/python/paddle/fluid/tests/unittests/test_outer.py
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 from paddle.static import Program, program_guard
 
 
@@ -54,7 +53,7 @@ class TestMultiplyApi(unittest.TestCase):
         res = paddle.outer(x, y)
         return res.numpy()
 
-    def func_test_multiply(self):
+    def test_multiply(self):
         np.random.seed(7)
 
         # test static computation graph: 3-d array
@@ -113,14 +112,9 @@ class TestMultiplyApi(unittest.TestCase):
         res = self._run_dynamic_graph_case(x_data, y_data)
         np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05)
 
-    def test_multiply(self):
-        with _test_eager_guard():
-            self.func_test_multiply()
-        self.func_test_multiply()
-
 
 class TestMultiplyError(unittest.TestCase):
-    def func_test_errors(self):
+    def test_errors(self):
         # test static computation graph: dtype can not be int8
         paddle.enable_static()
         with program_guard(Program(), Program()):
@@ -161,11 +155,6 @@ class TestMultiplyError(unittest.TestCase):
         y_data = np.random.randn(200).astype(np.float32)
         self.assertRaises(ValueError, paddle.outer, x_data, y_data)
 
-    def test_errors(self):
-        with _test_eager_guard():
-            self.func_test_errors()
-        self.func_test_errors()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
index 1547bd673db5f505d4a126e8666995e4c9373a6b..21ad7092f57997cd298a24e75d39f8047515b199 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
@@ -19,7 +19,7 @@ import numpy as np
 
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.wrapped_decorator import wrap_decorator
 
 
@@ -68,7 +68,7 @@
         )
 
     @dygraph_guard
-    def func_exception(self):
+    def test_exception(self):
         with self.assertRaises(AssertionError):
             self.grad(None, None)
 
@@ -101,13 +101,8 @@
         with self.assertRaises(AssertionError):
             self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1)
 
-    def test_exception(self):
-        with _test_eager_guard():
-            self.func_exception()
-        self.func_exception()
-
     @dygraph_guard
-    def func_simple_example(self):
+    def test_simple_example(self):
         x = random_var(self.shape)
         x.stop_gradient = False
         y = x + 1
@@ -141,13 +136,8 @@
             grad_with_none_and_not_none.stop_gradient, create_graph
         )
 
-    def test_simple_example(self):
-        with _test_eager_guard():
-            self.func_simple_example()
-        self.func_simple_example()
-
     @dygraph_guard
-    def func_none_one_initial_gradient(self):
+    def test_none_one_initial_gradient(self):
         numel = 1
         for s in self.shape:
             numel *= s
@@ -223,11 +213,6 @@
                     grad_z.numpy(), original_random_grad_z
                 )
 
-    def test_none_one_initial_gradient(self):
-        with _test_eager_guard():
-            self.func_none_one_initial_gradient()
-        self.func_none_one_initial_gradient()
-
     @dygraph_guard
     def func_example_with_gradient_accumulation_and_create_graph(self):
         x = random_var(self.shape)
@@ -269,13 +254,8 @@
             x_grad_actual, x_grad_expected, rtol=1e-05
         )
 
-    def test_example_with_gradient_accumulation_and_create_graph(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_create_graph()
-        self.func_example_with_gradient_accumulation_and_create_graph()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_no_grad_vars(self):
+    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -321,13 +301,8 @@
             x_grad_actual, x_grad_expected, rtol=1e-05
         )
 
-    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_no_grad_vars()
-        self.func_example_with_gradient_accumulation_and_no_grad_vars()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_not_create_graph(self):
+    def test_example_with_gradient_accumulation_and_not_create_graph(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -363,11 +338,6 @@
             x_grad_actual, x_grad_expected, rtol=1e-05
         )
 
-    def test_example_with_gradient_accumulation_and_not_create_graph(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_not_create_graph()
-        self.func_example_with_gradient_accumulation_and_not_create_graph()
-
 
 class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
     def setUp(self):
diff --git a/python/paddle/fluid/tests/unittests/test_parameter.py b/python/paddle/fluid/tests/unittests/test_parameter.py
index bb4a8bfab7b80c1a1841c9585db8fe6447343aa5..5ce6f313183959ad0da93da3f7df3b0f18d690fa 100644
--- a/python/paddle/fluid/tests/unittests/test_parameter.py
+++ b/python/paddle/fluid/tests/unittests/test_parameter.py
@@ -22,12 +22,7 @@ import paddle.fluid.core as core
 import paddle.fluid.io as io
 from paddle.fluid.dygraph import guard
 from paddle.fluid.executor import Executor
-from paddle.fluid.framework import (
-    ParamBase,
-    Variable,
-    _test_eager_guard,
-    default_main_program,
-)
+from paddle.fluid.framework import ParamBase, Variable, default_main_program
 from paddle.fluid.initializer import ConstantInitializer
 
 paddle.enable_static()
@@ -59,7 +54,7 @@
         zero_dim_param = b.create_parameter(name='x', shape=[], dtype='float32')
         self.assertEqual(zero_dim_param.shape, ())
 
-    def func_parambase(self):
+    def test_parambase(self):
         with guard():
             linear = paddle.nn.Linear(10, 10)
             param = linear.weight
@@ -85,11 +80,6 @@
         zero_dim_param = ParamBase(shape=[], dtype='float32')
         self.assertEqual(zero_dim_param.shape, [])
 
-    def test_parambase(self):
-        with _test_eager_guard():
-            self.func_parambase()
-        self.func_parambase()
-
     def func_exception(self):
         b = main_program.global_block()
         with self.assertRaises(ValueError):
@@ -109,7 +99,7 @@
             name='test', shape=[-1], dtype='float32', initializer=None
         )
 
-    def func_parambase_to_vector(self):
+    def test_parambase_to_vector(self):
         with guard():
             initializer = paddle.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(3.0)
             )
@@ -135,11 +125,6 @@
             self.assertTrue(linear2.weight.is_leaf, True)
             self.assertTrue(linear2.bias.is_leaf, True)
 
-    def test_parambase_to_vector(self):
-        with _test_eager_guard():
-            self.func_parambase_to_vector()
-        self.func_parambase_to_vector()
-
 
 if __name__ == '__main__':
    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_poisson_op.py b/python/paddle/fluid/tests/unittests/test_poisson_op.py
index 3c2fa7c1cbae4a970e252bebc7dcd899f278ec00..e2720edb013130de8aaeab38d06c6effd43276cd 100644
--- a/python/paddle/fluid/tests/unittests/test_poisson_op.py
+++ b/python/paddle/fluid/tests/unittests/test_poisson_op.py
@@ -19,7 +19,6 @@ import numpy as np
 from op_test import OpTest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 paddle.seed(100)
@@ -103,13 +102,12 @@
         y = paddle.poisson(x)
         self.assertTrue(np.min(y.numpy()) >= 0)
 
-        with _test_eager_guard():
-            x = paddle.randn([10, 10], dtype='float32')
-            x.stop_gradient = False
-            y = paddle.poisson(x)
-            y.backward()
-            self.assertTrue(np.min(y.numpy()) >= 0)
-            np.testing.assert_array_equal(np.zeros_like(x), x.gradient())
+        x = paddle.randn([10, 10], dtype='float32')
+        x.stop_gradient = False
+        y = paddle.poisson(x)
+        y.backward()
+        self.assertTrue(np.min(y.numpy()) >= 0)
+        np.testing.assert_array_equal(np.zeros_like(x), x.gradient())
 
     def test_fixed_random_number(self):
         # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
diff --git a/python/paddle/fluid/tests/unittests/test_pool1d_api.py b/python/paddle/fluid/tests/unittests/test_pool1d_api.py
index 73d75d63c413bd11f385150c401d4a2483b65676..2c191bf4892b774065b1b390841bac009016b2df 100644
--- a/python/paddle/fluid/tests/unittests/test_pool1d_api.py
+++ b/python/paddle/fluid/tests/unittests/test_pool1d_api.py
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
-from paddle.fluid.framework import _test_eager_guard
 
 
 def adaptive_start_index(index, input_size, output_size):
@@ -274,10 +273,6 @@
             self.check_avg_dygraph_padding_same(place)
             self.check_max_dygraph_return_index_results(place)
 
-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_pool1d()
-
 
 class TestPool2DError_API(unittest.TestCase):
     def test_error_api(self):
@@ -422,10 +417,6 @@
 
         self.assertRaises(ValueError, run_stride_out_of_range)
 
-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_error_api()
-
 
 if __name__ == '__main__':
     unittest.main()