diff --git a/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py b/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py
index 6f3c23280080464e08d3d73e5243d77dfa1cc931..19258c43bdade00e146de706a870a4f013bc5080 100755
--- a/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py
+++ b/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py
@@ -22,7 +22,7 @@ import numpy as np
 import paddle
 import paddle.nn as nn
 from paddle import _legacy_C_ops
-from paddle.fluid.framework import _non_static_mode, _test_eager_guard, core
+from paddle.fluid.framework import _non_static_mode, core
 from paddle.fluid.layer_helper import LayerHelper
 
 sys.path.append("./tokenizer")
@@ -196,7 +196,7 @@ class TestBertTokenizerOp(unittest.TestCase):
         self.texts_tensor = to_string_tensor(self.texts, "texts")
         self.text_pairs_tensor = to_string_tensor(self.text_pairs, "text_pairs")
 
-    def run_padding(self):
+    def test_padding(self):
         self.init_data()
         self.max_seq_len = 128
         self.pad_to_max_seq_len = True
@@ -310,12 +310,7 @@
             token_type_ids, py_token_type_ids, rtol=0, atol=0.01
         )
 
-    def test_padding(self):
-        with _test_eager_guard():
-            self.run_padding()
-        self.run_padding()
-
-    def run_no_padding(self):
+    def test_no_padding(self):
         self.init_data()
         self.max_seq_len = 128
         self.pad_to_max_seq_len = False
@@ -375,12 +370,7 @@
             token_type_ids, py_token_type_ids, rtol=0, atol=0.01
         )
 
-    def test_no_padding(self):
-        with _test_eager_guard():
-            self.run_no_padding()
-        self.run_no_padding()
-
-    def run_is_split_into_words(self):
+    def test_is_split_into_words(self):
         self.init_data()
         self.is_split_into_words = True
 
@@ -403,11 +393,6 @@
             token_type_ids, py_token_type_ids, rtol=0, atol=0.01
         )
 
-    def test_is_split_into_words(self):
-        with _test_eager_guard():
-            self.run_is_split_into_words()
-        self.run_is_split_into_words()
-
     def test_inference(self):
         self.init_data()
         if not os.path.exists(self.save_path):
diff --git a/python/paddle/fluid/tests/unittests/test_frac_api.py b/python/paddle/fluid/tests/unittests/test_frac_api.py
index 2e705bb2d7fad16478d599c019efe3e9e0aeee3d..a8395e5d458815168090038651d1067945905eb6 100644
--- a/python/paddle/fluid/tests/unittests/test_frac_api.py
+++ b/python/paddle/fluid/tests/unittests/test_frac_api.py
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard
 
 
 def ref_frac(x):
@@ -64,17 +63,12 @@
 
     def test_api_eager(self):
         paddle.disable_static(self.place)
-        with _test_eager_guard():
-            x_tensor = paddle.to_tensor(self.x_np)
-            out = paddle.frac(x_tensor)
+        x_tensor = paddle.to_tensor(self.x_np)
+        out = paddle.frac(x_tensor)
         out_ref = ref_frac(self.x_np)
         np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_api_dygraph()
-
 
 class TestFracInt32(TestFracAPI):
     """Test Frac API with data type int32"""
diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py
index 5a4b1235d8dd285adcf0d089ba46230f329037c8..b9c6492e9f2348725e8143b5c12d4a91a39fb950 100644
--- a/python/paddle/fluid/tests/unittests/test_full_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py
@@ -19,7 +19,7 @@ from op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import convert_np_dtype_to_dtype_
 from paddle.static import Program, program_guard
 
 
@@ -142,14 +142,13 @@ class TestFullLikeOp3(TestFullLikeOp1):
 class TestFullLikeOp4(unittest.TestCase):
     def test_skip_data_transform(self):
         paddle.disable_static()
-        with _test_eager_guard():
-            x = paddle.to_tensor(
-                [1.0, 2.0, 3.0, 4.0], place=paddle.CUDAPinnedPlace()
-            )
-            out = paddle.full_like(x, 1.0)
-            self.assertTrue(
-                (out.numpy() == np.ones([4]).astype(np.float32)).all(), True
-            )
+        x = paddle.to_tensor(
+            [1.0, 2.0, 3.0, 4.0], place=paddle.CUDAPinnedPlace()
+        )
+        out = paddle.full_like(x, 1.0)
+        self.assertTrue(
+            (out.numpy() == np.ones([4]).astype(np.float32)).all(), True
+        )
         paddle.enable_static()
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py
index 35474810da77bc407abb6aa1cf38d4ca592352e0..162dcae7841359ad35832ede154f32db1d1566bf 100644
--- a/python/paddle/fluid/tests/unittests/test_full_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_op.py
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard
 
 
 # Test python API
@@ -83,95 +82,66 @@ class TestFullAPI(unittest.TestCase):
 
     def test_api_eager(self):
         with fluid.dygraph.base.guard():
-            with _test_eager_guard():
-                positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
-                positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
-                positive_4_int64 = fluid.layers.fill_constant(
-                    [1], "int64", 4, True
-                )
+            positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
+            positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+            positive_4_int64 = fluid.layers.fill_constant([1], "int64", 4, True)
 
-                out_1 = paddle.full(
-                    shape=[1, 2], dtype="float32", fill_value=1.1
-                )
+            out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
 
-                out_2 = paddle.full(
-                    shape=[1, positive_2_int32.item()],
-                    dtype="float32",
-                    fill_value=1.1,
-                )
+            out_2 = paddle.full(
+                shape=[1, positive_2_int32.item()],
+                dtype="float32",
+                fill_value=1.1,
+            )
 
-                out_3 = paddle.full(
-                    shape=[1, positive_2_int64.item()],
-                    dtype="float32",
-                    fill_value=1.1,
-                )
+            out_3 = paddle.full(
+                shape=[1, positive_2_int64.item()],
+                dtype="float32",
+                fill_value=1.1,
+            )
 
-                out_4 = paddle.full(
-                    shape=[1, 2], dtype="float32", fill_value=1.2
-                )
+            out_4 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.2)
 
-                out_5 = paddle.full(
-                    shape=[1, 2], dtype="float32", fill_value=1.1
-                )
+            out_5 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
 
-                out_6 = paddle.full(
-                    shape=[1, 2], dtype=np.float32, fill_value=1.1
-                )
+            out_6 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=1.1)
 
-                val = fluid.layers.fill_constant(
-                    shape=[1], dtype=np.float32, value=1.1
-                )
-                out_7 = paddle.full(
-                    shape=[1, 2], dtype=np.float32, fill_value=val
-                )
+            val = fluid.layers.fill_constant(
+                shape=[1], dtype=np.float32, value=1.1
+            )
+            out_7 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=val)
 
-                out_8 = paddle.full(
-                    shape=positive_2_int32, dtype="float32", fill_value=1.1
-                )
+            out_8 = paddle.full(
+                shape=positive_2_int32, dtype="float32", fill_value=1.1
+            )
 
-                out_9 = paddle.full(
-                    shape=[
-                        positive_2_int32,
-                        positive_2_int64,
-                        positive_4_int64,
-                    ],
-                    dtype="float32",
-                    fill_value=1.1,
-                )
+            out_9 = paddle.full(
+                shape=[
+                    positive_2_int32,
+                    positive_2_int64,
+                    positive_4_int64,
+                ],
+                dtype="float32",
+                fill_value=1.1,
+            )
 
-                # test for numpy.float64 as fill_value
-                out_10 = paddle.full_like(
-                    out_7, dtype=np.float32, fill_value=np.abs(1.1)
-                )
+            # test for numpy.float64 as fill_value
+            out_10 = paddle.full_like(
+                out_7, dtype=np.float32, fill_value=np.abs(1.1)
+            )
 
-                assert np.array_equal(
-                    out_1, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_2, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_3, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_4, np.full([1, 2], 1.2, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_5, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_6, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_7, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32"))
-                assert np.array_equal(
-                    out_9, np.full([2, 2, 4], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_10, np.full([1, 2], 1.1, dtype="float32")
-                )
+            assert np.array_equal(out_1, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_2, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_3, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_4, np.full([1, 2], 1.2, dtype="float32"))
+            assert np.array_equal(out_5, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_6, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_7, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32"))
+            assert np.array_equal(
+                out_9, np.full([2, 2, 4], 1.1, dtype="float32")
+            )
+            assert np.array_equal(out_10, np.full([1, 2], 1.1, dtype="float32"))
 
 
 class TestFullOpError(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_function_hook.py b/python/paddle/fluid/tests/unittests/test_function_hook.py
index b6adc83a7489590b855624fe4a152bc6d9ec6ee0..3ac3b7e526c2c3b591f960926be09b6e5c1ceee0 100644
--- a/python/paddle/fluid/tests/unittests/test_function_hook.py
+++ b/python/paddle/fluid/tests/unittests/test_function_hook.py
@@ -18,7 +18,6 @@ import numpy as np
 
 import paddle
 from paddle import _legacy_C_ops
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestCapture:
@@ -29,10 +28,6 @@
 test_cap = TestCapture()
 
 
-def test_hook():
-    test_cap.list.append(1)
-
-
 def grad_hook(grad):
     test_cap.list.append(2)
 
@@ -40,7 +35,7 @@
 
 
 class TestBakcwardFunctionHookError(unittest.TestCase):
-    def func_hook(self):
+    def test_hook(self):
         input_data = np.ones([4, 4]).astype('float32')
 
         x = paddle.to_tensor(input_data.astype(np.float32), stop_gradient=False)
@@ -49,19 +44,11 @@
         y = _legacy_C_ops.sigmoid(x)
         out = _legacy_C_ops.matmul_v2(y, z, 'trans_x', False, 'trans_y', False)
 
-        out._register_void_function_post_hook(test_hook)
-        y._register_void_function_post_hook(test_hook)
         y.register_hook(grad_hook)
 
         out.backward()
 
-        assert test_cap.list == [1, 2, 1]
-
-    def test_hook(self):
-        # _register_void_function_post_hook do not support in eager mode
-        with _test_eager_guard():
-            pass
-        self.func_hook()
+        assert test_cap.list == [2]
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py
index c87a5c77d7ac34352e438c503d0e5c0f10ee68c0..f45cf48afbf0dd24d20db9b72f13c8cf4c962d1c 100644
--- a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py
+++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py
@@ -22,7 +22,6 @@ import paddle.fluid.dygraph as dg
 import paddle.fluid.initializer as I
 import paddle.nn.functional as F
 from paddle import fluid
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestFunctionalConv2D(TestCase):
@@ -183,10 +182,6 @@
         self.place = fluid.CPUPlace()
         self._test_identity()
 
-    def test_identity_cpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_cpu()
-
     @unittest.skipIf(
         not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
     )
@@ -194,13 +189,6 @@
         self.place = fluid.CUDAPlace(0)
         self._test_identity()
 
-    @unittest.skipIf(
-        not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
-    )
-    def test_identity_gpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_gpu()
-
 
 class TestFunctionalConv2DError(TestCase):
     batch_size = 4
@@ -571,10 +559,6 @@ class TestFunctionalConv2DErrorCase10(TestCase):
         with self.assertRaises(ValueError):
             self.dygraph_case()
 
-    def test_dygraph_exception_check_eager(self):
-        with _test_eager_guard():
-            self.test_dygraph_exception()
-
     def test_static_exception(self):
         with self.assertRaises(ValueError):
             self.static_graph_case()
diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py
index 4c944d4fa3d1f41a1ca9e37765481f2dfb6d7505..ae402c874e6398453273ebff67ea1b8eb31e1312 100644
--- a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py
+++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py
@@ -22,7 +22,6 @@ import paddle.fluid.dygraph as dg
 import paddle.fluid.initializer as I
 import paddle.nn.functional as F
 from paddle import fluid
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestFunctionalConv3DTranspose(TestCase):
@@ -188,10 +187,6 @@
         self.place = fluid.CPUPlace()
         self._test_identity()
 
-    def test_identity_cpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_cpu()
-
     @unittest.skipIf(
         not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
     )
@@ -199,13 +194,6 @@
        self.place = fluid.CUDAPlace(0)
         self._test_identity()
 
-    @unittest.skipIf(
-        not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
-    )
-    def test_identity_gpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_gpu()
-
 
 class TestFunctionalConv3DTransposeError(TestCase):
     batch_size = 4
@@ -596,10 +584,6 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase):
         with self.assertRaises(ValueError):
             self.dygraph_case()
 
-    def test_dygraph_exception_check_eager(self):
-        with _test_eager_guard():
-            self.test_dygraph_exception()
-
     def test_static_exception(self):
         with self.assertRaises(ValueError):
             self.static_graph_case()