Unverified · Commit 1e0f8734 · Authored by 姜永久, committed by GitHub

rm unittests eager guard tests part7 faster2conv3d (#48820)

* rm unittests eager guard tests part7 faster2conv3d

* minor change

* rm post_hook test
Parent 3210ea8e
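Every file below follows the same refactor: the `run_*` body plus the `test_*` wrapper that executed it twice (once under `_test_eager_guard()`, once in legacy dygraph mode) is collapsed into a single plain `test_*` method, since eager mode is now the default. A minimal before/after sketch of the pattern (method names taken from the tokenizer test below; the elided bodies are placeholders):

```python
# Before: the body lived in run_padding, and test_padding ran it twice --
# once under the eager guard and once in the legacy dygraph mode.
def run_padding(self):
    ...  # the actual assertions

def test_padding(self):
    with _test_eager_guard():
        self.run_padding()
    self.run_padding()

# After: the guard and wrapper are dropped, and the body is renamed
# back to a plain test_* method.
def test_padding(self):
    ...  # the actual assertions
```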
@@ -22,7 +22,7 @@ import numpy as np
 import paddle
 import paddle.nn as nn
 from paddle import _legacy_C_ops
-from paddle.fluid.framework import _non_static_mode, _test_eager_guard, core
+from paddle.fluid.framework import _non_static_mode, core
 from paddle.fluid.layer_helper import LayerHelper
 
 sys.path.append("./tokenizer")
@@ -196,7 +196,7 @@ class TestBertTokenizerOp(unittest.TestCase):
         self.texts_tensor = to_string_tensor(self.texts, "texts")
         self.text_pairs_tensor = to_string_tensor(self.text_pairs, "text_pairs")
 
-    def run_padding(self):
+    def test_padding(self):
         self.init_data()
         self.max_seq_len = 128
         self.pad_to_max_seq_len = True
@@ -310,12 +310,7 @@ class TestBertTokenizerOp(unittest.TestCase):
             token_type_ids, py_token_type_ids, rtol=0, atol=0.01
         )
 
-    def test_padding(self):
-        with _test_eager_guard():
-            self.run_padding()
-        self.run_padding()
-
-    def run_no_padding(self):
+    def test_no_padding(self):
         self.init_data()
         self.max_seq_len = 128
         self.pad_to_max_seq_len = False
@@ -375,12 +370,7 @@ class TestBertTokenizerOp(unittest.TestCase):
             token_type_ids, py_token_type_ids, rtol=0, atol=0.01
         )
 
-    def test_no_padding(self):
-        with _test_eager_guard():
-            self.run_no_padding()
-        self.run_no_padding()
-
-    def run_is_split_into_words(self):
+    def test_is_split_into_words(self):
         self.init_data()
         self.is_split_into_words = True
@@ -403,11 +393,6 @@ class TestBertTokenizerOp(unittest.TestCase):
             token_type_ids, py_token_type_ids, rtol=0, atol=0.01
         )
 
-    def test_is_split_into_words(self):
-        with _test_eager_guard():
-            self.run_is_split_into_words()
-        self.run_is_split_into_words()
-
     def test_inference(self):
         self.init_data()
         if not os.path.exists(self.save_path):
......
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard
 
 
 def ref_frac(x):
@@ -64,17 +63,12 @@ class TestFracAPI(unittest.TestCase):
 
     def test_api_eager(self):
         paddle.disable_static(self.place)
-        with _test_eager_guard():
-            x_tensor = paddle.to_tensor(self.x_np)
-            out = paddle.frac(x_tensor)
+        x_tensor = paddle.to_tensor(self.x_np)
+        out = paddle.frac(x_tensor)
         out_ref = ref_frac(self.x_np)
         np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_api_dygraph()
-
 
 class TestFracInt32(TestFracAPI):
     """Test Frac API with data type int32"""
......
@@ -19,7 +19,7 @@ from op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import convert_np_dtype_to_dtype_
 from paddle.static import Program, program_guard
@@ -142,14 +142,13 @@ class TestFullLikeOp3(TestFullLikeOp1):
 class TestFullLikeOp4(unittest.TestCase):
     def test_skip_data_transform(self):
         paddle.disable_static()
-        with _test_eager_guard():
-            x = paddle.to_tensor(
-                [1.0, 2.0, 3.0, 4.0], place=paddle.CUDAPinnedPlace()
-            )
-            out = paddle.full_like(x, 1.0)
-            self.assertTrue(
-                (out.numpy() == np.ones([4]).astype(np.float32)).all(), True
-            )
+        x = paddle.to_tensor(
+            [1.0, 2.0, 3.0, 4.0], place=paddle.CUDAPinnedPlace()
+        )
+        out = paddle.full_like(x, 1.0)
+        self.assertTrue(
+            (out.numpy() == np.ones([4]).astype(np.float32)).all(), True
+        )
         paddle.enable_static()
......
@@ -19,7 +19,6 @@ import numpy as np
 
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard
 
 # Test python API
@@ -83,95 +82,66 @@ class TestFullAPI(unittest.TestCase):
 
     def test_api_eager(self):
         with fluid.dygraph.base.guard():
-            with _test_eager_guard():
-                positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
-                positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
-                positive_4_int64 = fluid.layers.fill_constant(
-                    [1], "int64", 4, True
-                )
+            positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
+            positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+            positive_4_int64 = fluid.layers.fill_constant([1], "int64", 4, True)
 
-                out_1 = paddle.full(
-                    shape=[1, 2], dtype="float32", fill_value=1.1
-                )
+            out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
 
-                out_2 = paddle.full(
-                    shape=[1, positive_2_int32.item()],
-                    dtype="float32",
-                    fill_value=1.1,
-                )
+            out_2 = paddle.full(
+                shape=[1, positive_2_int32.item()],
+                dtype="float32",
+                fill_value=1.1,
+            )
 
-                out_3 = paddle.full(
-                    shape=[1, positive_2_int64.item()],
-                    dtype="float32",
-                    fill_value=1.1,
-                )
+            out_3 = paddle.full(
+                shape=[1, positive_2_int64.item()],
+                dtype="float32",
+                fill_value=1.1,
+            )
 
-                out_4 = paddle.full(
-                    shape=[1, 2], dtype="float32", fill_value=1.2
-                )
+            out_4 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.2)
 
-                out_5 = paddle.full(
-                    shape=[1, 2], dtype="float32", fill_value=1.1
-                )
+            out_5 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
 
-                out_6 = paddle.full(
-                    shape=[1, 2], dtype=np.float32, fill_value=1.1
-                )
+            out_6 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=1.1)
 
-                val = fluid.layers.fill_constant(
-                    shape=[1], dtype=np.float32, value=1.1
-                )
-                out_7 = paddle.full(
-                    shape=[1, 2], dtype=np.float32, fill_value=val
-                )
+            val = fluid.layers.fill_constant(
+                shape=[1], dtype=np.float32, value=1.1
+            )
+            out_7 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=val)
 
-                out_8 = paddle.full(
-                    shape=positive_2_int32, dtype="float32", fill_value=1.1
-                )
+            out_8 = paddle.full(
+                shape=positive_2_int32, dtype="float32", fill_value=1.1
+            )
 
-                out_9 = paddle.full(
-                    shape=[
-                        positive_2_int32,
-                        positive_2_int64,
-                        positive_4_int64,
-                    ],
-                    dtype="float32",
-                    fill_value=1.1,
-                )
+            out_9 = paddle.full(
+                shape=[
                    positive_2_int32,
                    positive_2_int64,
                    positive_4_int64,
                ],
                dtype="float32",
                fill_value=1.1,
            )
 
-                # test for numpy.float64 as fill_value
-                out_10 = paddle.full_like(
-                    out_7, dtype=np.float32, fill_value=np.abs(1.1)
-                )
+            # test for numpy.float64 as fill_value
+            out_10 = paddle.full_like(
                out_7, dtype=np.float32, fill_value=np.abs(1.1)
            )
 
-                assert np.array_equal(
-                    out_1, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_2, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_3, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_4, np.full([1, 2], 1.2, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_5, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_6, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_7, np.full([1, 2], 1.1, dtype="float32")
-                )
-                assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32"))
-                assert np.array_equal(
-                    out_9, np.full([2, 2, 4], 1.1, dtype="float32")
-                )
-                assert np.array_equal(
-                    out_10, np.full([1, 2], 1.1, dtype="float32")
-                )
+            assert np.array_equal(out_1, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_2, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_3, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_4, np.full([1, 2], 1.2, dtype="float32"))
+            assert np.array_equal(out_5, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_6, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_7, np.full([1, 2], 1.1, dtype="float32"))
+            assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32"))
+            assert np.array_equal(
                out_9, np.full([2, 2, 4], 1.1, dtype="float32")
            )
+            assert np.array_equal(out_10, np.full([1, 2], 1.1, dtype="float32"))
 
 
 class TestFullOpError(unittest.TestCase):
......
@@ -18,7 +18,6 @@ import numpy as np
 
 import paddle
 from paddle import _legacy_C_ops
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestCapture:
@@ -29,10 +28,6 @@ class TestCapture:
 test_cap = TestCapture()
 
 
-def test_hook():
-    test_cap.list.append(1)
-
-
 def grad_hook(grad):
     test_cap.list.append(2)
@@ -40,7 +35,7 @@ def grad_hook(grad):
 
 class TestBakcwardFunctionHookError(unittest.TestCase):
-    def func_hook(self):
+    def test_hook(self):
         input_data = np.ones([4, 4]).astype('float32')
 
         x = paddle.to_tensor(input_data.astype(np.float32), stop_gradient=False)
@@ -49,19 +44,11 @@ class TestBakcwardFunctionHookError(unittest.TestCase):
         y = _legacy_C_ops.sigmoid(x)
         out = _legacy_C_ops.matmul_v2(y, z, 'trans_x', False, 'trans_y', False)
 
-        out._register_void_function_post_hook(test_hook)
-        y._register_void_function_post_hook(test_hook)
         y.register_hook(grad_hook)
 
         out.backward()
 
-        assert test_cap.list == [1, 2, 1]
-
-    def test_hook(self):
-        # _register_void_function_post_hook do not support in eager mode
-        with _test_eager_guard():
-            pass
-        self.func_hook()
+        assert test_cap.list == [2]
 
 
 if __name__ == "__main__":
......
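A note on the hook-test hunk above: `_register_void_function_post_hook` is not supported in eager mode (as the removed comment said), so only `grad_hook` fires during backward, and the expected capture list shrinks from `[1, 2, 1]` to `[2]`. A self-contained sketch of the surviving behavior, assuming a Paddle build with eager mode on by default (the `sum()` reduction here is only to get a scalar to call `backward()` on, not part of the original test):

```python
import numpy as np
import paddle

captured = []

def grad_hook(grad):
    # Fires once when the gradient of y is computed during backward.
    captured.append(2)
    return grad

x = paddle.to_tensor(np.ones([4, 4], dtype=np.float32), stop_gradient=False)
y = paddle.nn.functional.sigmoid(x)
y.register_hook(grad_hook)
y.sum().backward()
assert captured == [2]  # no post-hook entries (the 1s) in eager mode
```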
@@ -22,7 +22,6 @@ import paddle.fluid.dygraph as dg
 import paddle.fluid.initializer as I
 import paddle.nn.functional as F
 from paddle import fluid
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestFunctionalConv2D(TestCase):
@@ -183,10 +182,6 @@ class TestFunctionalConv2D(TestCase):
         self.place = fluid.CPUPlace()
         self._test_identity()
 
-    def test_identity_cpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_cpu()
-
     @unittest.skipIf(
         not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
     )
@@ -194,13 +189,6 @@ class TestFunctionalConv2D(TestCase):
         self.place = fluid.CUDAPlace(0)
         self._test_identity()
 
-    @unittest.skipIf(
-        not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
-    )
-    def test_identity_gpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_gpu()
-
 
 class TestFunctionalConv2DError(TestCase):
     batch_size = 4
@@ -571,10 +559,6 @@ class TestFunctionalConv2DErrorCase10(TestCase):
         with self.assertRaises(ValueError):
             self.dygraph_case()
 
-    def test_dygraph_exception_check_eager(self):
-        with _test_eager_guard():
-            self.test_dygraph_exception()
-
     def test_static_exception(self):
         with self.assertRaises(ValueError):
             self.static_graph_case()
......
@@ -22,7 +22,6 @@ import paddle.fluid.dygraph as dg
 import paddle.fluid.initializer as I
 import paddle.nn.functional as F
 from paddle import fluid
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestFunctionalConv3DTranspose(TestCase):
@@ -188,10 +187,6 @@ class TestFunctionalConv3DTranspose(TestCase):
         self.place = fluid.CPUPlace()
         self._test_identity()
 
-    def test_identity_cpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_cpu()
-
     @unittest.skipIf(
         not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
     )
@@ -199,13 +194,6 @@ class TestFunctionalConv3DTranspose(TestCase):
         self.place = fluid.CUDAPlace(0)
         self._test_identity()
 
-    @unittest.skipIf(
-        not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
-    )
-    def test_identity_gpu_check_eager(self):
-        with _test_eager_guard():
-            self.test_identity_gpu()
-
 
 class TestFunctionalConv3DTransposeError(TestCase):
     batch_size = 4
@@ -596,10 +584,6 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase):
         with self.assertRaises(ValueError):
             self.dygraph_case()
 
-    def test_dygraph_exception_check_eager(self):
-        with _test_eager_guard():
-            self.test_dygraph_exception()
-
     def test_static_exception(self):
         with self.assertRaises(ValueError):
             self.static_graph_case()
......