Unverified commit a641312a, authored by 姜永久, committed by GitHub

rm unittests eager guard tests part10 imperative_decorator2hook (#48827)

* rm unittests eager guard tests part10 imperative_decorator2hook

* modify
Parent 9dac1e71
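Every hunk below applies the same mechanical refactor: with eager mode now Paddle's default, the `_test_eager_guard` import is dropped, the `test_*` wrapper that ran each case twice (once under the guard, once without) is deleted, and the `func_*` helper is renamed to `test_*` so unittest discovers it directly. A minimal sketch of the pattern — `TestExample` is an illustrative name, not a class from this diff:

```python
import unittest

# Before this commit (sketch), each case lived in a func_* helper and a
# test_* wrapper ran it twice -- once under the eager guard, once without:
#
#     from paddle.fluid.framework import _test_eager_guard
#
#     class TestExample(unittest.TestCase):
#         def func_main(self):
#             ...  # the actual assertions
#
#         def test_main(self):
#             with _test_eager_guard():
#                 self.func_main()  # eager-mode pass
#             self.func_main()      # legacy-mode pass


# After this commit, eager mode is the default, so the guard import and the
# second pass are gone and the helper is renamed so unittest finds it:
class TestExample(unittest.TestCase):
    def test_main(self):
        self.assertTrue(True)  # stands in for the real assertions


if __name__ == '__main__':
    unittest.main()
```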
@@ -20,7 +20,6 @@ from test_imperative_base import new_program_scope
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
from paddle.fluid.framework import _test_eager_guard
class TestTracerMode(unittest.TestCase):
@@ -47,7 +46,7 @@ class TestTracerMode(unittest.TestCase):
finally:
self.assertEqual(rlt, ans)
def func_main(self):
def test_main(self):
with fluid.dygraph.guard():
self.tracer = framework._dygraph_tracer()
self.tracer._train_mode = self.init_mode
@@ -73,11 +72,6 @@ class TestTracerMode(unittest.TestCase):
with new_program_scope():
self.check_not_support_rlt(True)
def test_main(self):
with _test_eager_guard():
self.func_main()
self.func_main()
class TestTracerMode2(TestTracerMode):
def setUp(self):
@@ -91,7 +85,7 @@ class TestNoGradClass(unittest.TestCase):
self.assertEqual(self.tracer._has_grad, False)
return a
def func_main(self):
def test_main(self):
paddle.disable_static()
self.tracer = framework._dygraph_tracer()
@@ -128,11 +122,6 @@ class TestNoGradClass(unittest.TestCase):
self.assertEqual(a, b)
def test_main(self):
with _test_eager_guard():
self.func_main()
self.func_main()
if __name__ == '__main__':
unittest.main()
@@ -24,7 +24,6 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.framework import _test_eager_guard
from paddle.nn import Linear
@@ -375,7 +374,6 @@ class TestDygraphDeepCF(unittest.TestCase):
)
with fluid.dygraph.guard():
with _test_eager_guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
fluid.default_startup_program().random_seed = seed
@@ -394,12 +392,8 @@ class TestDygraphDeepCF(unittest.TestCase):
if slice + self.batch_size >= users_np.shape[0]:
break
prediction = deepcf(
to_variable(
users_np[slice : slice + self.batch_size]
),
to_variable(
items_np[slice : slice + self.batch_size]
),
to_variable(users_np[slice : slice + self.batch_size]),
to_variable(items_np[slice : slice + self.batch_size]),
)
loss = paddle.sum(
paddle.nn.functional.log_loss(
......
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.vision.models import resnet50, resnet101
@@ -45,7 +44,7 @@ def random_var(size, low=-1, high=1, dtype='float32'):
class TestEagerGrad(TestCase):
def func_simple_example_eager_grad(self):
def test_simple_example_eager_grad(self):
np.random.seed(2021)
paddle.set_device('cpu')
np_x = np.random.random((3, 3))
@@ -62,12 +61,7 @@ class TestEagerGrad(TestCase):
self.assertEqual(dx[0].stop_gradient, True)
np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)
def test_simple_example_eager_grad(self):
with _test_eager_guard():
self.func_simple_example_eager_grad()
self.func_simple_example_eager_grad()
def func_simple_example_eager_grad_allow_unused(self):
def test_simple_example_eager_grad_allow_unused(self):
np.random.seed(2021)
paddle.set_device('cpu')
np_x = np.random.random((3, 3))
@@ -88,12 +82,7 @@ class TestEagerGrad(TestCase):
# x is unused input in the graph
self.assertIsNone(dx[1])
def test_simple_example_eager_grad_allow_unused(self):
with _test_eager_guard():
self.func_simple_example_eager_grad_allow_unused()
self.func_simple_example_eager_grad_allow_unused()
def func_simple_example_eager_grad_not_allow_unused(self):
def test_simple_example_eager_grad_not_allow_unused(self):
np.random.seed(2021)
paddle.set_device('cpu')
np_x = np.random.random((3, 3))
@@ -112,12 +101,7 @@ class TestEagerGrad(TestCase):
error_msg = str(e)
assert error_msg.find("allow_unused") > 0
def test_simple_example_eager_grad_not_allow_unused(self):
with _test_eager_guard():
self.func_simple_example_eager_grad_not_allow_unused()
self.func_simple_example_eager_grad_not_allow_unused()
def func_simple_example_eager_grad_duplicate_input(self):
def test_simple_example_eager_grad_duplicate_input(self):
np.random.seed(2021)
paddle.set_device('cpu')
np_x = np.random.random((3, 3))
@@ -136,12 +120,7 @@ class TestEagerGrad(TestCase):
error_msg = str(e)
assert error_msg.find("duplicate") > 0
def test_simple_example_eager_grad_duplicate_input(self):
with _test_eager_guard():
self.func_simple_example_eager_grad_duplicate_input()
self.func_simple_example_eager_grad_duplicate_input()
def func_simple_example_eager_grad_duplicate_output(self):
def test_simple_example_eager_grad_duplicate_output(self):
np.random.seed(2021)
paddle.set_device('cpu')
np_x = np.random.random((3, 3))
@@ -160,13 +139,7 @@ class TestEagerGrad(TestCase):
error_msg = str(e)
assert error_msg.find("duplicate") > 0
def test_simple_example_eager_grad_duplicate_output(self):
with _test_eager_guard():
self.func_simple_example_eager_grad_duplicate_output()
self.func_simple_example_eager_grad_duplicate_output()
def test_simple_example_eager_two_grad_output(self):
with _test_eager_guard():
x1 = paddle.to_tensor([1.0, 2.0])
x1.stop_gradient = False
x2 = paddle.to_tensor([1.0, 2.0])
@@ -233,7 +206,7 @@ class TestDygraphDoubleGrad(TestCase):
)
@dygraph_guard
def func_exception(self):
def test_exception(self):
with self.assertRaises(AssertionError):
self.grad(None, None)
@@ -266,13 +239,8 @@ class TestDygraphDoubleGrad(TestCase):
with self.assertRaises(AssertionError):
self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1)
def test_exception(self):
with _test_eager_guard():
self.func_exception()
self.func_exception()
@dygraph_guard
def func_simple_example(self):
def test_simple_example(self):
x = random_var(self.shape)
x.stop_gradient = False
y = x + 1
@@ -306,13 +274,8 @@ class TestDygraphDoubleGrad(TestCase):
grad_with_none_and_not_none.stop_gradient, create_graph
)
def test_simple_example(self):
with _test_eager_guard():
self.func_simple_example()
self.func_simple_example()
@dygraph_guard
def func_example_no_grad_vars(self):
def test_example_no_grad_vars(self):
x = random_var(self.shape)
x_np = x.numpy()
numel = x_np.size
@@ -343,13 +306,8 @@ class TestDygraphDoubleGrad(TestCase):
np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
def test_example_no_grad_vars(self):
with _test_eager_guard():
self.func_example_no_grad_vars()
self.func_example_no_grad_vars()
@dygraph_guard
def func_none_one_initial_gradient(self):
def test_none_one_initial_gradient(self):
numel = 1
for s in self.shape:
numel *= s
@@ -425,13 +383,8 @@ class TestDygraphDoubleGrad(TestCase):
grad_z.numpy(), original_random_grad_z
)
def test_none_one_initial_gradient(self):
with _test_eager_guard():
self.func_none_one_initial_gradient()
self.func_none_one_initial_gradient()
@dygraph_guard
def func_example_with_gradient_accumulation_and_create_graph(self):
def test_example_with_gradient_accumulation_and_create_graph(self):
x = random_var(self.shape)
x_np = x.numpy()
numel = x_np.size
@@ -478,13 +431,8 @@ class TestDygraphDoubleGrad(TestCase):
x_grad_actual, x_grad_expected, rtol=1e-05
)
def test_example_with_gradient_accumulation_and_create_graph(self):
with _test_eager_guard():
self.func_example_with_gradient_accumulation_and_create_graph()
self.func_example_with_gradient_accumulation_and_create_graph()
@dygraph_guard
def func_example_with_gradient_accumulation_and_no_grad_vars(self):
def test_example_with_gradient_accumulation_and_no_grad_vars(self):
x = random_var(self.shape)
x_np = x.numpy()
numel = x_np.size
@@ -529,13 +477,8 @@ class TestDygraphDoubleGrad(TestCase):
).astype('float32')
np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)
def test_example_with_gradient_accumulation_and_no_grad_vars(self):
with _test_eager_guard():
self.func_example_with_gradient_accumulation_and_no_grad_vars()
self.func_example_with_gradient_accumulation_and_no_grad_vars()
@dygraph_guard
def func_example_with_gradient_accumulation_and_not_create_graph(self):
def test_example_with_gradient_accumulation_and_not_create_graph(self):
x = random_var(self.shape)
x_np = x.numpy()
numel = x_np.size
@@ -566,11 +509,6 @@ class TestDygraphDoubleGrad(TestCase):
x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)
def test_example_with_gradient_accumulation_and_not_create_graph(self):
with _test_eager_guard():
self.func_example_with_gradient_accumulation_and_not_create_graph()
self.func_example_with_gradient_accumulation_and_not_create_graph()
class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
def setUp(self):
@@ -579,7 +517,7 @@ class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
class TestDygraphDoubleGradVisitedUniq(TestCase):
def func_compare(self):
def test_compare(self):
value = (
np.random.uniform(-0.5, 0.5, 100)
.reshape(10, 2, 5)
@@ -628,14 +566,9 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
np.testing.assert_array_equal(grad_1, grad_2)
def test_compare(self):
with _test_eager_guard():
self.func_compare()
self.func_compare()
class TestRaiseNoDoubleGradOp(TestCase):
def raise_no_grad_op(self):
def test_no_grad_op(self):
with fluid.dygraph.guard():
x = paddle.ones(shape=[2, 3, 2, 2], dtype='float32')
x.stop_gradient = False
@@ -648,9 +581,6 @@ class TestRaiseNoDoubleGradOp(TestCase):
loss = paddle.mean(dx)
loss.backward()
def test_raise(self):
self.assertRaises(RuntimeError, self.raise_no_grad_op)
class TestDoubleGradResNet(TestCase):
def setUp(self):
@@ -660,7 +590,6 @@ class TestDoubleGradResNet(TestCase):
@dygraph_guard
def test_resnet_resnet50(self):
with _test_eager_guard():
model = resnet50(pretrained=False)
egr_data = paddle.to_tensor(self.data)
egr_data.stop_gradient = False
@@ -694,7 +623,6 @@ class TestDoubleGradResNet(TestCase):
@dygraph_guard
def test_resnet_resnet101(self):
with _test_eager_guard():
model = resnet101(pretrained=False)
egr_data = paddle.to_tensor(self.data)
egr_data.stop_gradient = False
@@ -730,13 +658,8 @@ class TestDoubleGradResNet(TestCase):
class TestDoubleGradBasics(TestCase):
def test_matmul(self):
input_numpy = np.ones([3, 3]) * 2
with _test_eager_guard():
x = paddle.to_tensor(
input_numpy, stop_gradient=False, dtype='float32'
)
y = paddle.to_tensor(
input_numpy, stop_gradient=False, dtype='float32'
)
x = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
y = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
grad_out = paddle.to_tensor(
np.ones([3, 3]), stop_gradient=False, dtype='float32'
)
@@ -762,9 +685,7 @@ class TestDoubleGradBasics(TestCase):
np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
grad_out_grad_ref = np.ones([3, 3]) * 6.0
np.testing.assert_array_equal(
grad_out.grad.numpy(), grad_out_grad_ref
)
np.testing.assert_array_equal(grad_out.grad.numpy(), grad_out_grad_ref)
if __name__ == '__main__':
......
@@ -19,7 +19,6 @@ from test_imperative_base import new_program_scope
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
class MLP(fluid.Layer):
@@ -54,7 +53,7 @@ class MLP(fluid.Layer):
class TestDygraphFramework(unittest.TestCase):
def func_test_dygraph_backward(self):
def test_dygraph_backward(self):
with new_program_scope():
mlp = MLP(input_size=2)
var_inp = fluid.layers.data(
@@ -69,18 +68,8 @@ class TestDygraphFramework(unittest.TestCase):
except AssertionError as e:
self.assertTrue((e is not None))
def test_dygraph_backward(self):
with _test_eager_guard():
self.func_test_dygraph_backward()
self.func_test_dygraph_backward()
def func_test_dygraph_to_string(self):
def test_dygraph_to_string(self):
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
with fluid.dygraph.guard():
var_inp = fluid.dygraph.to_variable(np_inp)
print(str(var_inp))
def test_dygraph_to_string(self):
with _test_eager_guard():
self.func_test_dygraph_to_string()
self.func_test_dygraph_to_string()
@@ -21,7 +21,6 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.optimizer import SGDOptimizer
from paddle.nn import Linear
@@ -56,7 +55,7 @@ class Generator(fluid.Layer):
class TestDygraphGAN(unittest.TestCase):
def func_test_gan_float32(self):
def test_gan_float32(self):
seed = 90
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
@@ -271,11 +270,6 @@ class TestDygraphGAN(unittest.TestCase):
for k, v in dy_params2.items():
np.testing.assert_allclose(v, static_params[k], rtol=1e-05)
def test_gan_float32(self):
with _test_eager_guard():
self.func_test_gan_float32()
self.func_test_gan_float32()
if __name__ == '__main__':
unittest.main()
@@ -23,7 +23,6 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.optimizer import AdamOptimizer
@@ -64,7 +63,7 @@ class GCN(fluid.Layer):
class TestDygraphGNN(unittest.TestCase):
def func_gnn_float32(self):
def test_gnn_float32(self):
paddle.seed(90)
paddle.framework.random._manual_program_seed(90)
startup = fluid.Program()
@@ -188,11 +187,6 @@ class TestDygraphGNN(unittest.TestCase):
)
sys.stderr.write('%s %s\n' % (static_loss, loss_value))
def test_gnn_float32(self):
with _test_eager_guard():
self.func_gnn_float32()
self.func_gnn_float32()
if __name__ == '__main__':
paddle.enable_static()
......
@@ -16,11 +16,7 @@ import unittest
import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import (
_in_legacy_dygraph,
_test_eager_guard,
in_dygraph_mode,
)
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
class TestDataParallelGroup(unittest.TestCase):
@@ -170,20 +166,6 @@ class TestDataParallelGroup(unittest.TestCase):
)
self.assertEqual([[1, 0], [3], [2]], res)
def test_construct_group_in_legacy_mode(self):
with _test_eager_guard():
pass
self.test_construct_group0()
self.test_construct_group1()
self.test_construct_group2()
self.test_construct_group3()
self.test_construct_group4()
self.test_construct_group5()
self.test_construct_group6()
self.test_construct_group7()
self.test_construct_group8()
self.test_construct_group9()
if __name__ == '__main__':
unittest.main()
@@ -20,7 +20,6 @@ from test_imperative_lod_tensor_to_selected_rows import SimpleNet
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.dygraph.base as base
from paddle.fluid.framework import _test_eager_guard
call_forward_post_hook = False
call_forward_pre_hook = False
@@ -47,7 +46,7 @@ def forward_pre_hook1(layer, input):
class Test_Forward_Hook(unittest.TestCase):
# test forward_pre_hook and forward_post_hook that have return value
def func_forward_hook_return_value(self):
def test_forward_hook_return_value(self):
seed = 90
places = [fluid.CPUPlace()]
@@ -129,7 +128,7 @@ class Test_Forward_Hook(unittest.TestCase):
)
# test forward_pre_hook and forward_post_hook that don't have return value
def func_forward_hook(self):
def test_forward_hook(self):
seed = 90
places = [fluid.CPUPlace()]
@@ -210,13 +209,6 @@ class Test_Forward_Hook(unittest.TestCase):
self.assertFalse(call_forward_post_hook)
self.assertFalse(call_forward_pre_hook)
def test_forward_hook_return_value(self):
with _test_eager_guard():
self.func_forward_hook()
self.func_forward_hook_return_value()
self.func_forward_hook()
self.func_forward_hook_return_value()
if __name__ == '__main__':
unittest.main()