Unverified commit a641312a, authored by 姜永久 and committed by GitHub

rm unittests eger guard tests part10 imperative_decorator2hook (#48827)

* rm unittests eger guard tests part10 imperative_decorator2hook

* modify
Parent 9dac1e71
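This commit applies one mechanical pattern across all touched test files: the `_test_eager_guard()` wrapper methods, which ran each test body twice (once under the eager guard and once in legacy dygraph mode), are deleted, and the `func_*` bodies they invoked are renamed to `test_*` so unittest discovers them directly. Below is a minimal sketch of that pattern; the `ExampleTest` class is hypothetical and not part of this diff.

import unittest

import paddle


class ExampleTest(unittest.TestCase):
    # Old style (removed by this commit): the logic lived in func_main,
    # and a test_main wrapper ran it twice:
    #
    #     def test_main(self):
    #         with _test_eager_guard():  # first pass: eager mode
    #             self.func_main()
    #         self.func_main()           # second pass: legacy dygraph mode
    #
    # New style: eager mode is the only dygraph mode, so the former
    # func_main body becomes the test itself.
    def test_main(self):
        x = paddle.to_tensor([1.0, 2.0])
        self.assertEqual(x.shape, [2])


if __name__ == '__main__':
    unittest.main()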
@@ -20,7 +20,6 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
-from paddle.fluid.framework import _test_eager_guard


 class TestTracerMode(unittest.TestCase):
@@ -47,7 +46,7 @@ class TestTracerMode(unittest.TestCase):
         finally:
             self.assertEqual(rlt, ans)

-    def func_main(self):
+    def test_main(self):
         with fluid.dygraph.guard():
             self.tracer = framework._dygraph_tracer()
             self.tracer._train_mode = self.init_mode
@@ -73,11 +72,6 @@ class TestTracerMode(unittest.TestCase):
             with new_program_scope():
                 self.check_not_support_rlt(True)

-    def test_main(self):
-        with _test_eager_guard():
-            self.func_main()
-        self.func_main()
-

 class TestTracerMode2(TestTracerMode):
     def setUp(self):
@@ -91,7 +85,7 @@ class TestNoGradClass(unittest.TestCase):
         self.assertEqual(self.tracer._has_grad, False)
         return a

-    def func_main(self):
+    def test_main(self):
         paddle.disable_static()
         self.tracer = framework._dygraph_tracer()
@@ -128,11 +122,6 @@ class TestNoGradClass(unittest.TestCase):
         self.assertEqual(a, b)

-    def test_main(self):
-        with _test_eager_guard():
-            self.func_main()
-        self.func_main()
-

 if __name__ == '__main__':
     unittest.main()
@@ -24,7 +24,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
@@ -375,47 +374,42 @@ class TestDygraphDeepCF(unittest.TestCase):
         )
         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                paddle.seed(seed)
-                paddle.framework.random._manual_program_seed(seed)
-                fluid.default_startup_program().random_seed = seed
-                fluid.default_main_program().random_seed = seed
+            paddle.seed(seed)
+            paddle.framework.random._manual_program_seed(seed)
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed

-                deepcf = DeepCF(num_users, num_items, matrix)
-                adam = fluid.optimizer.AdamOptimizer(
-                    0.01, parameter_list=deepcf.parameters()
-                )
+            deepcf = DeepCF(num_users, num_items, matrix)
+            adam = fluid.optimizer.AdamOptimizer(
+                0.01, parameter_list=deepcf.parameters()
+            )

-                for e in range(self.num_epoches):
-                    sys.stderr.write('epoch %d\n' % e)
-                    for slice in range(
-                        0, self.batch_size * self.num_batches, self.batch_size
-                    ):
-                        if slice + self.batch_size >= users_np.shape[0]:
-                            break
-                        prediction = deepcf(
-                            to_variable(
-                                users_np[slice : slice + self.batch_size]
-                            ),
-                            to_variable(
-                                items_np[slice : slice + self.batch_size]
-                            ),
-                        )
-                        loss = paddle.sum(
-                            paddle.nn.functional.log_loss(
-                                prediction,
-                                to_variable(
-                                    labels_np[slice : slice + self.batch_size]
-                                ),
-                            )
-                        )
-                        loss.backward()
-                        adam.minimize(loss)
-                        deepcf.clear_gradients()
-                        eager_loss = loss.numpy()
-                        sys.stderr.write(
-                            'eager loss: %s %s\n' % (slice, eager_loss)
-                        )
+            for e in range(self.num_epoches):
+                sys.stderr.write('epoch %d\n' % e)
+                for slice in range(
+                    0, self.batch_size * self.num_batches, self.batch_size
+                ):
+                    if slice + self.batch_size >= users_np.shape[0]:
+                        break
+                    prediction = deepcf(
+                        to_variable(users_np[slice : slice + self.batch_size]),
+                        to_variable(items_np[slice : slice + self.batch_size]),
+                    )
+                    loss = paddle.sum(
+                        paddle.nn.functional.log_loss(
+                            prediction,
+                            to_variable(
+                                labels_np[slice : slice + self.batch_size]
+                            ),
+                        )
+                    )
+                    loss.backward()
+                    adam.minimize(loss)
+                    deepcf.clear_gradients()
+                    eager_loss = loss.numpy()
+                    sys.stderr.write(
+                        'eager loss: %s %s\n' % (slice, eager_loss)
+                    )

         self.assertEqual(static_loss, dy_loss)
         self.assertEqual(static_loss, dy_loss2)
...
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.nn.functional as F
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.wrapped_decorator import wrap_decorator
 from paddle.vision.models import resnet50, resnet101
@@ -45,7 +44,7 @@ def random_var(size, low=-1, high=1, dtype='float32'):
 class TestEagerGrad(TestCase):
-    def func_simple_example_eager_grad(self):
+    def test_simple_example_eager_grad(self):
         np.random.seed(2021)
         paddle.set_device('cpu')
         np_x = np.random.random((3, 3))
@@ -62,12 +61,7 @@ class TestEagerGrad(TestCase):
         self.assertEqual(dx[0].stop_gradient, True)
         np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)

-    def test_simple_example_eager_grad(self):
-        with _test_eager_guard():
-            self.func_simple_example_eager_grad()
-        self.func_simple_example_eager_grad()
-
-    def func_simple_example_eager_grad_allow_unused(self):
+    def test_simple_example_eager_grad_allow_unused(self):
         np.random.seed(2021)
         paddle.set_device('cpu')
         np_x = np.random.random((3, 3))
@@ -88,12 +82,7 @@ class TestEagerGrad(TestCase):
         # x is unused input in the graph
         self.assertIsNone(dx[1])

-    def test_simple_example_eager_grad_allow_unused(self):
-        with _test_eager_guard():
-            self.func_simple_example_eager_grad_allow_unused()
-        self.func_simple_example_eager_grad_allow_unused()
-
-    def func_simple_example_eager_grad_not_allow_unused(self):
+    def test_simple_example_eager_grad_not_allow_unused(self):
         np.random.seed(2021)
         paddle.set_device('cpu')
         np_x = np.random.random((3, 3))
@@ -112,12 +101,7 @@ class TestEagerGrad(TestCase):
             error_msg = str(e)
             assert error_msg.find("allow_unused") > 0

-    def test_simple_example_eager_grad_not_allow_unused(self):
-        with _test_eager_guard():
-            self.func_simple_example_eager_grad_not_allow_unused()
-        self.func_simple_example_eager_grad_not_allow_unused()
-
-    def func_simple_example_eager_grad_duplicate_input(self):
+    def test_simple_example_eager_grad_duplicate_input(self):
         np.random.seed(2021)
         paddle.set_device('cpu')
         np_x = np.random.random((3, 3))
@@ -136,12 +120,7 @@ class TestEagerGrad(TestCase):
             error_msg = str(e)
             assert error_msg.find("duplicate") > 0

-    def test_simple_example_eager_grad_duplicate_input(self):
-        with _test_eager_guard():
-            self.func_simple_example_eager_grad_duplicate_input()
-        self.func_simple_example_eager_grad_duplicate_input()
-
-    def func_simple_example_eager_grad_duplicate_output(self):
+    def test_simple_example_eager_grad_duplicate_output(self):
         np.random.seed(2021)
         paddle.set_device('cpu')
         np_x = np.random.random((3, 3))
@@ -160,34 +139,28 @@ class TestEagerGrad(TestCase):
             error_msg = str(e)
             assert error_msg.find("duplicate") > 0

-    def test_simple_example_eager_grad_duplicate_output(self):
-        with _test_eager_guard():
-            self.func_simple_example_eager_grad_duplicate_output()
-        self.func_simple_example_eager_grad_duplicate_output()
-
     def test_simple_example_eager_two_grad_output(self):
-        with _test_eager_guard():
-            x1 = paddle.to_tensor([1.0, 2.0])
-            x1.stop_gradient = False
-            x2 = paddle.to_tensor([1.0, 2.0])
-            x2.stop_gradient = False
-            out1 = x1 * 2
-            out2 = x2 * 2
+        x1 = paddle.to_tensor([1.0, 2.0])
+        x1.stop_gradient = False
+        x2 = paddle.to_tensor([1.0, 2.0])
+        x2.stop_gradient = False
+        out1 = x1 * 2
+        out2 = x2 * 2

-            dout2_record_by_hook = []
+        dout2_record_by_hook = []

-            def record_hook(grad):
-                dout2_record_by_hook.append(grad)
+        def record_hook(grad):
+            dout2_record_by_hook.append(grad)

-            out2.register_hook(record_hook)
+        out2.register_hook(record_hook)

-            out3 = paddle.multiply(out1, out2)
-            out4 = paddle.mean(out3)
-            egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3])
+        out3 = paddle.multiply(out1, out2)
+        out4 = paddle.mean(out3)
+        egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3])

-            np.testing.assert_array_equal(
-                dout2_record_by_hook[0].numpy(), np.array([1.0, 2.0])
-            )
+        np.testing.assert_array_equal(
+            dout2_record_by_hook[0].numpy(), np.array([1.0, 2.0])
+        )

         x1 = paddle.to_tensor([1.0, 2.0])
         x1.stop_gradient = False
@@ -233,7 +206,7 @@ class TestDygraphDoubleGrad(TestCase):
         )

     @dygraph_guard
-    def func_exception(self):
+    def test_exception(self):
         with self.assertRaises(AssertionError):
             self.grad(None, None)
@@ -266,13 +239,8 @@ class TestDygraphDoubleGrad(TestCase):
         with self.assertRaises(AssertionError):
             self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1)

-    def test_exception(self):
-        with _test_eager_guard():
-            self.func_exception()
-        self.func_exception()
-
     @dygraph_guard
-    def func_simple_example(self):
+    def test_simple_example(self):
         x = random_var(self.shape)
         x.stop_gradient = False
         y = x + 1
@@ -306,13 +274,8 @@ class TestDygraphDoubleGrad(TestCase):
             grad_with_none_and_not_none.stop_gradient, create_graph
         )

-    def test_simple_example(self):
-        with _test_eager_guard():
-            self.func_simple_example()
-        self.func_simple_example()
-
     @dygraph_guard
-    def func_example_no_grad_vars(self):
+    def test_example_no_grad_vars(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -343,13 +306,8 @@ class TestDygraphDoubleGrad(TestCase):
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)

-    def test_example_no_grad_vars(self):
-        with _test_eager_guard():
-            self.func_example_no_grad_vars()
-        self.func_example_no_grad_vars()
-
     @dygraph_guard
-    def func_none_one_initial_gradient(self):
+    def test_none_one_initial_gradient(self):
         numel = 1
         for s in self.shape:
             numel *= s
@@ -425,13 +383,8 @@ class TestDygraphDoubleGrad(TestCase):
                 grad_z.numpy(), original_random_grad_z
             )

-    def test_none_one_initial_gradient(self):
-        with _test_eager_guard():
-            self.func_none_one_initial_gradient()
-        self.func_none_one_initial_gradient()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_create_graph(self):
+    def test_example_with_gradient_accumulation_and_create_graph(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -478,13 +431,8 @@ class TestDygraphDoubleGrad(TestCase):
             x_grad_actual, x_grad_expected, rtol=1e-05
         )

-    def test_example_with_gradient_accumulation_and_create_graph(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_create_graph()
-        self.func_example_with_gradient_accumulation_and_create_graph()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_no_grad_vars(self):
+    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -529,13 +477,8 @@ class TestDygraphDoubleGrad(TestCase):
         ).astype('float32')
         np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)

-    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_no_grad_vars()
-        self.func_example_with_gradient_accumulation_and_no_grad_vars()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_not_create_graph(self):
+    def test_example_with_gradient_accumulation_and_not_create_graph(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -566,11 +509,6 @@ class TestDygraphDoubleGrad(TestCase):
         x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
         np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)

-    def test_example_with_gradient_accumulation_and_not_create_graph(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_not_create_graph()
-        self.func_example_with_gradient_accumulation_and_not_create_graph()
-

 class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
     def setUp(self):
@@ -579,7 +517,7 @@ class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):

 class TestDygraphDoubleGradVisitedUniq(TestCase):
-    def func_compare(self):
+    def test_compare(self):
         value = (
             np.random.uniform(-0.5, 0.5, 100)
             .reshape(10, 2, 5)
@@ -628,14 +566,9 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
         np.testing.assert_array_equal(grad_1, grad_2)

-    def test_compare(self):
-        with _test_eager_guard():
-            self.func_compare()
-        self.func_compare()
-

 class TestRaiseNoDoubleGradOp(TestCase):
-    def raise_no_grad_op(self):
+    def test_no_grad_op(self):
         with fluid.dygraph.guard():
             x = paddle.ones(shape=[2, 3, 2, 2], dtype='float32')
             x.stop_gradient = False
@@ -648,9 +581,6 @@ class TestRaiseNoDoubleGradOp(TestCase):
             loss = paddle.mean(dx)
             loss.backward()

-    def test_raise(self):
-        self.assertRaises(RuntimeError, self.raise_no_grad_op)
-

 class TestDoubleGradResNet(TestCase):
     def setUp(self):
@@ -660,20 +590,19 @@ class TestDoubleGradResNet(TestCase):

     @dygraph_guard
     def test_resnet_resnet50(self):
-        with _test_eager_guard():
-            model = resnet50(pretrained=False)
-            egr_data = paddle.to_tensor(self.data)
-            egr_data.stop_gradient = False
-            egr_out = model(egr_data)
-            egr_preds = paddle.argmax(egr_out, axis=1)
-            egr_label_onehot = paddle.nn.functional.one_hot(
-                paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]
-            )
-            egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)
+        model = resnet50(pretrained=False)
+        egr_data = paddle.to_tensor(self.data)
+        egr_data.stop_gradient = False
+        egr_out = model(egr_data)
+        egr_preds = paddle.argmax(egr_out, axis=1)
+        egr_label_onehot = paddle.nn.functional.one_hot(
+            paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]
+        )
+        egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)

-            egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
-            egr_g_numpy = egr_g.numpy()
-            self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))
+        egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
+        egr_g_numpy = egr_g.numpy()
+        self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))

         model = resnet50(pretrained=False)
         data = paddle.to_tensor(self.data)
@@ -694,20 +623,19 @@ class TestDoubleGradResNet(TestCase):

     @dygraph_guard
     def test_resnet_resnet101(self):
-        with _test_eager_guard():
-            model = resnet101(pretrained=False)
-            egr_data = paddle.to_tensor(self.data)
-            egr_data.stop_gradient = False
-            egr_out = model(egr_data)
-            egr_preds = paddle.argmax(egr_out, axis=1)
-            egr_label_onehot = paddle.nn.functional.one_hot(
-                paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]
-            )
-            egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)
+        model = resnet101(pretrained=False)
+        egr_data = paddle.to_tensor(self.data)
+        egr_data.stop_gradient = False
+        egr_out = model(egr_data)
+        egr_preds = paddle.argmax(egr_out, axis=1)
+        egr_label_onehot = paddle.nn.functional.one_hot(
+            paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]
+        )
+        egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)

-            egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
-            egr_g_numpy = egr_g.numpy()
-            self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))
+        egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
+        egr_g_numpy = egr_g.numpy()
+        self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))

         model = resnet101(pretrained=False)
         data = paddle.to_tensor(self.data)
@@ -730,41 +658,34 @@ class TestDoubleGradResNet(TestCase):

 class TestDoubleGradBasics(TestCase):
     def test_matmul(self):
         input_numpy = np.ones([3, 3]) * 2
-        with _test_eager_guard():
-            x = paddle.to_tensor(
-                input_numpy, stop_gradient=False, dtype='float32'
-            )
-            y = paddle.to_tensor(
-                input_numpy, stop_gradient=False, dtype='float32'
-            )
-            grad_out = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
+        x = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
+        y = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
+        grad_out = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )

-            out = paddle.matmul(x, y, False, False)
-            new_x_g, new_y_g = paddle.grad(
-                [out], [x, y], [grad_out], retain_graph=True, create_graph=True
-            )
-            new_x_g.backward()
+        out = paddle.matmul(x, y, False, False)
+        new_x_g, new_y_g = paddle.grad(
+            [out], [x, y], [grad_out], retain_graph=True, create_graph=True
+        )
+        new_x_g.backward()

-            out_ref = np.ones([3, 3]) * 12.0
-            np.testing.assert_array_equal(out.numpy(), out_ref)
+        out_ref = np.ones([3, 3]) * 12.0
+        np.testing.assert_array_equal(out.numpy(), out_ref)

-            new_x_g_ref = np.ones([3, 3]) * 6.0
-            new_y_g_ref = np.ones([3, 3]) * 6.0
-            np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
-            np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
+        new_x_g_ref = np.ones([3, 3]) * 6.0
+        new_y_g_ref = np.ones([3, 3]) * 6.0
+        np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
+        np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)

-            x_grad_ref = np.ones([3, 3]) * 0.0
-            np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
+        x_grad_ref = np.ones([3, 3]) * 0.0
+        np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)

-            y_grad_ref = np.ones([3, 3]) * 3.0
-            np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
+        y_grad_ref = np.ones([3, 3]) * 3.0
+        np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)

-            grad_out_grad_ref = np.ones([3, 3]) * 6.0
-            np.testing.assert_array_equal(
-                grad_out.grad.numpy(), grad_out_grad_ref
-            )
+        grad_out_grad_ref = np.ones([3, 3]) * 6.0
+        np.testing.assert_array_equal(grad_out.grad.numpy(), grad_out_grad_ref)


 if __name__ == '__main__':
...
@@ -19,7 +19,6 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class MLP(fluid.Layer):
@@ -54,7 +53,7 @@ class MLP(fluid.Layer):
 class TestDygraphFramework(unittest.TestCase):
-    def func_test_dygraph_backward(self):
+    def test_dygraph_backward(self):
         with new_program_scope():
             mlp = MLP(input_size=2)
             var_inp = fluid.layers.data(
@@ -69,18 +68,8 @@ class TestDygraphFramework(unittest.TestCase):
         except AssertionError as e:
             self.assertTrue((e is not None))

-    def test_dygraph_backward(self):
-        with _test_eager_guard():
-            self.func_test_dygraph_backward()
-        self.func_test_dygraph_backward()
-
-    def func_test_dygraph_to_string(self):
+    def test_dygraph_to_string(self):
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
         with fluid.dygraph.guard():
             var_inp = fluid.dygraph.to_variable(np_inp)
             print(str(var_inp))
-
-    def test_dygraph_to_string(self):
-        with _test_eager_guard():
-            self.func_test_dygraph_to_string()
-        self.func_test_dygraph_to_string()
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.nn import Linear
@@ -56,7 +55,7 @@ class Generator(fluid.Layer):
 class TestDygraphGAN(unittest.TestCase):
-    def func_test_gan_float32(self):
+    def test_gan_float32(self):
         seed = 90
         paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
@@ -271,11 +270,6 @@ class TestDygraphGAN(unittest.TestCase):
         for k, v in dy_params2.items():
             np.testing.assert_allclose(v, static_params[k], rtol=1e-05)

-    def test_gan_float32(self):
-        with _test_eager_guard():
-            self.func_test_gan_float32()
-        self.func_test_gan_float32()
-

 if __name__ == '__main__':
     unittest.main()
@@ -23,7 +23,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import AdamOptimizer
@@ -64,7 +63,7 @@ class GCN(fluid.Layer):
 class TestDygraphGNN(unittest.TestCase):
-    def func_gnn_float32(self):
+    def test_gnn_float32(self):
         paddle.seed(90)
         paddle.framework.random._manual_program_seed(90)
         startup = fluid.Program()
@@ -188,11 +187,6 @@ class TestDygraphGNN(unittest.TestCase):
         )
         sys.stderr.write('%s %s\n' % (static_loss, loss_value))

-    def test_gnn_float32(self):
-        with _test_eager_guard():
-            self.func_gnn_float32()
-        self.func_gnn_float32()
-

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -16,11 +16,7 @@ import unittest

 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import (
-    _in_legacy_dygraph,
-    _test_eager_guard,
-    in_dygraph_mode,
-)
+from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode


 class TestDataParallelGroup(unittest.TestCase):
@@ -170,20 +166,6 @@ class TestDataParallelGroup(unittest.TestCase):
         )
         self.assertEqual([[1, 0], [3], [2]], res)

-    def test_construct_group_in_legacy_mode(self):
-        with _test_eager_guard():
-            pass
-        self.test_construct_group0()
-        self.test_construct_group1()
-        self.test_construct_group2()
-        self.test_construct_group3()
-        self.test_construct_group4()
-        self.test_construct_group5()
-        self.test_construct_group6()
-        self.test_construct_group7()
-        self.test_construct_group8()
-        self.test_construct_group9()
-

 if __name__ == '__main__':
     unittest.main()
@@ -20,7 +20,6 @@ from test_imperative_lod_tensor_to_selected_rows import SimpleNet
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.dygraph.base as base
-from paddle.fluid.framework import _test_eager_guard

 call_forward_post_hook = False
 call_forward_pre_hook = False
@@ -47,7 +46,7 @@ def forward_pre_hook1(layer, input):

 class Test_Forward_Hook(unittest.TestCase):
     # test forward_pre_hook and forward_post_hook that have return value
-    def func_forward_hook_return_value(self):
+    def test_forward_hook_return_value(self):
         seed = 90

         places = [fluid.CPUPlace()]
@@ -129,7 +128,7 @@ class Test_Forward_Hook(unittest.TestCase):
         )

     # test forward_pre_hook and forward_post_hook that don't have return value
-    def func_forward_hook(self):
+    def test_forward_hook(self):
         seed = 90

         places = [fluid.CPUPlace()]
@@ -210,13 +209,6 @@ class Test_Forward_Hook(unittest.TestCase):
                 self.assertFalse(call_forward_post_hook)
                 self.assertFalse(call_forward_pre_hook)

-    def test_forward_hook_return_value(self):
-        with _test_eager_guard():
-            self.func_forward_hook()
-            self.func_forward_hook_return_value()
-        self.func_forward_hook()
-        self.func_forward_hook_return_value()
-

 if __name__ == '__main__':
     unittest.main()