Unverified commit 19303281, authored by 姜永久, committed by GitHub

rm unittests eager guard tests imperative_save2using (#48834)

* rm unittests eager guard tests imperative_save2using

* minor change

* rm legacy test

* rm eager test
Parent 0e1538ad
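
Every file in this diff applies the same mechanical change: the transitional dual-mode harness, which ran each test body twice (once inside `_test_eager_guard()` for eager mode, once in legacy dygraph), collapses into a plain test method now that eager mode is the default. A minimal sketch of the before/after shape, using a hypothetical TestExample case rather than code from any file below:

import unittest

import paddle
from paddle.fluid.framework import _test_eager_guard


class TestExample(unittest.TestCase):
    # Before: the body lives in a func_* helper, and a test_* wrapper
    # runs it under both dygraph implementations.
    def func_double(self):
        x = paddle.to_tensor([1.0, 2.0])
        self.assertEqual(float((x * 2).numpy()[0]), 2.0)

    def test_double_old_style(self):
        with _test_eager_guard():  # eager mode
            self.func_double()
        self.func_double()  # legacy dygraph mode

    # After this commit: only eager mode is exercised, so the
    # indirection collapses into an ordinary test method.
    def test_double(self):
        x = paddle.to_tensor([1.0, 2.0])
        self.assertEqual(float((x * 2).numpy()[0]), 2.0)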
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import Adam
 from paddle.nn import Embedding
@@ -1031,16 +1030,6 @@ class TestDygraphPtbRnn(unittest.TestCase):
         self.func_testSetNumpyBeforeTrain()
         self.func_testOnlyLoadParams()
         self.func_test_load_compatible_with_keep_name_table()
-        with _test_eager_guard():
-            self.func_setUp()
-            self.func_testLoadAndSetVarBase()
-            self.func_testSetVariable()
-            self.func_testSetNumpy()
-            self.func_testSetVariableBeforeTrain()
-            self.func_testLoadAndSetVarBaseBeforeTrain()
-            self.func_testSetNumpyBeforeTrain()
-            self.func_testOnlyLoadParams()
-            self.func_test_load_compatible_with_keep_name_table()
 
 
 if __name__ == '__main__':
......
@@ -23,7 +23,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Embedding
 from paddle.optimizer import Adam
@@ -1072,17 +1071,6 @@ class TestDygraphPtbRnn(unittest.TestCase):
         self.func_testOnlyLoadParams()
         self.func_test_no_state_in_input_dict()
         self.func_test_state_shape_mismatch()
-        with _test_eager_guard():
-            self.func_setUp()
-            self.func_testLoadAndSetVarBase()
-            self.func_testSetVariable()
-            self.func_testSetNumpy()
-            self.func_testSetVariableBeforeTrain()
-            self.func_testLoadAndSetVarBaseBeforeTrain()
-            self.func_testSetNumpyBeforeTrain()
-            self.func_testOnlyLoadParams()
-            self.func_test_no_state_in_input_dict()
-            self.func_test_state_shape_mismatch()
 
 
 if __name__ == '__main__':
......
@@ -20,7 +20,6 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.nn import BatchNorm
@@ -418,13 +417,12 @@ class TestImperativeResneXt(unittest.TestCase):
         ) = run_dygraph()
 
         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                (
-                    eager_out,
-                    eager_param_init_value,
-                    eager_param_value,
-                    eager_grad_value,
-                ) = run_dygraph()
+            (
+                eager_out,
+                eager_param_init_value,
+                eager_param_value,
+                eager_grad_value,
+            ) = run_dygraph()
 
         with new_program_scope():
             paddle.seed(seed)
......
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
@@ -40,7 +39,8 @@ class SimpleNet(paddle.nn.Layer):
 
 class TestSimpleNet(unittest.TestCase):
-    def func_selectedrows_gradient1(self):
+    def test_selectedrows_gradient1(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
@@ -77,15 +77,10 @@ class TestSimpleNet(unittest.TestCase):
                     input_emb.clear_gradient()
                     self.assertIsNotNone(input_emb.gradient())
                 paddle.enable_static()
-
-    def test_selectedrows_gradient1(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_selectedrows_gradient1()
-        self.func_selectedrows_gradient1()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
 
-    def func_selectedrows_gradient2(self):
+    def test_selectedrows_gradient2(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
@@ -121,12 +116,6 @@ class TestSimpleNet(unittest.TestCase):
                     input_emb.clear_gradient()
                     self.assertIsNotNone(input_emb.gradient())
-
-    def test_selectedrows_gradient2(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_selectedrows_gradient2()
-        self.func_selectedrows_gradient2()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......
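
In the selected-rows tests above, the FLAGS_retain_grad_for_all_tensor toggle that used to live in the deleted test_* wrapper moves into the merged test method, which now sets the flag, runs the body, and resets it. A condensed, hypothetical illustration of that lifecycle (not code from this diff):

import unittest

import paddle
import paddle.fluid as fluid


class TestFlagScope(unittest.TestCase):
    def test_retain_grad_flag_scope(self):
        # Enable gradient retention for all tensors before the body runs.
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
        (x * x).sum().backward()
        self.assertIsNotNone(x.gradient())
        # Reset the global flag so later tests are unaffected.
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})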
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.nn import Embedding
@@ -90,7 +89,7 @@ class SimpleNet(fluid.Layer):
 
 class TestDygraphSimpleNet(unittest.TestCase):
-    def func_simple_net(self):
+    def test_simple_net(self):
         for is_sparse in [True, False]:
             dtype_list = ["float32"]
             if not core.is_compiled_with_rocm():
@@ -98,11 +97,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for dtype in dtype_list:
                 self.simple_net_float(is_sparse, dtype)
 
-    def test_simple_net(self):
-        with _test_eager_guard():
-            self.func_simple_net()
-        self.func_simple_net()
-
     def simple_net_float(self, is_sparse, dtype):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
......
@@ -20,7 +20,6 @@ import time
 import unittest
 
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 
 
 def set_child_signal_handler(self, child_pid):
@@ -38,7 +37,7 @@ def set_child_signal_handler(self, child_pid):
 
 class DygraphDataLoaderSingalHandler(unittest.TestCase):
-    def func_child_process_exit_with_error(self):
+    def test_child_process_exit_with_error(self):
         def __test_process__():
             core._set_process_signal_handler()
             sys.exit(1)
self.assertIsNotNone(exception)
def test_child_process_exit_with_error(self):
with _test_eager_guard():
self.func_child_process_exit_with_error()
self.func_child_process_exit_with_error()
def func_child_process_killed_by_sigsegv(self):
def test_child_process_killed_by_sigsegv(self):
def __test_process__():
core._set_process_signal_handler()
os.kill(os.getpid(), signal.SIGSEGV)
@@ -97,12 +91,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
         self.assertIsNotNone(exception)
 
-    def test_child_process_killed_by_sigsegv(self):
-        with _test_eager_guard():
-            self.func_child_process_killed_by_sigsegv()
-        self.func_child_process_killed_by_sigsegv()
-
-    def func_child_process_killed_by_sigbus(self):
+    def test_child_process_killed_by_sigbus(self):
         def __test_process__():
             core._set_process_signal_handler()
             os.kill(os.getpid(), signal.SIGBUS)
@@ -129,12 +118,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
         self.assertIsNotNone(exception)
 
-    def test_child_process_killed_by_sigbus(self):
-        with _test_eager_guard():
-            self.func_child_process_killed_by_sigbus()
-        self.func_child_process_killed_by_sigbus()
-
-    def func_child_process_killed_by_sigterm(self):
+    def test_child_process_killed_by_sigterm(self):
         def __test_process__():
             core._set_process_signal_handler()
             time.sleep(10)
@@ -146,11 +130,6 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
         set_child_signal_handler(id(self), test_process.pid)
         time.sleep(1)
 
-    def test_child_process_killed_by_sigterm(self):
-        with _test_eager_guard():
-            self.func_child_process_killed_by_sigterm()
-        self.func_child_process_killed_by_sigterm()
-
 
 if __name__ == '__main__':
     unittest.main()
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle import _legacy_C_ops
-from paddle.fluid.framework import _test_eager_guard
 from paddle.tensor import random
 
 if fluid.is_compiled_with_cuda():
@@ -661,62 +660,13 @@ class TestStarGANWithGradientPenalty(unittest.TestCase):
                 fluid_dygraph_loss.append(loss)
 
         eager_dygraph_loss = []
-        with _test_eager_guard():
-            with fluid.dygraph.guard(cfg.place):
-                eager_dygraph_model = DyGraphTrainModel(cfg)
-                for batch_id, (image_real, label_org, label_trg) in enumerate(
-                    dataset()
-                ):
-                    loss = eager_dygraph_model.run(
-                        image_real, label_org, label_trg
-                    )
-                    eager_dygraph_loss.append(loss)
+        with fluid.dygraph.guard(cfg.place):
+            eager_dygraph_model = DyGraphTrainModel(cfg)
+            for batch_id, (image_real, label_org, label_trg) in enumerate(
+                dataset()
+            ):
+                loss = eager_dygraph_model.run(image_real, label_org, label_trg)
+                eager_dygraph_loss.append(loss)
 
         for (g_loss_f, d_loss_f), (g_loss_e, d_loss_e) in zip(
             fluid_dygraph_loss, eager_dygraph_loss
         ):
             self.assertEqual(g_loss_f, g_loss_e)
             self.assertEqual(d_loss_f, d_loss_e)
 
     def test_all_cases(self):
         self.func_main()
-
-
-class TestStarGANWithGradientPenaltyLegacy(unittest.TestCase):
-    def func_main(self):
-        self.place_test(fluid.CPUPlace())
-        if fluid.is_compiled_with_cuda():
-            self.place_test(fluid.CUDAPlace(0))
-
-    def place_test(self, place):
-        cfg = Config(place)
-        dataset = create_mnist_dataset(cfg)
-        dataset = paddle.reader.cache(dataset)
-
-        static_graph_model = StaticGraphTrainModel(cfg)
-        static_loss = []
-        for batch_id, (image_real, label_org, label_trg) in enumerate(
-            dataset()
-        ):
-            loss = static_graph_model.run(image_real, label_org, label_trg)
-            static_loss.append(loss)
-
-        dygraph_loss = []
-        with fluid.dygraph.guard(cfg.place):
-            dygraph_model = DyGraphTrainModel(cfg)
-            for batch_id, (image_real, label_org, label_trg) in enumerate(
-                dataset()
-            ):
-                loss = dygraph_model.run(image_real, label_org, label_trg)
-                dygraph_loss.append(loss)
-
-        for (g_loss_s, d_loss_s), (g_loss_d, d_loss_d) in zip(
-            static_loss, dygraph_loss
-        ):
-            self.assertEqual(g_loss_s, g_loss_d)
-            self.assertEqual(d_loss_s, d_loss_d)
-
-    def test_all_cases(self):
-        self.func_main()
......
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard
 
 
 class SimpleNet(nn.Layer):
@@ -47,7 +46,7 @@ class TestCases(unittest.TestCase):
             x = net(x)
             self.assertFalse(x.stop_gradient)
 
-    def func_main(self):
+    def test_main(self):
         threads = []
         for _ in range(10):
             threads.append(threading.Thread(target=self.thread_1_main))
@@ -57,11 +56,6 @@ class TestCases(unittest.TestCase):
         for t in threads:
             t.join()
 
-    def test_main(self):
-        with _test_eager_guard():
-            self.func_main()
-        self.func_main()
-
 
 if __name__ == "__main__":
     unittest.main()
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
 import paddle.nn.functional as F
 from paddle.fluid import Layer, core
 from paddle.fluid.dygraph import guard, to_variable
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.nn import Linear
 
 np.set_printoptions(suppress=True)
@@ -1328,15 +1328,14 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
         ) = run_dygraph()
 
         with guard():
-            with _test_eager_guard():
-                (
-                    eager_avg_cost_value,
-                    eager_sum_cost_value,
-                    eager_predict_value,
-                    eager_token_num_value,
-                    eager_param_init,
-                    eager_param_updated,
-                ) = run_dygraph()
+            (
+                eager_avg_cost_value,
+                eager_sum_cost_value,
+                eager_predict_value,
+                eager_token_num_value,
+                eager_param_init,
+                eager_param_updated,
+            ) = run_dygraph()
 
         np.testing.assert_allclose(
             dy_avg_cost_value, eager_avg_cost_value, rtol=1e-05
         )
......
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.wrapped_decorator import wrap_decorator
@@ -46,67 +45,62 @@ def random_var(size, low=-1, high=1, dtype='float32'):
 
 class TestDygraphTripleGradMatmul(TestCase):
     def test_matmul_triple_grad(self):
         input_numpy = np.ones([3, 3]) * 2
-        with _test_eager_guard():
-            x = paddle.to_tensor(
-                input_numpy, stop_gradient=False, dtype='float32'
-            )
-            y = paddle.to_tensor(
-                input_numpy, stop_gradient=False, dtype='float32'
-            )
-            out = paddle.matmul(x, y, False, False)
+        x = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
+        y = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
+        out = paddle.matmul(x, y, False, False)
 
-            new_out_g = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
-            new_x_g, new_y_g = paddle.grad(
-                [out], [x, y], [new_out_g], retain_graph=True, create_graph=True
-            )
+        new_out_g = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )
+        new_x_g, new_y_g = paddle.grad(
+            [out], [x, y], [new_out_g], retain_graph=True, create_graph=True
+        )
 
-            new_x_g_g = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
-            new_y_g_g = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
-            new_a, new_b, new_c = paddle.grad(
-                [new_x_g, new_y_g],
-                [x, y, new_out_g],
-                [new_x_g_g, new_y_g_g],
-                retain_graph=True,
-                create_graph=True,
-            )
+        new_x_g_g = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )
+        new_y_g_g = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )
+        new_a, new_b, new_c = paddle.grad(
+            [new_x_g, new_y_g],
+            [x, y, new_out_g],
+            [new_x_g_g, new_y_g_g],
+            retain_graph=True,
+            create_graph=True,
+        )
 
-            new_a.backward()
+        new_a.backward()
 
-            out_ref = np.ones([3, 3]) * 12.0
-            np.testing.assert_array_equal(out.numpy(), out_ref)
+        out_ref = np.ones([3, 3]) * 12.0
+        np.testing.assert_array_equal(out.numpy(), out_ref)
 
-            new_x_g_ref = np.ones([3, 3]) * 6.0
-            new_y_g_ref = np.ones([3, 3]) * 6.0
-            np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
-            np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
+        new_x_g_ref = np.ones([3, 3]) * 6.0
+        new_y_g_ref = np.ones([3, 3]) * 6.0
+        np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
+        np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
 
-            new_a_ref = np.ones([3, 3]) * 3.0
-            new_b_ref = np.ones([3, 3]) * 3.0
-            new_c_ref = np.ones([3, 3]) * 12.0
+        new_a_ref = np.ones([3, 3]) * 3.0
+        new_b_ref = np.ones([3, 3]) * 3.0
+        new_c_ref = np.ones([3, 3]) * 12.0
 
-            np.testing.assert_array_equal(new_a.numpy(), new_a_ref)
-            np.testing.assert_array_equal(new_b.numpy(), new_b_ref)
-            np.testing.assert_array_equal(new_c.numpy(), new_c_ref)
+        np.testing.assert_array_equal(new_a.numpy(), new_a_ref)
+        np.testing.assert_array_equal(new_b.numpy(), new_b_ref)
+        np.testing.assert_array_equal(new_c.numpy(), new_c_ref)
 
-            x_grad_ref = np.ones([3, 3]) * 0.0
-            np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
+        x_grad_ref = np.ones([3, 3]) * 0.0
+        np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
 
-            y_grad_ref = np.ones([3, 3]) * 0.0
-            np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
+        y_grad_ref = np.ones([3, 3]) * 0.0
+        np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
 
-            new_out_g_ref = np.ones([3, 3]) * 3.0
-            np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref)
+        new_out_g_ref = np.ones([3, 3]) * 3.0
+        np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref)
 
-            new_x_g_g_ref = np.ones([3, 3]) * 0.0
-            new_y_g_g_ref = np.ones([3, 3]) * 3.0
-            np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)
-            np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)
+        new_x_g_g_ref = np.ones([3, 3]) * 0.0
+        new_y_g_g_ref = np.ones([3, 3]) * 3.0
+        np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)
+        np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)
 
 
 class TestDygraphTripleGrad(TestCase):
@@ -231,9 +225,6 @@ class TestDygraphTripleGrad(TestCase):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.func_exception()
         self.func_example_with_gradient_and_create_graph()
-        with _test_eager_guard():
-            self.func_exception()
-            self.func_example_with_gradient_and_create_graph()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
@@ -327,8 +318,6 @@ class TestDygraphTripleGradBradcastCase(TestCase):
     def test_all_cases(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.func_example_with_gradient_and_create_graph()
-        with _test_eager_guard():
-            self.func_example_with_gradient_and_create_graph()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......
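
The reference constants in the matmul triple-grad test above all follow from x = y = 2 * ones(3, 3) and all-ones gradient weights: each entry of out = x @ y sums three 2*2 terms (12), the first-order grads sum three 1*2 terms (6), and the second-order grads either sum three 1*1 terms (3) or recombine into 12. A standalone NumPy check of those closed forms (illustrative only, independent of Paddle):

import numpy as np

x = y = np.full((3, 3), 2.0)      # the test's input: np.ones([3, 3]) * 2
g_out = np.ones((3, 3))           # new_out_g, the weight of the first grad

out = x @ y                       # every entry: 3 * (2 * 2) = 12
g_x = g_out @ y.T                 # matmul VJP wrt x: 3 * (1 * 2) = 6
g_y = x.T @ g_out                 # matmul VJP wrt y: 6

# Second-order pass with weights new_x_g_g = new_y_g_g = ones.
w_x = w_y = np.ones((3, 3))
a = g_out @ w_y.T                 # d<w_y, x.T @ g_out>/dx: 3 * (1 * 1) = 3
b = g_out.T @ w_x                 # d<w_x, g_out @ y.T>/dy: 3
c = w_x @ y + x @ w_y             # d/d(g_out): 6 + 6 = 12

for ref, val in [(12, out), (6, g_x), (6, g_y), (3, a), (3, b), (12, c)]:
    np.testing.assert_array_equal(val, np.full((3, 3), float(ref)))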
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import guard, to_variable
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestImperativeUsingNonZeroGpu(unittest.TestCase):
@@ -28,7 +27,7 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
         var = to_variable(np_arr)
         np.testing.assert_array_equal(np_arr, var.numpy())
 
-    def func_non_zero_gpu(self):
+    def test_non_zero_gpu(self):
         if not fluid.is_compiled_with_cuda():
             return
@@ -39,11 +38,6 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
         else:
             self.run_main(np_arr, fluid.CUDAPlace(0))
 
-    def test_non_zero_gpu(self):
-        with _test_eager_guard():
-            self.func_non_zero_gpu()
-        self.func_non_zero_gpu()
-
 
 if __name__ == '__main__':
     unittest.main()