diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py
index adefda695245ade070f784f3763509525c02a1ff..327cbce7ea6c501990202fb8df3a63febc085b01 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import Adam
 from paddle.nn import Embedding
 
@@ -1031,16 +1030,6 @@ class TestDygraphPtbRnn(unittest.TestCase):
         self.func_testSetNumpyBeforeTrain()
         self.func_testOnlyLoadParams()
         self.func_test_load_compatible_with_keep_name_table()
-        with _test_eager_guard():
-            self.func_setUp()
-            self.func_testLoadAndSetVarBase()
-            self.func_testSetVariable()
-            self.func_testSetNumpy()
-            self.func_testSetVariableBeforeTrain()
-            self.func_testLoadAndSetVarBaseBeforeTrain()
-            self.func_testSetNumpyBeforeTrain()
-            self.func_testOnlyLoadParams()
-            self.func_test_load_compatible_with_keep_name_table()
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py
index 3eff719a92839699360893db28615883f57152d6..c39573e5207a7dac8341a3a7a2075202bf2e15d9 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py
@@ -23,7 +23,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Embedding
 from paddle.optimizer import Adam
 
@@ -1072,17 +1071,6 @@ class TestDygraphPtbRnn(unittest.TestCase):
         self.func_testOnlyLoadParams()
         self.func_test_no_state_in_input_dict()
         self.func_test_state_shape_mismatch()
-        with _test_eager_guard():
-            self.func_setUp()
-            self.func_testLoadAndSetVarBase()
-            self.func_testSetVariable()
-            self.func_testSetNumpy()
-            self.func_testSetVariableBeforeTrain()
-            self.func_testLoadAndSetVarBaseBeforeTrain()
-            self.func_testSetNumpyBeforeTrain()
-            self.func_testOnlyLoadParams()
-            self.func_test_no_state_in_input_dict()
-            self.func_test_state_shape_mismatch()
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py
index 24f8a83cdeaac3cbb59ee12e5e8522978fe6e13e..b02e1595dbe1ca150fdcedc80c2424a06c60b72c 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py
@@ -20,7 +20,6 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.nn import BatchNorm
 
@@ -418,13 +417,12 @@ class TestImperativeResneXt(unittest.TestCase):
         ) = run_dygraph()
 
         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                (
-                    eager_out,
-                    eager_param_init_value,
-                    eager_param_value,
-                    eager_grad_value,
-                ) = run_dygraph()
+            (
+                eager_out,
+                eager_param_init_value,
+                eager_param_value,
+                eager_grad_value,
+            ) = run_dygraph()
 
         with new_program_scope():
             paddle.seed(seed)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
index 498317b2a33f9f06a34565a296e33dc7951bc840..cea97398d17159aa0756d5e985b77de0db772ddc 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 
 
@@ -40,7 +39,8 @@ class SimpleNet(paddle.nn.Layer):
 
 
 class TestSimpleNet(unittest.TestCase):
-    def func_selectedrows_gradient1(self):
+    def test_selectedrows_gradient1(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
@@ -77,15 +77,10 @@ class TestSimpleNet(unittest.TestCase):
                 input_emb.clear_gradient()
                 self.assertIsNotNone(input_emb.gradient())
                 paddle.enable_static()
-
-    def test_selectedrows_gradient1(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_selectedrows_gradient1()
-        self.func_selectedrows_gradient1()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
 
-    def func_selectedrows_gradient2(self):
+    def test_selectedrows_gradient2(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
@@ -121,12 +116,6 @@ class TestSimpleNet(unittest.TestCase):
 
                 input_emb.clear_gradient()
                 self.assertIsNotNone(input_emb.gradient())
-
-    def test_selectedrows_gradient2(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_selectedrows_gradient2()
-        self.func_selectedrows_gradient2()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py
index 220bde8e5b235f49dc10e2f34e6fa387e2aaced2..cd707bb0ca6420139e500bd21d794e6802cd1bbf 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.nn import Embedding
 
@@ -90,7 +89,7 @@
 
 
 class TestDygraphSimpleNet(unittest.TestCase):
-    def func_simple_net(self):
+    def test_simple_net(self):
         for is_sparse in [True, False]:
             dtype_list = ["float32"]
             if not core.is_compiled_with_rocm():
@@ -98,11 +97,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for dtype in dtype_list:
                 self.simple_net_float(is_sparse, dtype)
 
-    def test_simple_net(self):
-        with _test_eager_guard():
-            self.func_simple_net()
-        self.func_simple_net()
-
     def simple_net_float(self, is_sparse, dtype):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py
index f374d0b073c271d51309884787c072411c0c7766..ccb8f83bc6fae0e490c181b6060bb59fd5aba795 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py
@@ -20,7 +20,6 @@ import time
 import unittest
 
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 
 
 def set_child_signal_handler(self, child_pid):
@@ -38,7 +37,7 @@
 
 
 class DygraphDataLoaderSingalHandler(unittest.TestCase):
-    def func_child_process_exit_with_error(self):
+    def test_child_process_exit_with_error(self):
         def __test_process__():
             core._set_process_signal_handler()
             sys.exit(1)
@@ -65,12 +64,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
 
         self.assertIsNotNone(exception)
 
-    def test_child_process_exit_with_error(self):
-        with _test_eager_guard():
-            self.func_child_process_exit_with_error()
-        self.func_child_process_exit_with_error()
-
-    def func_child_process_killed_by_sigsegv(self):
+    def test_child_process_killed_by_sigsegv(self):
         def __test_process__():
             core._set_process_signal_handler()
             os.kill(os.getpid(), signal.SIGSEGV)
@@ -97,12 +91,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
 
         self.assertIsNotNone(exception)
 
-    def test_child_process_killed_by_sigsegv(self):
-        with _test_eager_guard():
-            self.func_child_process_killed_by_sigsegv()
-        self.func_child_process_killed_by_sigsegv()
-
-    def func_child_process_killed_by_sigbus(self):
+    def test_child_process_killed_by_sigbus(self):
         def __test_process__():
             core._set_process_signal_handler()
             os.kill(os.getpid(), signal.SIGBUS)
@@ -129,12 +118,7 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
 
         self.assertIsNotNone(exception)
 
-    def test_child_process_killed_by_sigbus(self):
-        with _test_eager_guard():
-            self.func_child_process_killed_by_sigbus()
-        self.func_child_process_killed_by_sigbus()
-
-    def func_child_process_killed_by_sigterm(self):
+    def test_child_process_killed_by_sigterm(self):
         def __test_process__():
             core._set_process_signal_handler()
             time.sleep(10)
@@ -146,11 +130,6 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase):
         set_child_signal_handler(id(self), test_process.pid)
         time.sleep(1)
 
-    def test_child_process_killed_by_sigterm(self):
-        with _test_eager_guard():
-            self.func_child_process_killed_by_sigterm()
-        self.func_child_process_killed_by_sigterm()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
index 4c61385dc04791ec349364a9b84684a6525070dd..4316a82581ca5b0c207e665a05cdd5eb638f1808 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle import _legacy_C_ops
-from paddle.fluid.framework import _test_eager_guard
 from paddle.tensor import random
 
 if fluid.is_compiled_with_cuda():
@@ -661,62 +660,13 @@ class TestStarGANWithGradientPenalty(unittest.TestCase):
                 fluid_dygraph_loss.append(loss)
 
         eager_dygraph_loss = []
-        with _test_eager_guard():
-            with fluid.dygraph.guard(cfg.place):
-                eager_dygraph_model = DyGraphTrainModel(cfg)
-                for batch_id, (image_real, label_org, label_trg) in enumerate(
-                    dataset()
-                ):
-                    loss = eager_dygraph_model.run(
-                        image_real, label_org, label_trg
-                    )
-                    eager_dygraph_loss.append(loss)
-
-        for (g_loss_f, d_loss_f), (g_loss_e, d_loss_e) in zip(
-            fluid_dygraph_loss, eager_dygraph_loss
-        ):
-            self.assertEqual(g_loss_f, g_loss_e)
-            self.assertEqual(d_loss_f, d_loss_e)
-
-    def test_all_cases(self):
-        self.func_main()
-
-
-class TestStarGANWithGradientPenaltyLegacy(unittest.TestCase):
-    def func_main(self):
-        self.place_test(fluid.CPUPlace())
-
-        if fluid.is_compiled_with_cuda():
-            self.place_test(fluid.CUDAPlace(0))
-
-    def place_test(self, place):
-        cfg = Config(place)
-
-        dataset = create_mnist_dataset(cfg)
-        dataset = paddle.reader.cache(dataset)
-
-        static_graph_model = StaticGraphTrainModel(cfg)
-        static_loss = []
-        for batch_id, (image_real, label_org, label_trg) in enumerate(
-            dataset()
-        ):
-            loss = static_graph_model.run(image_real, label_org, label_trg)
-            static_loss.append(loss)
-
-        dygraph_loss = []
         with fluid.dygraph.guard(cfg.place):
-            dygraph_model = DyGraphTrainModel(cfg)
+            eager_dygraph_model = DyGraphTrainModel(cfg)
             for batch_id, (image_real, label_org, label_trg) in enumerate(
                 dataset()
             ):
-                loss = dygraph_model.run(image_real, label_org, label_trg)
-                dygraph_loss.append(loss)
-
-        for (g_loss_s, d_loss_s), (g_loss_d, d_loss_d) in zip(
-            static_loss, dygraph_loss
-        ):
-            self.assertEqual(g_loss_s, g_loss_d)
-            self.assertEqual(d_loss_s, d_loss_d)
+                loss = eager_dygraph_model.run(image_real, label_org, label_trg)
+                eager_dygraph_loss.append(loss)
 
     def test_all_cases(self):
         self.func_main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py
index 8e7418b2f70754a10ec0133d7c61fd663b67f755..8df3c5916c68a1c6db903c9f650f5b866b664aa0 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py
@@ -20,7 +20,6 @@ import numpy as np
 
 import paddle
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard
 
 
 class SimpleNet(nn.Layer):
@@ -47,7 +46,7 @@ class TestCases(unittest.TestCase):
         x = net(x)
         self.assertFalse(x.stop_gradient)
 
-    def func_main(self):
+    def test_main(self):
         threads = []
         for _ in range(10):
             threads.append(threading.Thread(target=self.thread_1_main))
@@ -57,11 +56,6 @@ class TestCases(unittest.TestCase):
         for t in threads:
             t.join()
 
-    def test_main(self):
-        with _test_eager_guard():
-            self.func_main()
-        self.func_main()
-
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py
index 32a5da60fdc21dbe17695beb1ccdf75498d2e8fa..be0c38e702bfe232a7ba9a3cfaa54a7d8c05b353 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
 import paddle.nn.functional as F
 from paddle.fluid import Layer, core
 from paddle.fluid.dygraph import guard, to_variable
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.nn import Linear
 
 np.set_printoptions(suppress=True)
@@ -1328,15 +1328,14 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
         ) = run_dygraph()
 
         with guard():
-            with _test_eager_guard():
-                (
-                    eager_avg_cost_value,
-                    eager_sum_cost_value,
-                    eager_predict_value,
-                    eager_token_num_value,
-                    eager_param_init,
-                    eager_param_updated,
-                ) = run_dygraph()
+            (
+                eager_avg_cost_value,
+                eager_sum_cost_value,
+                eager_predict_value,
+                eager_token_num_value,
+                eager_param_init,
+                eager_param_updated,
+            ) = run_dygraph()
         np.testing.assert_allclose(
             dy_avg_cost_value, eager_avg_cost_value, rtol=1e-05
         )
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py
index 175e42fb60017be4688e8c879136a6a44544beb7..570e6cf5c3e4f4c9ec57bf2227d409e0d286cd49 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py
@@ -19,7 +19,6 @@ import numpy as np
 
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.wrapped_decorator import wrap_decorator
 
 
@@ -46,67 +45,62 @@ def random_var(size, low=-1, high=1, dtype='float32'):
 class TestDygraphTripleGradMatmul(TestCase):
     def test_matmul_triple_grad(self):
         input_numpy = np.ones([3, 3]) * 2
-        with _test_eager_guard():
-            x = paddle.to_tensor(
-                input_numpy, stop_gradient=False, dtype='float32'
-            )
-            y = paddle.to_tensor(
-                input_numpy, stop_gradient=False, dtype='float32'
-            )
-            out = paddle.matmul(x, y, False, False)
+        x = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
+        y = paddle.to_tensor(input_numpy, stop_gradient=False, dtype='float32')
+        out = paddle.matmul(x, y, False, False)
 
-            new_out_g = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
-            new_x_g, new_y_g = paddle.grad(
-                [out], [x, y], [new_out_g], retain_graph=True, create_graph=True
-            )
+        new_out_g = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )
+        new_x_g, new_y_g = paddle.grad(
+            [out], [x, y], [new_out_g], retain_graph=True, create_graph=True
+        )
 
-            new_x_g_g = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
-            new_y_g_g = paddle.to_tensor(
-                np.ones([3, 3]), stop_gradient=False, dtype='float32'
-            )
-            new_a, new_b, new_c = paddle.grad(
-                [new_x_g, new_y_g],
-                [x, y, new_out_g],
-                [new_x_g_g, new_y_g_g],
-                retain_graph=True,
-                create_graph=True,
-            )
+        new_x_g_g = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )
+        new_y_g_g = paddle.to_tensor(
+            np.ones([3, 3]), stop_gradient=False, dtype='float32'
+        )
+        new_a, new_b, new_c = paddle.grad(
+            [new_x_g, new_y_g],
+            [x, y, new_out_g],
+            [new_x_g_g, new_y_g_g],
+            retain_graph=True,
+            create_graph=True,
+        )
 
-            new_a.backward()
+        new_a.backward()
 
-            out_ref = np.ones([3, 3]) * 12.0
-            np.testing.assert_array_equal(out.numpy(), out_ref)
+        out_ref = np.ones([3, 3]) * 12.0
+        np.testing.assert_array_equal(out.numpy(), out_ref)
 
-            new_x_g_ref = np.ones([3, 3]) * 6.0
-            new_y_g_ref = np.ones([3, 3]) * 6.0
-            np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
-            np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
+        new_x_g_ref = np.ones([3, 3]) * 6.0
+        new_y_g_ref = np.ones([3, 3]) * 6.0
+        np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
+        np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
 
-            new_a_ref = np.ones([3, 3]) * 3.0
-            new_b_ref = np.ones([3, 3]) * 3.0
-            new_c_ref = np.ones([3, 3]) * 12.0
+        new_a_ref = np.ones([3, 3]) * 3.0
+        new_b_ref = np.ones([3, 3]) * 3.0
+        new_c_ref = np.ones([3, 3]) * 12.0
 
-            np.testing.assert_array_equal(new_a.numpy(), new_a_ref)
-            np.testing.assert_array_equal(new_b.numpy(), new_b_ref)
-            np.testing.assert_array_equal(new_c.numpy(), new_c_ref)
+        np.testing.assert_array_equal(new_a.numpy(), new_a_ref)
+        np.testing.assert_array_equal(new_b.numpy(), new_b_ref)
+        np.testing.assert_array_equal(new_c.numpy(), new_c_ref)
 
-            x_grad_ref = np.ones([3, 3]) * 0.0
-            np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
+        x_grad_ref = np.ones([3, 3]) * 0.0
+        np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
 
-            y_grad_ref = np.ones([3, 3]) * 0.0
-            np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
+        y_grad_ref = np.ones([3, 3]) * 0.0
+        np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
 
-            new_out_g_ref = np.ones([3, 3]) * 3.0
-            np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref)
+        new_out_g_ref = np.ones([3, 3]) * 3.0
+        np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref)
 
-            new_x_g_g_ref = np.ones([3, 3]) * 0.0
-            new_y_g_g_ref = np.ones([3, 3]) * 3.0
-            np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)
-            np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)
+        new_x_g_g_ref = np.ones([3, 3]) * 0.0
+        new_y_g_g_ref = np.ones([3, 3]) * 3.0
+        np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)
+        np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)
 
 
 class TestDygraphTripleGrad(TestCase):
@@ -231,9 +225,6 @@ class TestDygraphTripleGrad(TestCase):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.func_exception()
         self.func_example_with_gradient_and_create_graph()
-        with _test_eager_guard():
-            self.func_exception()
-            self.func_example_with_gradient_and_create_graph()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
 
 
@@ -327,8 +318,6 @@ class TestDygraphTripleGradBradcastCase(TestCase):
     def test_all_cases(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         self.func_example_with_gradient_and_create_graph()
-        with _test_eager_guard():
-            self.func_example_with_gradient_and_create_graph()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py
index f9b3abf1b07e1bdae887bec3ab89a038872999c9..a482d2a5d8ffd91ee352591b68c81c11569d4b15 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import guard, to_variable
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestImperativeUsingNonZeroGpu(unittest.TestCase):
@@ -28,7 +27,7 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
             var = to_variable(np_arr)
             np.testing.assert_array_equal(np_arr, var.numpy())
 
-    def func_non_zero_gpu(self):
+    def test_non_zero_gpu(self):
         if not fluid.is_compiled_with_cuda():
             return
 
@@ -39,11 +38,6 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
         else:
             self.run_main(np_arr, fluid.CUDAPlace(0))
 
-    def test_non_zero_gpu(self):
-        with _test_eager_guard():
-            self.func_non_zero_gpu()
-        self.func_non_zero_gpu()
-
 
 if __name__ == '__main__':
     unittest.main()