Unverified commit a8d139a4, authored by 姜永久, committed by GitHub

rm unittests eager guard test part14 initializer2layer_norm (#48835)

* rm unittests eager guard test part14: initializer to layer_norm

* minor change
Parent 627eaa0f
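
The pattern removed throughout this commit is the dual-mode wrapper: each `func_test_*` body used to be executed twice, once inside `framework._test_eager_guard()` and once outside, and the wrapper method carried the `test_*` name. With eager mode now the default, the wrapper is deleted and the helper itself is renamed to `test_*` so unittest discovers it directly. Below is a minimal sketch of the before/after shape; the class and method names are illustrative, not taken from the diff:

```python
import unittest

import paddle


class ExampleEagerTest(unittest.TestCase):
    # Before this commit (sketch): the body ran twice, once under the
    # eager guard and once in legacy dygraph mode.
    #
    #     def func_test_add(self):
    #         x = paddle.ones([2, 2])
    #         self.assertEqual((x + x).numpy().sum(), 8.0)
    #
    #     def test_add(self):
    #         with framework._test_eager_guard():
    #             self.func_test_add()
    #         self.func_test_add()

    # After: eager mode is the default, so the body runs once and the
    # method is named test_* so unittest discovers it directly.
    def test_add(self):
        x = paddle.ones([2, 2])
        self.assertEqual((x + x).numpy().sum(), 8.0)


if __name__ == '__main__':
    unittest.main()
```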
@@ -605,16 +605,14 @@ class TestBilinearInitializerDygraphAPI(unittest.TestCase):
     def test_bilinear_initializer(self):
         paddle.disable_static()
-        with framework._test_eager_guard():
-            eager_x = self.func_test_case()
+        eager_x = self.func_test_case()
         legacy_x = self.func_test_case()
         self.assertEqual(eager_x.numpy().all(), legacy_x.numpy().all())
         paddle.enable_static()

     def test_bilinear_initializer_fp16(self):
         paddle.disable_static()
-        with framework._test_eager_guard():
-            eager_x = self.func_test_case_fp16()
+        eager_x = self.func_test_case_fp16()
         legacy_x = self.func_test_case_fp16()
         self.assertEqual(eager_x.numpy().all(), legacy_x.numpy().all())
         paddle.enable_static()
@@ -712,7 +710,7 @@ class TestSetGlobalInitializer(unittest.TestCase):

 class TestUniformInitializerDygraph(unittest.TestCase):
-    def func_uniform_initializer(self, dtype="float32"):
+    def test_uniform_initializer(self, dtype="float32"):
         """
         In dygraph mode, we can use initializer directly to initialize a tensor.
         """
@@ -737,14 +735,9 @@ class TestUniformInitializerDygraph(unittest.TestCase):
         paddle.enable_static()

-    def test_uniform_initializer(self, dtype="float32"):
-        with framework._test_eager_guard():
-            self.func_uniform_initializer()
-        self.func_uniform_initializer()
-

 class TestXavierInitializerDygraph(unittest.TestCase):
-    def func_xvarier_initializer(self, dtype="float32"):
+    def test_xvarier_initializer(self, dtype="float32"):
         """
         In dygraph mode, we can use initializer directly to initialize a tensor.
         """
@@ -767,14 +760,9 @@ class TestXavierInitializerDygraph(unittest.TestCase):
         np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
         paddle.enable_static()

-    def test_xavier_initializer(self, dtype="float32"):
-        with framework._test_eager_guard():
-            self.func_xvarier_initializer()
-        self.func_xvarier_initializer()
-

 class TestMSRAInitializerDygraph(unittest.TestCase):
-    def func_msra_initializer(self, dtype="float32"):
+    def test_msra_initializer(self, dtype="float32"):
         """
         In dygraph mode, we can use initializer directly to initialize a tensor.
         """
@@ -797,14 +785,9 @@ class TestMSRAInitializerDygraph(unittest.TestCase):
         np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
         paddle.enable_static()

-    def test_msra_initializer(self, dtype="float32"):
-        with framework._test_eager_guard():
-            self.func_msra_initializer()
-        self.func_msra_initializer()
-

 class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
-    def func_order(self):
+    def test_order(self):
         paddle.set_device('cpu')
         SEED = 123
         weight_attr = paddle.framework.ParamAttr(
@@ -854,11 +837,6 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
         np.testing.assert_array_equal(dynamic_res[0], static_res[0])
         np.testing.assert_array_equal(dynamic_res[1], static_res[1])

-    def test_order(self):
-        with framework._test_eager_guard():
-            self.func_order()
-        self.func_order()
-

 # 2-D Parameter with shape: [10, 15]
 class TestOrthogonalInitializer1(unittest.TestCase):
@@ -881,7 +859,7 @@ class TestOrthogonalInitializer1(unittest.TestCase):
             np.matmul(a, a.T), 9 * np.eye(10), rtol=1e-5, atol=1e-8
         )

-    def func_orthogonal(self):
+    def test_orthogonal(self):
         self.config()
         paddle.set_default_dtype(self.dtype)
@@ -918,11 +896,6 @@ class TestOrthogonalInitializer1(unittest.TestCase):
         self.check_result(res_dygraph, res_static)

-    def test_orthogonal(self):
-        with framework._test_eager_guard():
-            self.func_orthogonal()
-        self.func_orthogonal()
-

 # 2-D Parameter with shape: [15, 10]
 class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
@@ -998,7 +971,7 @@ class TestOrthogonalInitializer4(unittest.TestCase):
             np.matmul(a, a.T), 9 * np.eye(6), rtol=1e-5, atol=1e-8
         )

-    def func_orthogonal(self):
+    def test_orthogonal(self):
         self.config()
         paddle.set_default_dtype(self.dtype)
@@ -1030,11 +1003,6 @@ class TestOrthogonalInitializer4(unittest.TestCase):
         )[0]
         self.check_result(res_dygraph, res_static)

-    def test_orthogonal(self):
-        with framework._test_eager_guard():
-            self.func_orthogonal()
-        self.func_orthogonal()
-

 # 4-D Parameter with shape: [50, 4, 3, 3]
 class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
@@ -1105,7 +1073,7 @@ class TestDiracInitializer1(unittest.TestCase):
         np.testing.assert_array_equal(w_dygraph, w_static)
         np.testing.assert_array_equal(conv_out, conv_in[:, 0:2, 1:9])

-    def func_dirac(self):
+    def test_dirac(self):
         self.config()
         paddle.set_default_dtype(self.dtype)
@@ -1151,11 +1119,6 @@ class TestDiracInitializer1(unittest.TestCase):
             weight_dygraph, weight_static, conv_input, conv_output
         )

-    def test_dirac(self):
-        with framework._test_eager_guard():
-            self.func_dirac()
-        self.func_dirac()
-

 # initialize Conv2D weight
 class TestDiracInitializer2(TestDiracInitializer1):
......
@@ -17,7 +17,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard
 from paddle.static import Program, program_guard
@@ -54,7 +53,7 @@ class TestMultiplyApi(unittest.TestCase):
             res = paddle.inner(x, y)
             return res.numpy()

-    def func_test_multiply(self):
+    def test_multiply(self):
         np.random.seed(7)

         # test static computation graph: 3-d array
@@ -113,14 +112,9 @@ class TestMultiplyApi(unittest.TestCase):
         res = self._run_dynamic_graph_case(x_data, y_data)
         np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)

-    def test_multiply(self):
-        with _test_eager_guard():
-            self.func_test_multiply()
-        self.func_test_multiply()
-

 class TestMultiplyError(unittest.TestCase):
-    def func_test_errors(self):
+    def test_errors(self):
         # test static computation graph: dtype can not be int8
         paddle.enable_static()
         with program_guard(Program(), Program()):
@@ -174,11 +168,6 @@ class TestMultiplyError(unittest.TestCase):
         y_data = np.random.randn(200).astype(np.float32)
         self.assertRaises(ValueError, paddle.inner, x_data, y_data)

-    def test_errors(self):
-        with _test_eager_guard():
-            self.func_test_errors()
-        self.func_test_errors()
-

 if __name__ == '__main__':
     paddle.enable_static()
......
@@ -17,11 +17,10 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestInplace(unittest.TestCase):
-    def func_test_forward_version(self):
+    def test_forward_version(self):
         with paddle.fluid.dygraph.guard():
             var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
             self.assertEqual(var.inplace_version, 0)
@@ -38,12 +37,7 @@ class TestInplace(unittest.TestCase):
             var[2] = 3
             self.assertEqual(var.inplace_version, 3)

-    def test_forward_version(self):
-        with _test_eager_guard():
-            self.func_test_forward_version()
-        self.func_test_forward_version()
-
-    def func_test_backward_error(self):
+    def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
         with paddle.fluid.dygraph.guard():
@@ -67,12 +61,7 @@ class TestInplace(unittest.TestCase):
             ):
                 loss.backward()

-    def test_backward_error(self):
-        with _test_eager_guard():
-            self.func_test_backward_error()
-        self.func_test_backward_error()
-
-    def func_test_backward_success_1(self):
+    def test_backward_success_1(self):
         # var_b is modified inplace before using it, the inplace operator doesn't result
         # in incorrect gradient computation.
         with paddle.fluid.dygraph.guard():
@@ -87,12 +76,7 @@ class TestInplace(unittest.TestCase):
             loss = var_c.sum()
             loss.backward()

-    def test_backward_success_1(self):
-        with _test_eager_guard():
-            self.func_test_backward_success_1()
-        self.func_test_backward_success_1()
-
-    def func_test_backward_success_2(self):
+    def test_backward_success_2(self):
         # Although var_b is modified inplace after using it, it does not used in gradient computation.
         # The inplace operator doesn't result in incorrect gradient computation.
         with paddle.fluid.dygraph.guard():
@@ -112,11 +96,6 @@ class TestInplace(unittest.TestCase):
             loss.backward()

-    def test_backward_success_2(self):
-        with _test_eager_guard():
-            self.func_test_backward_success_2()
-        self.func_test_backward_success_2()
-

 class TestDygraphInplace(unittest.TestCase):
     def setUp(self):
@@ -136,7 +115,7 @@ class TestDygraphInplace(unittest.TestCase):
     def inplace_api_processing(self, var):
         return paddle.squeeze_(var)

-    def func_test_inplace_api(self):
+    def test_inplace_api(self):
         var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
         inplace_var = self.inplace_api_processing(var)
         self.assertTrue(id(var) == id(inplace_var))
@@ -144,12 +123,7 @@ class TestDygraphInplace(unittest.TestCase):
         inplace_var[0] = 2.0
         np.testing.assert_array_equal(var.numpy(), inplace_var.numpy())

-    def test_inplace_api(self):
-        with _test_eager_guard():
-            self.func_test_inplace_api()
-        self.func_test_inplace_api()
-
-    def func_test_forward_version(self):
+    def test_forward_version(self):
         with paddle.fluid.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
@@ -163,12 +137,7 @@ class TestDygraphInplace(unittest.TestCase):
             inplace_var = self.inplace_api_processing(inplace_var)
             self.assertEqual(var.inplace_version, 3)

-    def test_forward_version(self):
-        with _test_eager_guard():
-            self.func_test_forward_version()
-        self.func_test_forward_version()
-
-    def func_test_leaf_inplace_var_error(self):
+    def test_leaf_inplace_var_error(self):
         with paddle.fluid.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             var.stop_gradient = False
@@ -178,12 +147,7 @@ class TestDygraphInplace(unittest.TestCase):

             self.assertRaises(ValueError, leaf_inplace_error)

-    def test_leaf_inplace_var_error(self):
-        with _test_eager_guard():
-            self.func_test_leaf_inplace_var_error()
-        self.func_test_leaf_inplace_var_error()
-
-    def func_test_backward_error(self):
+    def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
         with paddle.fluid.dygraph.guard():
@@ -205,12 +169,7 @@ class TestDygraphInplace(unittest.TestCase):
             ):
                 loss.backward()

-    def test_backward_error(self):
-        with _test_eager_guard():
-            self.func_test_backward_error()
-        self.func_test_backward_error()
-
-    def func_test_backward_success_1(self):
+    def test_backward_success_1(self):
         # var_b is modified inplace before using it, the inplace operator doesn't result
         # in incorrect gradient computation.
         grad_var_a, grad_var_a_inplace = 0, 1
@@ -242,12 +201,7 @@ class TestDygraphInplace(unittest.TestCase):

         self.assertTrue(self.np_compare(grad_var_a_inplace, grad_var_a))

-    def test_backward_success_1(self):
-        with _test_eager_guard():
-            self.func_test_backward_success_1()
-        self.func_test_backward_success_1()
-
-    def func_test_backward_success_2(self):
+    def test_backward_success_2(self):
         # Although var_b is modified inplace after using it, it does not used in gradient computation.
         # The inplace operator doesn't result in incorrect gradient computation.
         grad_var_a, grad_var_a_inplace = 0, 1
@@ -286,11 +240,6 @@ class TestDygraphInplace(unittest.TestCase):
         grad_var_a = var_a.grad.numpy()
         np.testing.assert_array_equal(grad_var_a_inplace, grad_var_a)

-    def test_backward_success_2(self):
-        with _test_eager_guard():
-            self.func_test_backward_success_2()
-        self.func_test_backward_success_2()
-

 class TestDygraphInplaceUnsqueeze(TestDygraphInplace):
     def non_inplace_api_processing(self, var):
@@ -507,7 +456,7 @@ class TestDygraphInplaceRemainder(TestDygraphInplaceAdd):

 class TestLossIsInplaceVar(unittest.TestCase):
-    def func_test_loss_is_inplace_var(self):
+    def test_loss_is_inplace_var(self):
         with paddle.fluid.dygraph.guard():
             var_a = paddle.ones((2, 2))
             var_a.stop_gradient = False
@@ -530,14 +479,9 @@ class TestLossIsInplaceVar(unittest.TestCase):

         np.testing.assert_array_equal(inplace_grad_var_a, grad_var_a)

-    def test_loss_is_inplace_var(self):
-        with _test_eager_guard():
-            self.func_test_loss_is_inplace_var()
-        self.func_test_loss_is_inplace_var()
-

 class TestContinuouslyInplace(unittest.TestCase):
-    def func_test_continuously_inplace(self):
+    def test_continuously_inplace(self):
         a = paddle.rand([2, 3])
         a.stop_gradient = False
         b = a * 2
@@ -548,24 +492,18 @@ class TestContinuouslyInplace(unittest.TestCase):
         b.backward()

-    def test_continuously_inplace(self):
-        with _test_eager_guard():
-            self.func_test_continuously_inplace()
-        self.func_test_continuously_inplace()
-

 class TestGetitemBeforeInplace(unittest.TestCase):
     def test_getitem_before_inplace(self):
-        with _test_eager_guard():
-            a = paddle.ones(shape=[4, 2, 3], dtype="float32")
-            a.stop_gradient = False
-            b = a**2
-            b[0] = 3
-            # getitem has no_need_buffer input
-            c = b[0:2]
-            loss = c.sum()
-            b[1] = 2
-            loss.backward()
+        a = paddle.ones(shape=[4, 2, 3], dtype="float32")
+        a.stop_gradient = False
+        b = a**2
+        b[0] = 3
+        # getitem has no_need_buffer input
+        c = b[0:2]
+        loss = c.sum()
+        b[1] = 2
+        loss.backward()


 if __name__ == '__main__':
......
@@ -21,7 +21,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
 from paddle.fluid.dygraph import to_variable
-from paddle.fluid.framework import _test_eager_guard


 def _reference_instance_norm_naive(x, scale, bias, epsilon, mean, var):
@@ -291,10 +290,6 @@ class TestElasticNormOp(unittest.TestCase):
             outputs.numpy(), out_np, rtol=1e-05, atol=1e-06
         )

-    def test_eager_api(self):
-        with _test_eager_guard():
-            self.test_norm()
-

 class TestElasticNormOpCase2(unittest.TestCase):
     def init_test_case(self):
@@ -329,10 +324,6 @@ class TestElasticNormOpCase2(unittest.TestCase):
             outputs.numpy(), out_np, rtol=1e-05, atol=1e-06
         )

-    def test_eager_api(self):
-        with _test_eager_guard():
-            self.test_norm()
-

 if __name__ == '__main__':
     unittest.main()
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestInstanceNorm(unittest.TestCase):
@@ -118,11 +117,6 @@ class TestInstanceNorm(unittest.TestCase):
         y2 = compute_v2(x)
         np.testing.assert_allclose(y1, y2, rtol=1e-05)

-    def test_eager_api(self):
-        with _test_eager_guard():
-            self.test_dygraph()
-            self.test_error()
-

 if __name__ == '__main__':
     unittest.main()
@@ -19,7 +19,6 @@ import unittest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard


 class TestSaveLoadAPIError(unittest.TestCase):
@@ -30,7 +29,7 @@ class TestSaveLoadAPIError(unittest.TestCase):
     def tearDown(self):
         self.temp_dir.cleanup()

-    def func_test_get_valid_program_error(self):
+    def test_get_valid_program_error(self):
         # case 1: CompiledProgram no program
         graph = core.Graph(core.ProgramDesc())
         compiled_program = fluid.CompiledProgram(graph)
@@ -41,12 +40,7 @@ class TestSaveLoadAPIError(unittest.TestCase):
         with self.assertRaises(TypeError):
             fluid.io._get_valid_program("program")

-    def test_get_valid_program_error(self):
-        with _test_eager_guard():
-            self.func_test_get_valid_program_error()
-        self.func_test_get_valid_program_error()
-
-    def func_test_load_vars_error(self):
+    def test_load_vars_error(self):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         # case 1: main_program type error when vars None
@@ -64,11 +58,6 @@ class TestSaveLoadAPIError(unittest.TestCase):
                 vars="vars",
             )

-    def test_load_vars_error(self):
-        with _test_eager_guard():
-            self.func_test_load_vars_error()
-        self.func_test_load_vars_error()
-

 class TestSaveInferenceModelAPIError(unittest.TestCase):
     def setUp(self):
@@ -77,7 +66,7 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
     def tearDown(self):
         self.temp_dir.cleanup()

-    def func_test_useless_feeded_var_names(self):
+    def test_useless_feeded_var_names(self):
         start_prog = fluid.Program()
         main_prog = fluid.Program()
         with fluid.program_guard(main_prog, start_prog):
@@ -98,11 +87,6 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
                 main_program=main_prog,
             )

-    def test_useless_feeded_var_names(self):
-        with _test_eager_guard():
-            self.func_test_useless_feeded_var_names()
-        self.func_test_useless_feeded_var_names()
-

 class TestWhenTrainWithNoGrad(unittest.TestCase):
     def setUp(self):
@@ -111,7 +95,7 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
     def tearDown(self):
         self.temp_dir.cleanup()

-    def func_test_when_train_with_no_grad(self):
+    def test_when_train_with_no_grad(self):
         paddle.disable_static()
         net = paddle.nn.Linear(1024, 1)
         net = paddle.jit.to_static(net)
@@ -127,11 +111,6 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
         x = paddle.rand([1024], 'float32')
         net(x)

-    def test_when_train_with_no_grad(self):
-        with _test_eager_guard():
-            self.func_test_when_train_with_no_grad()
-        self.func_test_when_train_with_no_grad()
-

 if __name__ == '__main__':
     paddle.enable_static()
......
@@ -18,7 +18,6 @@ import numpy as np

 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 def run_static(x_np, dtype, op_str, use_gpu=False):
@@ -51,14 +50,13 @@ def run_dygraph(x_np, op_str, use_gpu=True):
 def run_eager(x_np, op_str, use_gpu=True):
     with paddle.fluid.dygraph.guard():
-        with _test_eager_guard():
-            place = paddle.CPUPlace()
-            if use_gpu and fluid.core.is_compiled_with_cuda():
-                place = paddle.CUDAPlace(0)
-
-            x = paddle.to_tensor(x_np)
-            dygraph_result = getattr(paddle.tensor, op_str)(x)
-            return dygraph_result
+        place = paddle.CPUPlace()
+        if use_gpu and fluid.core.is_compiled_with_cuda():
+            place = paddle.CUDAPlace(0)
+
+        x = paddle.to_tensor(x_np)
+        dygraph_result = getattr(paddle.tensor, op_str)(x)
+        return dygraph_result


 def np_data_generator(
......
@@ -1837,5 +1837,4 @@ class TestNotJitForward(unittest.TestCase):

 if __name__ == '__main__':
-    with fluid.framework._test_eager_guard():
-        unittest.main()
+    unittest.main()
@@ -20,7 +20,6 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard


 class TestKronOp(OpTest):
@@ -104,11 +103,6 @@ class TestKronLayer(unittest.TestCase):
             (c,) = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var])
             np.testing.assert_allclose(c, np.kron(a, b))

-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_case()
-            self.test_case_with_output()
-

 class TestComplexKronOp(OpTest):
     def setUp(self):
......
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestDygraphLayerNormv2(unittest.TestCase):
@@ -66,12 +65,11 @@ class TestDygraphLayerNormv2(unittest.TestCase):
         def compute_v2(x):
             with fluid.dygraph.guard(p):
-                with _test_eager_guard():
-                    ln = paddle.nn.LayerNorm(shape[1:])
-                    x1 = paddle.to_tensor(x)
-                    x1.stop_gradient = False
-                    y = ln(x1)
-                    y.backward()
+                ln = paddle.nn.LayerNorm(shape[1:])
+                x1 = paddle.to_tensor(x)
+                x1.stop_gradient = False
+                y = ln(x1)
+                y.backward()
                 return y.numpy(), x1.gradient()

         x = np.random.randn(*shape).astype("float32")
......