Unverified · Commit d808f160 authored by 姜永久, committed by GitHub

rm eager guard tests part3_1 (#49059)

Parent d0fefa23
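Every hunk below applies the same mechanical cleanup: the transitional `_test_eager_guard` context manager (previously used to run a test body a second time under the then-new eager dygraph mode) is removed together with its duplicated call, so each test body now runs once in the default mode. A minimal sketch of the before/after pattern follows; `TestExampleAfter` and `test_check` are hypothetical names for illustration, not identifiers from this commit, and the "before" shape is shown in comments because it needs an older Paddle where `_test_eager_guard` still exists.

import unittest

# Before this cleanup, tests typically looked like this (sketch):
#
#     from paddle.fluid.framework import _test_eager_guard
#
#     class TestExampleBefore(unittest.TestCase):
#         def func_check(self):
#             ...  # actual assertions
#
#         def test_check(self):
#             with _test_eager_guard():
#                 self.func_check()   # run under eager mode
#             self.func_check()       # run again without the guard


# After: the guard and the duplicated call are gone; the assertions run
# once, directly, under the now-default mode.
class TestExampleAfter(unittest.TestCase):
    def test_check(self):
        ...  # actual assertions


if __name__ == '__main__':
    unittest.main()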
@@ -24,7 +24,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestCastOpFp32ToFp64(OpTest):
@@ -122,16 +121,15 @@ class TestCastOpError(unittest.TestCase):
 class TestCastOpEager(unittest.TestCase):
     def test_eager(self):
         with paddle.fluid.dygraph.base.guard():
-            with _test_eager_guard():
-                x = paddle.ones([2, 2], dtype="float16")
-                x.stop_gradient = False
-                out = paddle.cast(x, "float32")
-                np.testing.assert_array_equal(
-                    out, np.ones([2, 2]).astype('float32')
-                )
-                out.backward()
-                np.testing.assert_array_equal(x.gradient(), x.numpy())
-                self.assertTrue(x.gradient().dtype == np.float16)
+            x = paddle.ones([2, 2], dtype="float16")
+            x.stop_gradient = False
+            out = paddle.cast(x, "float32")
+            np.testing.assert_array_equal(
+                out, np.ones([2, 2]).astype('float32')
+            )
+            out.backward()
+            np.testing.assert_array_equal(x.gradient(), x.numpy())
+            self.assertTrue(x.gradient().dtype == np.float16)


 class TestCastDoubleGradCheck(unittest.TestCase):
...
@@ -20,7 +20,6 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestClipOp(OpTest):
@@ -231,7 +230,7 @@ class TestClipAPI(unittest.TestCase):
         )
         paddle.disable_static()

-    def func_clip_dygraph(self):
+    def test_clip_dygraph(self):
         paddle.disable_static()
         place = (
             fluid.CUDAPlace(0)
@@ -279,20 +278,14 @@ class TestClipAPI(unittest.TestCase):
             out_6.numpy(), data.clip(0.2, 0.8), rtol=1e-05
         )

-    def test_clip_dygraph(self):
-        with _test_eager_guard():
-            self.func_clip_dygraph()
-        self.func_clip_dygraph()
-
     def test_clip_dygraph_default_max(self):
         paddle.disable_static()
-        with _test_eager_guard():
-            x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
-            x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
-            x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
-            egr_out1 = paddle.clip(x_int32, min=1)
-            egr_out2 = paddle.clip(x_int64, min=1)
-            egr_out3 = paddle.clip(x_f32, min=1)
+        x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
+        x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
+        x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
+        egr_out1 = paddle.clip(x_int32, min=1)
+        egr_out2 = paddle.clip(x_int64, min=1)
+        egr_out3 = paddle.clip(x_f32, min=1)
         x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
         x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
         x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
...
@@ -19,7 +19,6 @@ from op_test import OpTest
 import paddle
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard


 class TestComplexAbsOp(OpTest):
@@ -109,10 +108,6 @@ class TestAbs(unittest.TestCase):
         y = paddle.abs(paddle.to_tensor(x))
         np.testing.assert_allclose(np.abs(x), y.numpy(), rtol=1e-05)

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_all_positive()
-

 class TestRealAbsOp(OpTest):
     def setUp(self):
...
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestComplexCastOp(unittest.TestCase):
@@ -80,12 +79,6 @@ class TestComplexCastOp(unittest.TestCase):
             c_128.cast('complex128').numpy(), c_64.numpy(), rtol=1e-05
         )

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_complex64_complex128()
-            self.test_real_to_complex()
-            self.test_complex_to_real()
-

 if __name__ == '__main__':
     unittest.main()
@@ -20,7 +20,6 @@ from numpy.random import random as rand
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard

 paddle_apis = {
     "add": paddle.add,
@@ -112,12 +111,6 @@ class TestComplexElementwiseLayers(unittest.TestCase):
         self.compare_by_basic_api(x, y)
         self.compare_op_by_basic_api(x, y)

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_real_x_complex_y()
-            self.test_complex_x_real_y()
-            self.test_complex_xy()
-

 if __name__ == '__main__':
     unittest.main()
@@ -18,7 +18,6 @@ import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard


 class TestComplexGetitemLayer(unittest.TestCase):
@@ -95,15 +94,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
         np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_case1()
-            self.test_case2()
-            self.test_case3()
-            self.test_case4()
-            self.test_case5()
-            self.test_case6()
-

 if __name__ == '__main__':
     unittest.main()