Unverified Commit 3210ea8e authored by 姜永久, committed by GitHub

rm unittests eager guard test part8 gaussian2gumbel (#48821)

* rm unittests eager guard test part8 gaussian2gumbel

* rm

* modify

* fix

* fix GroupNorm param
Parent 675b3486
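Every hunk below applies the same mechanical cleanup: now that eager mode is Paddle's default dygraph mode, the `_test_eager_guard` imports, the extra `test_*eager*` methods that re-ran a test under the guard, and the `func_*`/`test_*` indirection in `TestGraphKhopSampler` are all dropped, so each test body runs exactly once. A minimal sketch of the pattern this commit removes, using hypothetical class and method names:

    import unittest

    from paddle.fluid.framework import _test_eager_guard  # import removed by this commit


    class TestSomeOp(unittest.TestCase):  # hypothetical test class
        # Before: the assertions lived in a func_* helper and ran twice,
        # once under the eager guard and once in legacy dygraph mode.
        def func_check(self):
            ...  # the actual assertions

        def test_check_old(self):
            with _test_eager_guard():
                self.func_check()  # eager-mode pass
            self.func_check()      # legacy dygraph pass

        # After: eager mode is the default, so a single plain test suffices.
        def test_check(self):
            ...  # the actual assertions, run once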
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.tests.unittests.op_test import OpTest, convert_uint16_to_float
 from paddle.tensor import random
 
@@ -49,10 +48,6 @@ class TestGaussianRandomOp(OpTest):
     def test_check_output(self):
         self.check_output_customized(self.verify_output)
 
-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_check_output()
-
     def verify_output(self, outs):
         self.assertEqual(outs[0].shape, (123, 92))
         hist, _ = np.histogram(outs[0], range=(-3, 5))
@@ -96,10 +91,6 @@ class TestGaussianRandomBF16Op(OpTest):
             self.verify_output, place=core.CUDAPlace(0)
         )
 
-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_check_output()
-
     def verify_output(self, outs):
         outs = convert_uint16_to_float(outs)
         self.assertEqual(outs[0].shape, (123, 92))
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
 import paddle.nn.functional as F
-from paddle.fluid.framework import _test_eager_guard
 
 
 def gelu(x, approximate):
@@ -98,10 +97,6 @@ class TestGeluOp(unittest.TestCase):
             x_g_ref, x_g_fast_math, rtol=1e-05, atol=0.0005
         )
 
-    def test_fast_math_eager(self):
-        with _test_eager_guard():
-            self.test_fast_math()
-
 
 if __name__ == '__main__':
     unittest.main()
@@ -49,7 +49,7 @@ class TestGraphKhopSampler(unittest.TestCase):
         self.sample_sizes = [5, 5]
         self.dst_src_dict = dst_src_dict
 
-    def func_sample_result(self):
+    def test_sample_result(self):
         paddle.disable_static()
         row = paddle.to_tensor(self.row)
         colptr = paddle.to_tensor(self.colptr)
@@ -89,12 +89,7 @@ class TestGraphKhopSampler(unittest.TestCase):
             # Ensure the correct sample neighbors.
             self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
 
-    def test_sample_result(self):
-        with fluid.framework._test_eager_guard():
-            self.func_sample_result()
-        self.func_sample_result()
-
-    def func_uva_sample_result(self):
+    def test_uva_sample_result(self):
         paddle.disable_static()
         if paddle.fluid.core.is_compiled_with_cuda():
             row = None
@@ -151,11 +146,6 @@ class TestGraphKhopSampler(unittest.TestCase):
                 in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n])
                 self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
 
-    def test_uva_sample_result(self):
-        with fluid.framework._test_eager_guard():
-            self.func_uva_sample_result()
-        self.func_uva_sample_result()
-
     def test_sample_result_static_with_eids(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
@@ -18,7 +18,6 @@ import numpy as np
 from op_test import OpTest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 
 def graph_send_recv_wrapper(
@@ -381,12 +380,6 @@ class API_GraphSendRecvOpTest(unittest.TestCase):
         )
         np.testing.assert_allclose(np_sum, ret[0], rtol=1e-05, atol=1e-06)
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_dygraph()
-            self.test_int32_input()
-            self.test_set_outsize_gpu()
-
 
 class API_GeometricSendURecvTest(unittest.TestCase):
     def test_static(self):
@@ -533,12 +526,6 @@ class API_GeometricSendURecvTest(unittest.TestCase):
         )
         np.testing.assert_allclose(np_sum, ret[0], rtol=1e-05, atol=1e-06)
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_dygraph()
-            self.test_int32_input()
-            self.test_set_outsize_gpu()
-
 
 if __name__ == '__main__':
     unittest.main()
@@ -20,7 +20,6 @@ from op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 
 
 def get_broadcast_shape(shp1, shp2):
@@ -1060,16 +1059,6 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
             ),
         )
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_compute_all_with_sum()
-            self.test_compute_all_with_mean()
-            self.test_compute_all_with_max()
-            self.test_compute_all_with_max_fp16()
-            self.test_compute_all_with_min()
-            self.test_compute_all_with_min_fp16()
-            self.test_reshape_lhs_rhs()
-
 
 if __name__ == "__main__":
     unittest.main()
@@ -18,7 +18,6 @@ import numpy as np
 from op_test import OpTest
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 
 def compute_graph_send_uv(inputs, attributes):
@@ -264,10 +263,6 @@ class API_GeometricSendUVTest(unittest.TestCase):
             ),
         )
 
-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_compute_all_dygraph()
-
 
 if __name__ == "__main__":
     unittest.main()
@@ -21,7 +21,6 @@ from testsuite import create_op
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 
 
 def group_norm_naive(x, scale, bias, epsilon, groups, data_layout):
@@ -307,20 +306,15 @@ class TestGroupNormEager(unittest.TestCase):
             groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
-            with _test_eager_guard():
-                tensor_eager_1 = fluid.dygraph.to_variable(input)
-                tensor_eager_1.stop_gradient = False
-                groupNorm_eager = paddle.nn.GroupNorm(
-                    num_channels=32, num_groups=4
-                )
-                ret2 = groupNorm_eager(tensor_eager_1)
-                ret2.backward()
-                self.assertEqual(
-                    (
-                        tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()
-                    ).all(),
-                    True,
-                )
+            tensor_eager_1 = fluid.dygraph.to_variable(input)
+            tensor_eager_1.stop_gradient = False
+            groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
+            ret2 = groupNorm_eager(tensor_eager_1)
+            ret2.backward()
+            self.assertEqual(
+                (tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(),
+                True,
+            )
 
 
 class TestGroupNormEager_fp32(unittest.TestCase):
@@ -335,20 +329,15 @@ class TestGroupNormEager_fp32(unittest.TestCase):
             groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
-            with _test_eager_guard():
-                tensor_eager_1 = fluid.dygraph.to_variable(input)
-                tensor_eager_1.stop_gradient = False
-                groupNorm_eager = paddle.nn.GroupNorm(
-                    num_channels=32, num_groups=4
-                )
-                ret2 = groupNorm_eager(tensor_eager_1)
-                ret2.backward()
-                self.assertEqual(
-                    (
-                        tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()
-                    ).all(),
-                    True,
-                )
+            tensor_eager_1 = fluid.dygraph.to_variable(input)
+            tensor_eager_1.stop_gradient = False
+            groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
+            ret2 = groupNorm_eager(tensor_eager_1)
+            ret2.backward()
+            self.assertEqual(
+                (tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(),
+                True,
+            )
 
 
 class TestGroupNormEager_fp16(unittest.TestCase):
@@ -367,20 +356,15 @@ class TestGroupNormEager_fp16(unittest.TestCase):
             groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
-            with _test_eager_guard():
-                tensor_eager_1 = fluid.dygraph.to_variable(input)
-                tensor_eager_1.stop_gradient = False
-                groupNorm_eager = paddle.nn.GroupNorm(
-                    num_channels=32, num_groups=4
-                )
-                ret2 = groupNorm_eager(tensor_eager_1)
-                ret2.backward()
-                self.assertEqual(
-                    (
-                        tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()
-                    ).all(),
-                    True,
-                )
+            tensor_eager_1 = fluid.dygraph.to_variable(input)
+            tensor_eager_1.stop_gradient = False
+            groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
+            ret2 = groupNorm_eager(tensor_eager_1)
+            ret2.backward()
+            self.assertEqual(
+                (tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(),
+                True,
+            )
 
 
 if __name__ == '__main__':
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 
 
 def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups):
@@ -73,10 +72,6 @@ class TestGroupNormAPIV2_With_General_Dimensions(unittest.TestCase):
         self.assertTrue(np.allclose(result1, expect_res1, atol=1e-5))
         self.assertTrue(np.allclose(result2, expect_res2, atol=1e-5))
 
-    def test_eager_api(self):
-        with _test_eager_guard():
-            self.test_numerical_accuracy()
-
 
 class TestGroupNormAPIV2_With_General_Dimensions_fp16(unittest.TestCase):
     def test_numerical_accuracy(self):
@@ -125,10 +120,6 @@ class TestGroupNormAPIV2_With_General_Dimensions_fp16(unittest.TestCase):
             result2, expect_res2, rtol=1e-2, atol=1e-3
         )
 
-    def test_eager_api(self):
-        with _test_eager_guard():
-            self.test_numerical_accuracy()
-
 
 class TestGroupNormDimException(unittest.TestCase):
     def test_exception(self):
@@ -17,7 +17,6 @@ from op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 
@@ -206,12 +205,6 @@ class TestGumbelSoftmaxAPI(unittest.TestCase):
         out_np = np.array(y)
         self.assertEqual(out_np.sum(), self.count_expected)
 
-        with _test_eager_guard():
-            x = paddle.to_tensor(self.x)
-            y = paddle.nn.functional.gumbel_softmax(x, hard=True)
-            out_np = np.array(y)
-            self.assertEqual(out_np.sum(), self.count_expected)
-
 
 class TestGumbelSoftmaxOpError(unittest.TestCase):
     def test_errors(self):