Unverified commit 68b78f30, authored by 姜永久, committed by GitHub

remove unittests eager guard tests part4 concat2custom_grad (#48802)

* remove unittests eager guard tests part4 concat2custom_grad

* modify conv2d&3d test

* fix a bug in mem_allocated

* ValueError->IndexError for cross_entropy_loss test

* reset max_alloc_size
Parent a641312a
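The commit-message bullets above boil down to one mechanical pattern, applied file by file in the diff that follows: with eager mode now Paddle's default, each `func_test_*` helper plus its `_test_eager_guard` double-run wrapper collapses into a single plain `test_*` method. A minimal before/after sketch (method names taken from the `Cov_Test` hunks below; assertion bodies elided, and `contextlib.nullcontext` stands in for the deleted guard so the sketch stays runnable):

```python
import contextlib
import unittest

# Stand-in for the removed paddle.fluid.framework._test_eager_guard.
_test_eager_guard = contextlib.nullcontext

class CovTestBefore(unittest.TestCase):
    def func_test_tensor_cov_default(self):
        pass  # real assertions elided

    def test_tensor_cov_default(self):
        with _test_eager_guard():               # pass 1: eager mode
            self.func_test_tensor_cov_default()
        self.func_test_tensor_cov_default()     # pass 2: legacy dygraph

class CovTestAfter(unittest.TestCase):
    def test_tensor_cov_default(self):
        pass  # same assertions, run once; eager is the only mode left
```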
@@ -22,7 +22,6 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.tests.unittests.op_test import (
OpTest,
convert_float_to_uint16,
@@ -354,12 +353,6 @@ class TestConcatAPI(unittest.TestCase):
self.assertEqual((out1.numpy() == np_out1).all(), True)
self.assertEqual((out2.numpy() == np_out2).all(), True)
def test_eager(self):
with _test_eager_guard():
self.test_api()
self.test_fluid_api()
self.test_imperative()
def test_errors(self):
with program_guard(Program(), Program()):
# The item in input must be Variable.
......
@@ -21,7 +21,6 @@ import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import paddle.nn.functional as F
from paddle import fluid, nn
from paddle.fluid.framework import _test_eager_guard
def _reverse_repeat_list(t, n):
@@ -222,12 +221,8 @@ class Conv2DTestCase(unittest.TestCase):
result2 = self.functional(place)
with dg.guard(place):
result3, g1 = self.paddle_nn_layer()
with _test_eager_guard():
res_eager, g2 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
np.testing.assert_allclose(result3, res_eager, rtol=1e-05)
np.testing.assert_allclose(g1, g2, rtol=1e-05)
def runTest(self):
place = fluid.CPUPlace()
......
@@ -21,7 +21,6 @@ import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import paddle.nn.functional as F
from paddle import fluid, nn
from paddle.fluid.framework import _test_eager_guard
class Conv3DTestCase(unittest.TestCase):
@@ -184,12 +183,8 @@ class Conv3DTestCase(unittest.TestCase):
result2 = self.functional(place)
with dg.guard(place):
result3, g1 = self.paddle_nn_layer()
with _test_eager_guard():
res_eager, g2 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
np.testing.assert_allclose(result3, res_eager, rtol=1e-05)
np.testing.assert_allclose(g1, g2, rtol=1e-05)
def runTest(self):
place = fluid.CPUPlace()
......
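The Conv2D/Conv3D hunks above are the one place the pattern differs: the guarded second run of `paddle_nn_layer()` is deleted along with the eager-vs-legacy checks (`result3` vs `res_eager`, `g1` vs `g2`), because the single `dg.guard(place)` run already executes eagerly. What survives is the static/functional/dygraph agreement check; a runnable sketch of that comparison shape (dummy arrays stand in for the real conv outputs):

```python
import numpy as np

def check_agreement(result1, result2, result3):
    # result1: static graph, result2: functional API, result3: the single
    # dygraph (eager) run. The former result3-vs-res_eager check is gone.
    np.testing.assert_array_almost_equal(result1, result2)
    np.testing.assert_array_almost_equal(result2, result3)

out = np.random.random([2, 6, 8, 8])
check_agreement(out, out.copy(), out.copy())
```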
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
def numpy_cov(np_arr, rowvar=True, ddof=1, fweights=None, aweights=None):
@@ -36,7 +35,7 @@ class Cov_Test(unittest.TestCase):
self.shape = [20, 10]
self.weightshape = [10]
def func_test_tensor_cov_default(self):
def test_tensor_cov_default(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
@@ -59,12 +58,7 @@
)
np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05)
def test_tensor_cov_default(self):
with _test_eager_guard():
self.func_test_tensor_cov_default()
self.func_test_tensor_cov_default()
def func_test_tensor_cov_rowvar(self):
def test_tensor_cov_rowvar(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
@@ -91,12 +85,7 @@
)
np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05)
def test_tensor_cov_rowvar(self):
with _test_eager_guard():
self.func_test_tensor_cov_rowvar()
self.func_test_tensor_cov_rowvar()
def func_test_tensor_cov_ddof(self):
def test_tensor_cov_ddof(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
@@ -123,12 +112,7 @@
)
np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05)
def test_tensor_cov_ddof(self):
with _test_eager_guard():
self.func_test_tensor_cov_ddof()
self.func_test_tensor_cov_ddof()
def func_test_tensor_cov_fweights(self):
def test_tensor_cov_fweights(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
@@ -159,12 +143,7 @@
)
np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05)
def test_tensor_cov_fweights(self):
with _test_eager_guard():
self.func_test_tensor_cov_fweights()
self.func_test_tensor_cov_fweights()
def func_test_tensor_cov_aweights(self):
def test_tensor_cov_aweights(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
@@ -195,12 +174,7 @@
)
np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05)
def test_tensor_cov_aweights(self):
with _test_eager_guard():
self.func_test_tensor_cov_aweights()
self.func_test_tensor_cov_aweights()
def func_test_tensor_cov_weights(self):
def test_tensor_cov_weights(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
@@ -233,11 +207,6 @@
)
np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05)
def test_tensor_cov_weights(self):
with _test_eager_guard():
self.func_test_tensor_cov_weights()
self.func_test_tensor_cov_weights()
class Cov_Test2(Cov_Test):
def setUp(self):
@@ -254,7 +223,7 @@ class Cov_Test3(unittest.TestCase):
self.fw_s = 1.0
self.aw_s = 1.0
def func_test_errors(self):
def test_errors(self):
def test_err():
np_arr = np.random.rand(*self.shape).astype('float64')
np_fw = self.fw_s * np.random.rand(*self.fweightshape).astype(
@@ -276,11 +245,6 @@
self.assertRaises(ValueError, test_err)
def test_errors(self):
with _test_eager_guard():
self.func_test_errors()
self.func_test_errors()
# Input(fweights) only support N-D (N<=1) tensor
class Cov_Test4(Cov_Test3):
......
@@ -17,12 +17,11 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
class TestCreateParameterError(unittest.TestCase):
def func_errors(self):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
@@ -52,11 +51,6 @@ class TestCreateParameterError(unittest.TestCase):
self.assertRaises(TypeError, test_default_initializer)
def test_errors(self):
with fluid.framework._test_eager_guard():
self.func_errors()
self.func_errors()
if __name__ == '__main__':
paddle.enable_static()
......
@@ -21,7 +21,6 @@ from test_softmax_with_cross_entropy_op import cross_entropy
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
def log_softmax(x, axis=-1):
@@ -1708,41 +1707,6 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
def test_soft_1d_dygraph_api(self):
with _test_eager_guard():
self.test_cross_entropy_loss_soft_1d()
self.test_cross_entropy_loss_soft_1d_weight()
self.test_cross_entropy_loss_soft_1d_mean()
self.test_cross_entropy_loss_soft_1d_weight_mean()
# put all testcases in one test will be failed
def test_soft_2d_dygraph_api(self):
with _test_eager_guard():
self.test_cross_entropy_loss_soft_2d()
self.test_cross_entropy_loss_soft_2d_weight_mean()
def test_other_dygraph_api(self):
with _test_eager_guard():
self.test_cross_entropy_loss_1d_with_mean_ignore()
self.test_cross_entropy_loss_1d_with_mean_ignore_negative()
self.test_cross_entropy_loss_1d_with_weight_mean_ignore()
self.test_cross_entropy_loss_1d_with_weight_mean_ignore_exceedlabel()
self.test_cross_entropy_loss_1d_with_weight_mean()
self.test_cross_entropy_loss_1d_with_weight_sum()
self.test_cross_entropy_loss_1d_with_weight_none()
self.test_cross_entropy_loss_1d_with_weight_none_func()
self.test_cross_entropy_loss_1d_mean()
self.test_cross_entropy_loss_1d_sum()
self.test_cross_entropy_loss_1d_none()
self.test_cross_entropy_loss_2d_with_weight_none()
self.test_cross_entropy_loss_2d_with_weight_axis_change_mean()
self.test_cross_entropy_loss_2d_with_weight_mean_ignore_exceedlabel()
self.test_cross_entropy_loss_2d_with_weight_mean()
self.test_cross_entropy_loss_2d_with_weight_sum()
self.test_cross_entropy_loss_2d_none()
self.test_cross_entropy_loss_2d_mean()
self.test_cross_entropy_loss_2d_sum()
class TestCrossEntropyFAPIError(unittest.TestCase):
def test_errors(self):
@@ -1777,7 +1741,7 @@ class TestCrossEntropyFAPIError(unittest.TestCase):
ignore_index=-100,
)
self.assertRaises(ValueError, test_LabelValue_ExceedMax)
self.assertRaises(IndexError, test_LabelValue_ExceedMax)
def test_LabelValue_ExceedMin():
input_data = paddle.rand(shape=[20, 100])
@@ -1793,7 +1757,7 @@ class TestCrossEntropyFAPIError(unittest.TestCase):
ignore_index=-100,
)
self.assertRaises(ValueError, test_LabelValue_ExceedMin)
self.assertRaises(IndexError, test_LabelValue_ExceedMin)
def static_test_WeightLength_NotEqual():
input_np = np.random.random([2, 4]).astype('float32')
......
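The two assertion flips above record a behavioral change rather than a refactor: on the eager path, an out-of-range class label in `paddle.nn.functional.cross_entropy` now surfaces as an `IndexError` instead of a `ValueError`. A hedged repro sketch, reusing the `[20, 100]` shape from the test (exact message and raise site may vary by device and Paddle version):

```python
import paddle
import paddle.nn.functional as F

input_data = paddle.rand(shape=[20, 100])
# Valid class indices are 0..99; 100 exceeds the class count.
label_data = paddle.full(shape=[20], fill_value=100, dtype='int64')
try:
    F.cross_entropy(input=input_data, label=label_data)
except IndexError:
    print("out-of-range label raises IndexError on the eager path")
```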
@@ -21,7 +21,6 @@ from paddle.device.cuda import (
memory_allocated,
)
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
class TestMaxMemoryAllocated(unittest.TestCase):
@@ -43,12 +42,7 @@
peak_memory_allocated_size, max_memory_allocated(device)
)
def test_max_memory_allocated(self):
with _test_eager_guard():
self.func_test_max_memory_allocated()
self.func_test_max_memory_allocated()
def func_test_max_memory_allocated_for_all_places(self):
def test_max_memory_allocated_for_all_places(self):
if core.is_compiled_with_cuda():
gpu_num = device_count()
for i in range(gpu_num):
@@ -57,12 +51,7 @@
self.func_test_max_memory_allocated(i)
self.func_test_max_memory_allocated("gpu:" + str(i))
def test_max_memory_allocated_for_all_places(self):
with _test_eager_guard():
self.func_test_max_memory_allocated_for_all_places()
self.func_test_max_memory_allocated_for_all_places()
def func_test_max_memory_allocated_exception(self):
def test_max_memory_allocated_exception(self):
if core.is_compiled_with_cuda():
wrong_device = [
core.CPUPlace(),
@@ -79,11 +68,6 @@
with self.assertRaises(BaseException):
max_memory_allocated()
def test_max_memory_allocated_exception(self):
with _test_eager_guard():
self.func_test_max_memory_allocated_exception()
self.func_test_max_memory_allocated_exception()
if __name__ == "__main__":
unittest.main()
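For orientation, the stats this file exercises come from `paddle.device.cuda`; string device IDs are accepted, as the `"gpu:" + str(i)` loop above shows. A minimal sketch, assuming a CUDA build and the default GPU:

```python
import paddle
from paddle.device.cuda import max_memory_allocated, memory_allocated

if paddle.is_compiled_with_cuda():
    t = paddle.zeros([1024 * 1024], dtype='float32')  # ~4 MB on the default GPU
    current = memory_allocated("gpu:0")               # bytes currently allocated
    peak = max_memory_allocated("gpu:0")              # high-water mark since start
    assert peak >= current                            # the peak never trails current
```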
@@ -17,7 +17,6 @@ import unittest
import paddle
from paddle.device.cuda import device_count, memory_reserved
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
class TestMemoryreserved(unittest.TestCase):
@@ -28,12 +27,7 @@
memory_reserved_size = memory_reserved(device)
self.assertEqual(memory_reserved_size, alloc_size)
def test_memory_reserved(self):
with _test_eager_guard():
self.func_test_memory_reserved()
self.func_test_memory_reserved()
def func_test_memory_reserved_for_all_places(self):
def test_memory_reserved_for_all_places(self):
if core.is_compiled_with_cuda():
gpu_num = device_count()
for i in range(gpu_num):
@@ -42,12 +36,7 @@
self.func_test_memory_reserved(i)
self.func_test_memory_reserved("gpu:" + str(i))
def test_memory_reserved_for_all_places(self):
with _test_eager_guard():
self.func_test_memory_reserved_for_all_places()
self.func_test_memory_reserved_for_all_places()
def func_test_memory_reserved_exception(self):
def test_memory_reserved_exception(self):
if core.is_compiled_with_cuda():
wrong_device = [
core.CPUPlace(),
@@ -64,11 +53,6 @@
with self.assertRaises(BaseException):
memory_reserved()
def test_memory_reserved_exception(self):
with _test_eager_guard():
self.func_test_memory_reserved_exception()
self.func_test_memory_reserved_exception()
if __name__ == "__main__":
unittest.main()
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid.dygraph as dg
from paddle.fluid.framework import _test_eager_guard
class TestTensorBackward(unittest.TestCase):
@@ -28,7 +27,7 @@ class TestTensorBackward(unittest.TestCase):
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_tensor_backward(self):
def test_tensor_backward(self):
for dtype in self._dtypes:
x = np.random.random([2, 100]).astype(dtype)
y = np.random.random([100, 2]).astype(dtype)
@@ -49,11 +48,6 @@
x_grad, x_tensor.grad.numpy(), rtol=1e-05
)
def test_tensor_backward(self):
with _test_eager_guard():
self.func_tensor_backward()
self.func_tensor_backward()
class TestBackwardAPI(unittest.TestCase):
def setUp(self):
@@ -62,7 +56,7 @@ class TestBackwardAPI(unittest.TestCase):
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_backward_api(self):
def test_backward_api(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
@@ -86,12 +80,7 @@
x_grad * 2, x_tensor.grad.numpy(), rtol=1e-05
)
def test_backward_api(self):
with _test_eager_guard():
self.func_backward_api()
self.func_backward_api()
def func_backward_single_tensor(self):
def test_backward_single_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
@@ -112,12 +101,7 @@
x_grad, x_tensor.grad.numpy(), rtol=1e-05
)
def test_backward_single_tensor(self):
with _test_eager_guard():
self.func_backward_single_tensor()
self.func_backward_single_tensor()
def func_backward_none_grad_tensor(self):
def test_backward_none_grad_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
@@ -137,12 +121,7 @@
x_grad, x_tensor.grad.numpy(), rtol=1e-05
)
def test_backward_none_grad_tensor(self):
with _test_eager_guard():
self.func_backward_none_grad_tensor()
self.func_backward_none_grad_tensor()
def func_backward_accumulator_with_init_grad(self):
def test_backward_accumulator_with_init_grad(self):
for dtype in self._dtypes:
x = np.random.random(
[
@@ -180,11 +159,6 @@
x_grad, x_tensor.grad.numpy(), rtol=1e-05
)
def test_backward_accumulator_with_init_grad(self):
with _test_eager_guard():
self.func_backward_accumulator_with_init_grad()
self.func_backward_accumulator_with_init_grad()
if __name__ == '__main__':
unittest.main()
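The backward-API tests above get the same de-duplication. The flow they exercise, in minimal runnable form (shapes taken from the hunks; eager mode needs no guard):

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.random([2, 2]).astype('float32'), stop_gradient=False)
y = paddle.to_tensor(np.random.random([2, 2]).astype('float32'))
z = paddle.matmul(x, y).sum()  # scalar loss
z.backward()                   # runs on the default eager engine
print(x.grad.numpy())          # gradient accumulated on the leaf tensor
```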