Unverified · Commit 0e1538ad authored by 姜永久, committed by GitHub

rm unittests eager guard tests part18 pool2d2retian_graph (#48844)

* rm unittests eager guard tests part18 pool2d2retian_graph

* modify pylayer test
Parent b333d7ed
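
For context, here is a minimal, illustrative sketch of the test pattern this commit (like the rest of the "rm eager guard tests" series) removes; it is not part of the diff below, and the class and method names are hypothetical. Before the change, a dygraph test was typically split into a func_* helper that the public test_* method ran twice, once inside the _test_eager_guard() context from paddle.fluid.framework (eager mode) and once outside it; with eager mode now the default, the guard and the extra indirection are dropped and the helper becomes the test itself. The old-pattern class only runs on Paddle versions that still provide _test_eager_guard.

# Illustrative only, not part of this diff; names are hypothetical.
import unittest

import paddle
from paddle.fluid.framework import _test_eager_guard  # removed by this commit series


class ExampleDygraphTestBefore(unittest.TestCase):
    def func_api_dygraph(self):
        x = paddle.to_tensor([1.0, 2.0])
        self.assertEqual(x.shape, [2])

    def test_api_dygraph(self):
        # old pattern: run once under the eager guard, once in the then-default mode
        with _test_eager_guard():
            self.func_api_dygraph()
        self.func_api_dygraph()


class ExampleDygraphTestAfter(unittest.TestCase):
    def test_api_dygraph(self):
        # new pattern: eager mode is the default, so a single direct test suffices
        x = paddle.to_tensor([1.0, 2.0])
        self.assertEqual(x.shape, [2])


if __name__ == '__main__':
    unittest.main()
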
......@@ -24,7 +24,6 @@ from test_pool2d_op import (
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool2d, max_pool2d
......@@ -372,10 +371,6 @@ class TestPool2D_API(unittest.TestCase):
self.check_max_dygraph_ceilmode_results(place)
self.check_max_dygraph_nhwc_results(place)
def test_dygraph_api(self):
with _test_eager_guard():
self.test_pool2d()
class TestPool2DError_API(unittest.TestCase):
def test_error_api(self):
......@@ -602,10 +597,6 @@ class TestPool2DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_stride_out_of_range)
def test_dygraph_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__':
unittest.main()
......@@ -24,7 +24,6 @@ from test_pool3d_op import (
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool3d, max_pool3d
......@@ -367,10 +366,6 @@ class TestPool3D_API(unittest.TestCase):
self.check_max_dygraph_ndhwc_results(place)
self.check_max_dygraph_ceilmode_results(place)
def test_dygraph_api(self):
with _test_eager_guard():
self.test_pool3d()
class TestPool3DError_API(unittest.TestCase):
def test_error_api(self):
......@@ -568,10 +563,6 @@ class TestPool3DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_size_out_of_range)
def test_dygraph_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__':
unittest.main()
......@@ -22,7 +22,6 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program
from paddle.fluid.framework import _test_eager_guard
def ref_prelu(x, weight):
......@@ -79,10 +78,6 @@ class TestFunctionalPReluAPI(unittest.TestCase):
self.dygraph_check(self.weight_np_0)
self.dygraph_check(self.weight_np_1)
def test_dygraph_api_eager(self):
with _test_eager_guard():
self.test_dygraph_api()
def test_error(self):
with paddle.static.program_guard(paddle.static.Program()):
weight_fp32 = paddle.fluid.data(
......
......@@ -19,7 +19,6 @@ import numpy as np
import paddle
from paddle.distributed.models.moe import utils
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
def count(x, upper_num):
......@@ -115,7 +114,7 @@ class TestPruneGateByCapacityAPI1(unittest.TestCase):
)
assert_allclose(res[0], self.out, self.n_expert)
def func_dygraph_api(self):
def test_dygraph_api(self):
paddle.disable_static(self.place)
gate_idx_tensor = paddle.to_tensor(self.gate_idx)
expert_count_tensor = paddle.to_tensor(self.expert_count)
......@@ -124,11 +123,6 @@ class TestPruneGateByCapacityAPI1(unittest.TestCase):
)
assert_allclose(out.numpy(), self.out, self.n_expert)
def test_dygraph_api(self):
with _test_eager_guard():
self.func_dygraph_api()
self.func_dygraph_api()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.autograd.py_layer import EagerPyLayer, LegacyPyLayer
from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode
from paddle.fluid.framework import in_dygraph_mode
class FakeTensor(paddle.fluid.core.VarBase):
......@@ -27,7 +27,7 @@ class FakeTensor(paddle.fluid.core.VarBase):
class TestPyLayer(unittest.TestCase):
def func_test_simple_pylayer_multiple_output(self):
def test_simple_pylayer_multiple_output(self):
class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x1, x2, func1, func2=paddle.square):
......@@ -59,12 +59,7 @@ class TestPyLayer(unittest.TestCase):
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10
)
def test_simple_pylayer_multiple_output(self):
with _test_eager_guard():
self.func_test_simple_pylayer_multiple_output()
self.func_test_simple_pylayer_multiple_output()
def func_test_simple_pylayer_return_none_with_no_grad(self):
def test_simple_pylayer_return_none_with_no_grad(self):
class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x1, x2, func1, func2=paddle.square):
......@@ -100,12 +95,7 @@ class TestPyLayer(unittest.TestCase):
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10
)
def test_simple_pylayer_return_none_with_no_grad(self):
with _test_eager_guard():
self.func_test_simple_pylayer_return_none_with_no_grad()
self.func_test_simple_pylayer_return_none_with_no_grad()
def func_test_simple_pylayer_single_output(self):
def test_simple_pylayer_single_output(self):
class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x1, func1, func2=paddle.square):
......@@ -133,12 +123,7 @@ class TestPyLayer(unittest.TestCase):
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10
)
def test_simple_pylayer_single_output(self):
with _test_eager_guard():
self.func_test_simple_pylayer_single_output()
self.func_test_simple_pylayer_single_output()
def func_test_pylayer_num_output_match(self):
def test_pylayer_num_output_match(self):
class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(
......@@ -160,12 +145,7 @@ class TestPyLayer(unittest.TestCase):
with self.assertRaises(ValueError):
z.mean().backward()
def test_pylayer_num_output_match(self):
with _test_eager_guard():
self.func_test_pylayer_num_output_match()
self.func_test_pylayer_num_output_match()
def func_test_pylayer_dtype(self):
def test_pylayer_dtype(self):
class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x, dtype):
......@@ -195,12 +175,7 @@ class TestPyLayer(unittest.TestCase):
z.sum().backward()
self.assertIsNotNone(input1.grad)
def test_pylayer_dtype(self):
with _test_eager_guard():
self.func_test_pylayer_dtype()
self.func_test_pylayer_dtype()
def func_test_pylayer_Exception_forward(self):
def test_pylayer_Exception_forward(self):
class Layer_None1(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, *args):
......@@ -263,12 +238,7 @@ class TestPyLayer(unittest.TestCase):
with self.assertRaises(NotImplementedError):
z = Layer_no_fw.apply(input1)
def test_pylayer_Exception_forward(self):
with _test_eager_guard():
self.func_test_pylayer_Exception_forward()
self.func_test_pylayer_Exception_forward()
def func_test_pylayer_nograd(self):
def test_pylayer_nograd(self):
class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x1, func1, func2=paddle.square, xx=None):
......@@ -286,12 +256,7 @@ class TestPyLayer(unittest.TestCase):
z.mean().backward()
self.assertIsNone(z.grad)
def test_pylayer_nograd(self):
with _test_eager_guard():
self.func_test_pylayer_nograd()
self.func_test_pylayer_nograd()
def func_test_pylayer_Exception_bk(self):
def test_pylayer_Exception_bk(self):
class Layer_bk_none1(
EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
):
......@@ -396,12 +361,7 @@ class TestPyLayer(unittest.TestCase):
z = z[0] + z[1]
z.mean().backward()
def test_pylayer_Exception_bk(self):
with _test_eager_guard():
self.func_test_pylayer_Exception_bk()
self.func_test_pylayer_Exception_bk()
def func_test_pylayer_bk_return_none(self):
def test_pylayer_bk_return_none(self):
class Layer_bk_none1(
EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
):
......@@ -442,12 +402,7 @@ class TestPyLayer(unittest.TestCase):
with self.assertRaises(ValueError):
z.mean().backward()
def test_pylayer_bk_return_none(self):
with _test_eager_guard():
self.func_test_pylayer_bk_return_none()
self.func_test_pylayer_bk_return_none()
def func_test_pylayer_inplace(self):
def test_pylayer_inplace(self):
class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x):
......@@ -475,115 +430,98 @@ class TestPyLayer(unittest.TestCase):
z.backward()
self.assertIsNotNone(data.grad)
def test_pylayer_inplace(self):
with _test_eager_guard():
self.func_test_pylayer_inplace()
self.func_test_pylayer_inplace()
def test_pylayer_inplace_backward_error(self):
with _test_eager_guard():
class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x):
return x
class cus_tanh(
EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, dy):
return dy
class Layer(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, data):
var_b = data**2
var_c = var_b**2
z = cus_tanh.apply(var_b)
loss = paddle.nn.functional.relu(var_c)
return loss
data = paddle.ones([2, 3], dtype="float64")
@staticmethod
def backward(ctx, dy):
return dy
class Layer(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, data):
var_b = data**2
var_c = var_b**2
z = cus_tanh.apply(var_b)
loss = paddle.nn.functional.relu(var_c)
return loss
data = paddle.ones([2, 3], dtype="float64")
data.stop_gradient = False
layer = Layer()
z = layer(data)
with self.assertRaisesRegexp(
RuntimeError,
"received tensor_version:{} != wrapper_version_snapshot:{}".format(
1, 0
),
):
z.backward()
def test_pylayer_inplace_backward_success_1(self):
class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, dy):
return dy
class Layer(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, data):
var_b = data**2
var_c = cus_tanh.apply(var_b)
var_d = var_c**2
loss = var_d.sum()
return loss
for i in range(2):
data = paddle.ones([2, 3], dtype="float64") / (i + 1)
data.stop_gradient = False
layer = Layer()
z = layer(data)
with self.assertRaisesRegexp(
RuntimeError,
"received tensor_version:{} != wrapper_version_snapshot:{}".format(
1, 0
),
):
z.backward()
z.backward()
self.assertIsNotNone(data.grad)
def test_pylayer_inplace_backward_success_1(self):
with _test_eager_guard():
def test_pylayer_inplace_backward_success_2(self):
class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x):
return x
class cus_tanh(
EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, dy):
return dy
class Layer(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, data):
var_b = data**2
var_c = cus_tanh.apply(var_b)
var_d = var_c**2
loss = var_d.sum()
return loss
for i in range(2):
data = paddle.ones([2, 3], dtype="float64") / (i + 1)
data.stop_gradient = False
layer = Layer()
z = layer(data)
z.backward()
self.assertIsNotNone(data.grad)
@staticmethod
def backward(ctx, dy):
return dy
def test_pylayer_inplace_backward_success_2(self):
with _test_eager_guard():
class Layer(paddle.nn.Layer):
def __init__(self):
super().__init__()
class cus_tanh(
EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, dy):
return dy
class Layer(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, data):
var_b = data**2
var_c = cus_tanh.apply(var_b)
var_d = var_c + var_c
loss = var_d.sum()
return loss
for i in range(2):
data = paddle.ones([2, 3], dtype="float64") / (i + 1)
data.stop_gradient = False
layer = Layer()
z = layer(data)
z.backward()
self.assertIsNotNone(data.grad)
def forward(self, data):
var_b = data**2
var_c = cus_tanh.apply(var_b)
var_d = var_c + var_c
loss = var_d.sum()
return loss
def func_test_pylayer_inplace_and_leaf_exception(self):
for i in range(2):
data = paddle.ones([2, 3], dtype="float64") / (i + 1)
data.stop_gradient = False
layer = Layer()
z = layer(data)
z.backward()
self.assertIsNotNone(data.grad)
def test_pylayer_inplace_and_leaf_exception(self):
class cus_pylayer_op(
EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
):
......@@ -611,12 +549,7 @@ class TestPyLayer(unittest.TestCase):
with self.assertRaises(ValueError):
z = layer(data)
def test_pylayer_inplace_and_leaf_exception(self):
with _test_eager_guard():
self.func_test_pylayer_inplace_and_leaf_exception()
self.func_test_pylayer_inplace_and_leaf_exception()
def func_test_backward_in_backward(self):
def test_backward_in_backward(self):
class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x):
......@@ -641,12 +574,7 @@ class TestPyLayer(unittest.TestCase):
z = paddle.tanh(data)
z = cus_tanh.apply(data)
def test_backward_in_backward(self):
with _test_eager_guard():
self.func_test_backward_in_backward()
self.func_test_backward_in_backward()
def func_test_return_to_tensor(self):
def test_return_to_tensor(self):
class Tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
@staticmethod
def forward(ctx, x1):
......@@ -668,90 +596,77 @@ class TestPyLayer(unittest.TestCase):
z, number, none_item, string_item, tensor1 = Tanh.apply(x1=input1)
z.mean().backward()
def test_return_to_tensor(self):
with _test_eager_guard():
self.func_test_return_to_tensor()
self.func_test_return_to_tensor()
def test_materialize_grads(self):
with _test_eager_guard():
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
ctx.mark_not_inplace(x)
return x, x + x
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
ctx.mark_not_inplace(x)
return x, x + x
@staticmethod
def backward(ctx, grad, grad2):
self.assertEqual(grad2, paddle.zeros([1]))
return grad
@staticmethod
def backward(ctx, grad, grad2):
self.assertEqual(grad2, paddle.zeros([1]))
return grad
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
Tanh.apply(x)[0].backward()
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
Tanh.apply(x)[0].backward()
def test_dont_materialize_grads(self):
with _test_eager_guard():
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
ctx.mark_not_inplace(x)
ctx.set_materialize_grads(False)
return x, x + x
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
ctx.mark_not_inplace(x)
ctx.set_materialize_grads(False)
return x, x + x
@staticmethod
def backward(ctx, grad, grad2):
self.assertIsNone(grad2)
return grad
@staticmethod
def backward(ctx, grad, grad2):
self.assertIsNone(grad2)
return grad
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
Tanh.apply(x)[0].backward()
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
Tanh.apply(x)[0].backward()
def test_mark_non_differentiable(self):
with _test_eager_guard():
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
a = x + x
ctx.mark_non_differentiable(a)
return a
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
a = x + x
ctx.mark_non_differentiable(a)
return a
@staticmethod
def backward(ctx, grad):
self.assertTrue(False)  # should not be called
return paddle.ones([1], dtype="float64")
@staticmethod
def backward(ctx, grad):
self.assertTrue(False)  # should not be called
return paddle.ones([1], dtype="float64")
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
y = Tanh.apply(x)
y.sum().backward()
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
y = Tanh.apply(x)
y.sum().backward()
def test_mark_non_differentiable2(self):
with _test_eager_guard():
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
a = x + x
b = x + x + x
ctx.mark_non_differentiable(a)
return a, b
class Tanh(EagerPyLayer):
@staticmethod
def forward(ctx, x):
a = x + x
b = x + x + x
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertEqual(grad_a, paddle.zeros([1]))
self.assertEqual(grad_b, paddle.ones([1], dtype="float64"))
return grad_b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertEqual(grad_a, paddle.zeros([1]))
self.assertEqual(grad_b, paddle.ones([1], dtype="float64"))
return grad_b
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
a, b = Tanh.apply(x)
b.sum().backward()
self.assertEqual(x.grad, paddle.ones([1], dtype="float64"))
x = paddle.ones([1], dtype="float64")
x.stop_gradient = False
a, b = Tanh.apply(x)
b.sum().backward()
self.assertEqual(x.grad, paddle.ones([1], dtype="float64"))
class TestPyLayerReturnType(unittest.TestCase):
......@@ -832,9 +747,9 @@ class TestPyLayerReturnType(unittest.TestCase):
input1 = paddle.randn([3, 2])
input1.stop_gradient = False
y, _ = Tanh.apply(input1, 1 + input1)
with self.assertRaises(ValueError):
y, _ = Tanh.apply(input1, 1 + input1)
y.mean().backward()
def test_backward_return_fake_tensor(self):
......@@ -849,9 +764,9 @@ class TestPyLayerReturnType(unittest.TestCase):
input1 = paddle.randn([3, 2])
input1.stop_gradient = False
y, _ = Tanh.apply(input1)
with self.assertRaises(ValueError):
y, _ = Tanh.apply(input1)
y.mean().backward()
......
......@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
from paddle.static import Program, program_guard
paddle.enable_static()
......@@ -52,10 +51,6 @@ class TestRandintOp(OpTest):
hist, prob = self.output_hist(np.array(outs[0]))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)
def test_check_output_eager(self):
with _test_eager_guard():
self.test_check_output()
class TestRandintOpError(unittest.TestCase):
def test_errors(self):
......@@ -71,10 +66,6 @@ class TestRandintOpError(unittest.TestCase):
TypeError, paddle.randint, 5, shape=[shape_tensor]
)
def test_errors_eager(self):
with _test_eager_guard():
self.test_errors()
class TestRandintOp_attr_tensorlist(OpTest):
def setUp(self):
......@@ -100,10 +91,6 @@ class TestRandintOp_attr_tensorlist(OpTest):
hist, prob = self.output_hist(np.array(outs[0]))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)
def test_check_output_eager(self):
with _test_eager_guard():
self.test_check_output()
class TestRandint_attr_tensor(OpTest):
def setUp(self):
......@@ -123,10 +110,6 @@ class TestRandint_attr_tensor(OpTest):
hist, prob = self.output_hist(np.array(outs[0]))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)
def test_check_output_eager(self):
with _test_eager_guard():
self.test_check_output()
# Test python API
class TestRandintAPI(unittest.TestCase):
......@@ -167,23 +150,10 @@ class TestRandintAPI(unittest.TestCase):
fetch_list=[out1, out2, out3, out4, out5],
)
def test_api_eager(self):
with _test_eager_guard():
self.test_api()
class TestRandintImperative(unittest.TestCase):
def test_api(self):
def test_case(self):
paddle.disable_static()
self.run_test_case()
with _test_eager_guard():
self.run_test_case()
paddle.enable_static()
def run_test_case(self):
n = 10
x1 = paddle.randint(n, shape=[10], dtype="int32")
x2 = paddle.tensor.randint(n)
......@@ -191,6 +161,7 @@ class TestRandintImperative(unittest.TestCase):
for i in [x1, x2, x3]:
for j in i.numpy().tolist():
self.assertTrue((j >= 0 and j < n))
paddle.enable_static()
class TestRandomValue(unittest.TestCase):
......@@ -208,9 +179,6 @@ class TestRandomValue(unittest.TestCase):
self.run_test_case()
with _test_eager_guard():
self.run_test_case()
paddle.enable_static()
def run_test_case(self):
......
......@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.distributed.models.moe import utils
from paddle.fluid.framework import _test_eager_guard
def random_routing(topk_idx, topk_value, prob, topk=2):
......@@ -54,7 +53,7 @@ class TestNumberCountAPIFp32(unittest.TestCase):
)
self.place = paddle.CUDAPlace(0)
def func_api_dygraph(self):
def test_api_dygraph(self):
paddle.disable_static()
x = paddle.to_tensor(self.x)
value = paddle.to_tensor(self.topk_value)
......@@ -62,11 +61,6 @@ class TestNumberCountAPIFp32(unittest.TestCase):
out = utils._random_routing(x, value, prob)
assert np.allclose(out.numpy(), self.out)
def test_api_dygraph(self):
with _test_eager_guard():
self.func_api_dygraph()
self.func_api_dygraph()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
......
......@@ -19,7 +19,6 @@ from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.static import Program, program_guard
......@@ -83,10 +82,6 @@ class TestRandpermOp(OpTest):
check_randperm_out(self.n, out_np), msg=error_msg(out_np)
)
def test_eager(self):
with _test_eager_guard():
self.test_check_output()
class TestRandpermOpN(TestRandpermOp):
def init_attrs(self):
......@@ -153,13 +148,12 @@ class TestRandpermEager(unittest.TestCase):
def test_out(self):
paddle.disable_static()
n = 10
with _test_eager_guard():
for dtype in ['int32', np.int64, 'float32', 'float64']:
data_p = paddle.randperm(n, dtype)
data_np = data_p.numpy()
self.assertTrue(
check_randperm_out(n, data_np), msg=error_msg(data_np)
)
for dtype in ['int32', np.int64, 'float32', 'float64']:
data_p = paddle.randperm(n, dtype)
data_np = data_p.numpy()
self.assertTrue(
check_randperm_out(n, data_np), msg=error_msg(data_np)
)
paddle.enable_static()
......
......@@ -17,7 +17,6 @@ import numpy as np
import paddle
from paddle import _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard
paddle.set_device('cpu')
......@@ -34,7 +33,7 @@ def clear_grad_test_0(w, a):
class TestInplaceAndClearGradient(unittest.TestCase):
def func_test(self):
def test_inplace_n_clear_grad(self):
input_data = np.ones([1, 1])
w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
......@@ -49,11 +48,6 @@ class TestInplaceAndClearGradient(unittest.TestCase):
out.backward()
assert w.grad[0] == 0.15
def test(self):
with _test_eager_guard():
self.func_test()
self.func_test()
# Test 2
class Counter:
......@@ -76,7 +70,7 @@ def clear_grad_test_1(w, c):
class TestInplaceClearGradAccumulation(unittest.TestCase):
def func_test(self):
def test_inplace_clear_grad_accum(self):
input_data = np.ones([1, 1])
w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
c = Counter()
......@@ -98,14 +92,9 @@ class TestInplaceClearGradAccumulation(unittest.TestCase):
assert c.num_calls == 1
c.num_calls = 0
def test(self):
with _test_eager_guard():
self.func_test()
self.func_test()
class TestInplaceClearGradAccumulationAlt(unittest.TestCase):
def func_test(self):
def test_inplace_clear_grad_accum(self):
input_data = np.ones([1, 1])
w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
out = _legacy_C_ops.scale(w, 'scale', 0.1)
......@@ -116,11 +105,6 @@ class TestInplaceClearGradAccumulationAlt(unittest.TestCase):
assert w.grad._inplace_version() == 1
def test(self):
with _test_eager_guard():
self.func_test()
self.func_test()
if __name__ == '__main__':
unittest.main()
......@@ -150,16 +150,11 @@ class TestRetainGraph(unittest.TestCase):
loss_g.backward()
optim_g.minimize(loss_g)
def func_retain(self):
def test_retain(self):
self.run_retain(need_retain=True)
if not fluid.framework.in_dygraph_mode():
self.assertRaises(RuntimeError, self.run_retain, need_retain=False)
def test_retain(self):
with fluid.framework._test_eager_guard():
self.func_retain()
self.func_retain()
if __name__ == '__main__':
unittest.main()