Unverified commit 0e1538ad, authored by 姜永久, committed by GitHub

rm unittests eager guard tests part18 pool2d2retian_graph (#48844)

* rm unittests eager guard tests part18 pool2d2retian_graph

* modify pylayer test
Parent b333d7ed
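Every file touched below follows the same refactor: the `func_*` test body plus its `_test_eager_guard` wrapper collapse into a single plain test method, since eager mode is now the default. The sketch below illustrates that pattern only; the class and method names (`MyEagerTest`, `test_add`) are placeholders, not code from any file in this commit.

```python
# Illustrative sketch of the refactor applied in every file below.
import unittest

import paddle


class MyEagerTest(unittest.TestCase):
    # Old pattern, deleted by this commit:
    #
    #     def func_test_add(self):
    #         ...  # the real assertions
    #
    #     def test_add(self):
    #         with _test_eager_guard():   # run once in eager mode
    #             self.func_test_add()
    #         self.func_test_add()        # and once in legacy dygraph mode
    #
    # New pattern: eager mode is the default, so the helper body becomes the
    # test method itself and the guard/wrapper pair is removed.
    def test_add(self):
        x = paddle.to_tensor([1.0, 2.0])
        y = paddle.to_tensor([3.0, 4.0])
        self.assertEqual((x + y).numpy().tolist(), [4.0, 6.0])


if __name__ == '__main__':
    unittest.main()
```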
@@ -24,7 +24,6 @@ from test_pool2d_op import (
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool2d, max_pool2d
@@ -372,10 +371,6 @@ class TestPool2D_API(unittest.TestCase):
            self.check_max_dygraph_ceilmode_results(place)
            self.check_max_dygraph_nhwc_results(place)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_pool2d()

class TestPool2DError_API(unittest.TestCase):
    def test_error_api(self):
@@ -602,10 +597,6 @@ class TestPool2DError_API(unittest.TestCase):
        self.assertRaises(ValueError, run_stride_out_of_range)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_error_api()

if __name__ == '__main__':
    unittest.main()
@@ -24,7 +24,6 @@ from test_pool3d_op import (
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool3d, max_pool3d
@@ -367,10 +366,6 @@ class TestPool3D_API(unittest.TestCase):
            self.check_max_dygraph_ndhwc_results(place)
            self.check_max_dygraph_ceilmode_results(place)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_pool3d()

class TestPool3DError_API(unittest.TestCase):
    def test_error_api(self):
@@ -568,10 +563,6 @@ class TestPool3DError_API(unittest.TestCase):
        self.assertRaises(ValueError, run_size_out_of_range)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_error_api()

if __name__ == '__main__':
    unittest.main()
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program
-from paddle.fluid.framework import _test_eager_guard

def ref_prelu(x, weight):
@@ -79,10 +78,6 @@ class TestFunctionalPReluAPI(unittest.TestCase):
        self.dygraph_check(self.weight_np_0)
        self.dygraph_check(self.weight_np_1)

-    def test_dygraph_api_eager(self):
-        with _test_eager_guard():
-            self.test_dygraph_api()

    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
            weight_fp32 = paddle.fluid.data(
......
@@ -19,7 +19,6 @@ import numpy as np
import paddle
from paddle.distributed.models.moe import utils
from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard

def count(x, upper_num):
@@ -115,7 +114,7 @@ class TestPruneGateByCapacityAPI1(unittest.TestCase):
        )
        assert_allclose(res[0], self.out, self.n_expert)

-    def func_dygraph_api(self):
+    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        gate_idx_tensor = paddle.to_tensor(self.gate_idx)
        expert_count_tensor = paddle.to_tensor(self.expert_count)
@@ -124,11 +123,6 @@ class TestPruneGateByCapacityAPI1(unittest.TestCase):
        )
        assert_allclose(out.numpy(), self.out, self.n_expert)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.func_dygraph_api()
-        self.func_dygraph_api()

@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
......
@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.autograd.py_layer import EagerPyLayer, LegacyPyLayer
-from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode

class FakeTensor(paddle.fluid.core.VarBase):
@@ -27,7 +27,7 @@ class FakeTensor(paddle.fluid.core.VarBase):

class TestPyLayer(unittest.TestCase):
-    def func_test_simple_pylayer_multiple_output(self):
+    def test_simple_pylayer_multiple_output(self):
        class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x1, x2, func1, func2=paddle.square):
@@ -59,12 +59,7 @@ class TestPyLayer(unittest.TestCase):
            np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10
        )

-    def test_simple_pylayer_multiple_output(self):
-        with _test_eager_guard():
-            self.func_test_simple_pylayer_multiple_output()
-        self.func_test_simple_pylayer_multiple_output()
-
-    def func_test_simple_pylayer_return_none_with_no_grad(self):
+    def test_simple_pylayer_return_none_with_no_grad(self):
        class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x1, x2, func1, func2=paddle.square):
@@ -100,12 +95,7 @@ class TestPyLayer(unittest.TestCase):
            np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10
        )

-    def test_simple_pylayer_return_none_with_no_grad(self):
-        with _test_eager_guard():
-            self.func_test_simple_pylayer_return_none_with_no_grad()
-        self.func_test_simple_pylayer_return_none_with_no_grad()
-
-    def func_test_simple_pylayer_single_output(self):
+    def test_simple_pylayer_single_output(self):
        class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x1, func1, func2=paddle.square):
@@ -133,12 +123,7 @@ class TestPyLayer(unittest.TestCase):
            np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10
        )

-    def test_simple_pylayer_single_output(self):
-        with _test_eager_guard():
-            self.func_test_simple_pylayer_single_output()
-        self.func_test_simple_pylayer_single_output()
-
-    def func_test_pylayer_num_output_match(self):
+    def test_pylayer_num_output_match(self):
        class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(
@@ -160,12 +145,7 @@ class TestPyLayer(unittest.TestCase):
        with self.assertRaises(ValueError):
            z.mean().backward()

-    def test_pylayer_num_output_match(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_num_output_match()
-        self.func_test_pylayer_num_output_match()
-
-    def func_test_pylayer_dtype(self):
+    def test_pylayer_dtype(self):
        class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x, dtype):
@@ -195,12 +175,7 @@ class TestPyLayer(unittest.TestCase):
        z.sum().backward()
        self.assertIsNotNone(input1.grad)

-    def test_pylayer_dtype(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_dtype()
-        self.func_test_pylayer_dtype()
-
-    def func_test_pylayer_Exception_forward(self):
+    def test_pylayer_Exception_forward(self):
        class Layer_None1(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, *args):
@@ -263,12 +238,7 @@ class TestPyLayer(unittest.TestCase):
        with self.assertRaises(NotImplementedError):
            z = Layer_no_fw.apply(input1)

-    def test_pylayer_Exception_forward(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_Exception_forward()
-        self.func_test_pylayer_Exception_forward()
-
-    def func_test_pylayer_nograd(self):
+    def test_pylayer_nograd(self):
        class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x1, func1, func2=paddle.square, xx=None):
@@ -286,12 +256,7 @@ class TestPyLayer(unittest.TestCase):
        z.mean().backward()
        self.assertIsNone(z.grad)

-    def test_pylayer_nograd(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_nograd()
-        self.func_test_pylayer_nograd()
-
-    def func_test_pylayer_Exception_bk(self):
+    def test_pylayer_Exception_bk(self):
        class Layer_bk_none1(
            EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
        ):
@@ -396,12 +361,7 @@ class TestPyLayer(unittest.TestCase):
        z = z[0] + z[1]
        z.mean().backward()

-    def test_pylayer_Exception_bk(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_Exception_bk()
-        self.func_test_pylayer_Exception_bk()
-
-    def func_test_pylayer_bk_return_none(self):
+    def test_pylayer_bk_return_none(self):
        class Layer_bk_none1(
            EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
        ):
@@ -442,12 +402,7 @@ class TestPyLayer(unittest.TestCase):
        with self.assertRaises(ValueError):
            z.mean().backward()

-    def test_pylayer_bk_return_none(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_bk_return_none()
-        self.func_test_pylayer_bk_return_none()
-
-    def func_test_pylayer_inplace(self):
+    def test_pylayer_inplace(self):
        class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x):
@@ -475,115 +430,98 @@ class TestPyLayer(unittest.TestCase):
            z.backward()
            self.assertIsNotNone(data.grad)

-    def test_pylayer_inplace(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_inplace()
-        self.func_test_pylayer_inplace()

    def test_pylayer_inplace_backward_error(self):
-        with _test_eager_guard():
-            class cus_tanh(
-                EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
-            ):
+        class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, dy):
                return dy

        class Layer(paddle.nn.Layer):
            def __init__(self):
                super().__init__()

            def forward(self, data):
                var_b = data**2
                var_c = var_b**2
                z = cus_tanh.apply(var_b)
                loss = paddle.nn.functional.relu(var_c)
                return loss

        data = paddle.ones([2, 3], dtype="float64")
        data.stop_gradient = False
        layer = Layer()
        z = layer(data)
        with self.assertRaisesRegexp(
            RuntimeError,
            "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                1, 0
            ),
        ):
            z.backward()

    def test_pylayer_inplace_backward_success_1(self):
-        with _test_eager_guard():
-            class cus_tanh(
-                EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
-            ):
+        class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, dy):
                return dy

        class Layer(paddle.nn.Layer):
            def __init__(self):
                super().__init__()

            def forward(self, data):
                var_b = data**2
                var_c = cus_tanh.apply(var_b)
                var_d = var_c**2
                loss = var_d.sum()
                return loss

        for i in range(2):
            data = paddle.ones([2, 3], dtype="float64") / (i + 1)
            data.stop_gradient = False
            layer = Layer()
            z = layer(data)
            z.backward()
            self.assertIsNotNone(data.grad)

    def test_pylayer_inplace_backward_success_2(self):
-        with _test_eager_guard():
-            class cus_tanh(
-                EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
-            ):
+        class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, dy):
                return dy

        class Layer(paddle.nn.Layer):
            def __init__(self):
                super().__init__()

            def forward(self, data):
                var_b = data**2
                var_c = cus_tanh.apply(var_b)
                var_d = var_c + var_c
                loss = var_d.sum()
                return loss

        for i in range(2):
            data = paddle.ones([2, 3], dtype="float64") / (i + 1)
            data.stop_gradient = False
            layer = Layer()
            z = layer(data)
            z.backward()
            self.assertIsNotNone(data.grad)

-    def func_test_pylayer_inplace_and_leaf_exception(self):
+    def test_pylayer_inplace_and_leaf_exception(self):
        class cus_pylayer_op(
            EagerPyLayer if in_dygraph_mode() else LegacyPyLayer
        ):
@@ -611,12 +549,7 @@ class TestPyLayer(unittest.TestCase):
        with self.assertRaises(ValueError):
            z = layer(data)

-    def test_pylayer_inplace_and_leaf_exception(self):
-        with _test_eager_guard():
-            self.func_test_pylayer_inplace_and_leaf_exception()
-        self.func_test_pylayer_inplace_and_leaf_exception()
-
-    def func_test_backward_in_backward(self):
+    def test_backward_in_backward(self):
        class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x):
@@ -641,12 +574,7 @@ class TestPyLayer(unittest.TestCase):
        z = paddle.tanh(data)
        z = cus_tanh.apply(data)

-    def test_backward_in_backward(self):
-        with _test_eager_guard():
-            self.func_test_backward_in_backward()
-        self.func_test_backward_in_backward()
-
-    def func_test_return_to_tensor(self):
+    def test_return_to_tensor(self):
        class Tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer):
            @staticmethod
            def forward(ctx, x1):
@@ -668,90 +596,77 @@ class TestPyLayer(unittest.TestCase):
        z, number, none_item, string_item, tensor1 = Tanh.apply(x1=input1)
        z.mean().backward()

-    def test_return_to_tensor(self):
-        with _test_eager_guard():
-            self.func_test_return_to_tensor()
-        self.func_test_return_to_tensor()

    def test_materialize_grads(self):
-        with _test_eager_guard():
-            class Tanh(EagerPyLayer):
+        class Tanh(EagerPyLayer):
            @staticmethod
            def forward(ctx, x):
                ctx.mark_not_inplace(x)
                return x, x + x

            @staticmethod
            def backward(ctx, grad, grad2):
                self.assertEqual(grad2, paddle.zeros([1]))
                return grad

        x = paddle.ones([1], dtype="float64")
        x.stop_gradient = False
        Tanh.apply(x)[0].backward()

    def test_dont_materialize_grads(self):
-        with _test_eager_guard():
-            class Tanh(EagerPyLayer):
+        class Tanh(EagerPyLayer):
            @staticmethod
            def forward(ctx, x):
                ctx.mark_not_inplace(x)
                ctx.set_materialize_grads(False)
                return x, x + x

            @staticmethod
            def backward(ctx, grad, grad2):
                self.assertIsNone(grad2)
                return grad

        x = paddle.ones([1], dtype="float64")
        x.stop_gradient = False
        Tanh.apply(x)[0].backward()

    def test_mark_non_differentiable(self):
-        with _test_eager_guard():
-            class Tanh(EagerPyLayer):
+        class Tanh(EagerPyLayer):
            @staticmethod
            def forward(ctx, x):
                a = x + x
                ctx.mark_non_differentiable(a)
                return a

            @staticmethod
            def backward(ctx, grad):
                self.assertTrue(False)  # should not be call
                return paddle.ones([1], dtype="float64")

        x = paddle.ones([1], dtype="float64")
        x.stop_gradient = False
        y = Tanh.apply(x)
        y.sum().backward()

    def test_mark_non_differentiable2(self):
-        with _test_eager_guard():
-            class Tanh(EagerPyLayer):
+        class Tanh(EagerPyLayer):
            @staticmethod
            def forward(ctx, x):
                a = x + x
                b = x + x + x
                ctx.mark_non_differentiable(a)
                return a, b

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                self.assertEqual(grad_a, paddle.zeros([1]))
                self.assertEqual(grad_b, paddle.ones([1], dtype="float64"))
                return grad_b

        x = paddle.ones([1], dtype="float64")
        x.stop_gradient = False
        a, b = Tanh.apply(x)
        b.sum().backward()
        self.assertEqual(x.grad, paddle.ones([1], dtype="float64"))
class TestPyLayerReturnType(unittest.TestCase):
@@ -832,9 +747,9 @@ class TestPyLayerReturnType(unittest.TestCase):
        input1 = paddle.randn([3, 2])
        input1.stop_gradient = False
-        y, _ = Tanh.apply(input1, 1 + input1)

        with self.assertRaises(ValueError):
+            y, _ = Tanh.apply(input1, 1 + input1)
            y.mean().backward()

    def test_backward_return_fake_tensor(self):
@@ -849,9 +764,9 @@ class TestPyLayerReturnType(unittest.TestCase):
        input1 = paddle.randn([3, 2])
        input1.stop_gradient = False
-        y, _ = Tanh.apply(input1)

        with self.assertRaises(ValueError):
+            y, _ = Tanh.apply(input1)
            y.mean().backward()
......
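The bulk of the changes above are in the PyLayer tests. For readers unfamiliar with the API under test, here is a minimal sketch of the custom forward/backward protocol those tests exercise. It uses the public `paddle.autograd.PyLayer` rather than the internal `EagerPyLayer`/`LegacyPyLayer` classes imported by the test file (assumed equivalent for illustration), and the `CusTanh` name is illustrative only.

```python
import paddle
from paddle.autograd import PyLayer


class CusTanh(PyLayer):
    @staticmethod
    def forward(ctx, x):
        y = paddle.tanh(x)
        # Stash whatever backward will need on the context object.
        ctx.save_for_backward(y)
        return y

    @staticmethod
    def backward(ctx, dy):
        (y,) = ctx.saved_tensor()
        # d/dx tanh(x) = 1 - tanh(x)^2
        return dy * (1 - paddle.square(y))


x = paddle.to_tensor([0.1, 0.2, 0.3], stop_gradient=False)
z = CusTanh.apply(x)
z.mean().backward()
print(x.grad)  # gradient produced through the custom backward
```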
@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
from paddle.static import Program, program_guard

paddle.enable_static()
@@ -52,10 +51,6 @@ class TestRandintOp(OpTest):
        hist, prob = self.output_hist(np.array(outs[0]))
        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)

-    def test_check_output_eager(self):
-        with _test_eager_guard():
-            self.test_check_output()

class TestRandintOpError(unittest.TestCase):
    def test_errors(self):
@@ -71,10 +66,6 @@ class TestRandintOpError(unittest.TestCase):
            TypeError, paddle.randint, 5, shape=[shape_tensor]
        )

-    def test_errors_eager(self):
-        with _test_eager_guard():
-            self.test_errors()

class TestRandintOp_attr_tensorlist(OpTest):
    def setUp(self):
@@ -100,10 +91,6 @@ class TestRandintOp_attr_tensorlist(OpTest):
        hist, prob = self.output_hist(np.array(outs[0]))
        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)

-    def test_check_output_eager(self):
-        with _test_eager_guard():
-            self.test_check_output()

class TestRandint_attr_tensor(OpTest):
    def setUp(self):
@@ -123,10 +110,6 @@ class TestRandint_attr_tensor(OpTest):
        hist, prob = self.output_hist(np.array(outs[0]))
        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001)

-    def test_check_output_eager(self):
-        with _test_eager_guard():
-            self.test_check_output()

# Test python API
class TestRandintAPI(unittest.TestCase):
@@ -167,23 +150,10 @@ class TestRandintAPI(unittest.TestCase):
            fetch_list=[out1, out2, out3, out4, out5],
        )

-    def test_api_eager(self):
-        with _test_eager_guard():
-            self.test_api()

class TestRandintImperative(unittest.TestCase):
-    def test_api(self):
+    def test_case(self):
        paddle.disable_static()
-        self.run_test_case()
-        with _test_eager_guard():
-            self.run_test_case()
-        paddle.enable_static()
-
-    def run_test_case(self):
        n = 10
        x1 = paddle.randint(n, shape=[10], dtype="int32")
        x2 = paddle.tensor.randint(n)
@@ -191,6 +161,7 @@ class TestRandintImperative(unittest.TestCase):
        for i in [x1, x2, x3]:
            for j in i.numpy().tolist():
                self.assertTrue((j >= 0 and j < n))
+        paddle.enable_static()

class TestRandomValue(unittest.TestCase):
@@ -208,9 +179,6 @@ class TestRandomValue(unittest.TestCase):
        self.run_test_case()

-        with _test_eager_guard():
-            self.run_test_case()

        paddle.enable_static()

    def run_test_case(self):
......
@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.distributed.models.moe import utils
-from paddle.fluid.framework import _test_eager_guard

def random_routing(topk_idx, topk_value, prob, topk=2):
@@ -54,7 +53,7 @@ class TestNumberCountAPIFp32(unittest.TestCase):
        )
        self.place = paddle.CUDAPlace(0)

-    def func_api_dygraph(self):
+    def test_api_dygraph(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x)
        value = paddle.to_tensor(self.topk_value)
@@ -62,11 +61,6 @@ class TestNumberCountAPIFp32(unittest.TestCase):
        out = utils._random_routing(x, value, prob)
        assert np.allclose(out.numpy(), self.out)

-    def test_api_dygraph(self):
-        with _test_eager_guard():
-            self.func_api_dygraph()
-        self.func_api_dygraph()

@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
......
@@ -19,7 +19,6 @@ from op_test import OpTest
import paddle
import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
from paddle.static import Program, program_guard
@@ -83,10 +82,6 @@ class TestRandpermOp(OpTest):
            check_randperm_out(self.n, out_np), msg=error_msg(out_np)
        )

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_check_output()

class TestRandpermOpN(TestRandpermOp):
    def init_attrs(self):
@@ -153,13 +148,12 @@ class TestRandpermEager(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        n = 10
-        with _test_eager_guard():
-            for dtype in ['int32', np.int64, 'float32', 'float64']:
-                data_p = paddle.randperm(n, dtype)
-                data_np = data_p.numpy()
-                self.assertTrue(
-                    check_randperm_out(n, data_np), msg=error_msg(data_np)
-                )
+        for dtype in ['int32', np.int64, 'float32', 'float64']:
+            data_p = paddle.randperm(n, dtype)
+            data_np = data_p.numpy()
+            self.assertTrue(
+                check_randperm_out(n, data_np), msg=error_msg(data_np)
+            )
        paddle.enable_static()
......
@@ -17,7 +17,6 @@ import numpy as np
import paddle
from paddle import _legacy_C_ops
-from paddle.fluid.framework import _test_eager_guard

paddle.set_device('cpu')
@@ -34,7 +33,7 @@ def clear_grad_test_0(w, a):

class TestInplaceAndClearGradient(unittest.TestCase):
-    def func_test(self):
+    def test_inplace_n_clear_grad(self):
        input_data = np.ones([1, 1])
        w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
@@ -49,11 +48,6 @@ class TestInplaceAndClearGradient(unittest.TestCase):
        out.backward()
        assert w.grad[0] == 0.15

-    def test(self):
-        with _test_eager_guard():
-            self.func_test()
-        self.func_test()

# Test 2
class Counter:
@@ -76,7 +70,7 @@ def clear_grad_test_1(w, c):

class TestInplaceClearGradAccumulation(unittest.TestCase):
-    def func_test(self):
+    def test_inplace_clear_grad_accum(self):
        input_data = np.ones([1, 1])
        w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
        c = Counter()
@@ -98,14 +92,9 @@ class TestInplaceClearGradAccumulation(unittest.TestCase):
        assert c.num_calls == 1
        c.num_calls = 0

-    def test(self):
-        with _test_eager_guard():
-            self.func_test()
-        self.func_test()

class TestInplaceClearGradAccumulationAlt(unittest.TestCase):
-    def func_test(self):
+    def test_inplace_clear_grad_accum(self):
        input_data = np.ones([1, 1])
        w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
        out = _legacy_C_ops.scale(w, 'scale', 0.1)
@@ -116,11 +105,6 @@ class TestInplaceClearGradAccumulationAlt(unittest.TestCase):
        assert w.grad._inplace_version() == 1

-    def test(self):
-        with _test_eager_guard():
-            self.func_test()
-        self.func_test()

if __name__ == '__main__':
    unittest.main()
@@ -150,16 +150,11 @@ class TestRetainGraph(unittest.TestCase):
        loss_g.backward()
        optim_g.minimize(loss_g)

-    def func_retain(self):
+    def test_retain(self):
        self.run_retain(need_retain=True)
        if not fluid.framework.in_dygraph_mode():
            self.assertRaises(RuntimeError, self.run_retain, need_retain=False)

-    def test_retain(self):
-        with fluid.framework._test_eager_guard():
-            self.func_retain()
-        self.func_retain()

if __name__ == '__main__':
    unittest.main()
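The last file above covers `retain_graph`, which the commit title also references. A hedged sketch of the behavior that test exercises follows; it uses simple illustrative tensors rather than the test's GAN-style generator/discriminator setup.

```python
import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
loss = (x * x).sum()

# The first backward keeps the graph alive so it can be traversed again.
loss.backward(retain_graph=True)
# Second backward over the same graph; the test above asserts that omitting
# retain_graph on the first call raises RuntimeError in legacy graph mode.
loss.backward()
print(x.grad)  # gradients from both passes accumulate
```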