Unverified commit d4305a26, authored by 姜永久, committed by GitHub

rm unittests eager guard tests part21 sum_op2tensor_zero (#48883)

* rm unittests eager guard tests part21 sum_op2tensor_zero

* modify tensor hook test

* rm assert for type promotion

* review
Parent 6727ab5c
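Every file touched by this diff follows the same refactoring pattern: the `func_*` helper plus its `with _test_eager_guard():` wrapper (which ran each test body twice, once under the eager guard and once in legacy dygraph) is collapsed into a single `test_*` method that runs once, in eager mode. A minimal, self-contained sketch of the resulting test style, assuming current Paddle dygraph APIs; the class and test names below are illustrative and not taken from the diff:

import unittest

import numpy as np
import paddle


class ExampleAddNTest(unittest.TestCase):  # hypothetical name, for illustration only
    def test_add_n(self):
        # No func_* indirection and no _test_eager_guard(): the body runs once in eager mode.
        x = paddle.ones([2, 3], dtype='float32')
        y = paddle.ones([2, 3], dtype='float32')
        out = paddle.add_n([x, y])
        np.testing.assert_array_equal(out.numpy(), np.full((2, 3), 2, dtype='float32'))


if __name__ == '__main__':
    unittest.main()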
@@ -26,7 +26,6 @@ import paddle.fluid.core as core
 import paddle.fluid.layers as layers
 import paddle.inference as paddle_infer
 from paddle import enable_static
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest,
@@ -376,27 +375,24 @@ class API_Test_Add_n(unittest.TestCase):
     def test_dygraph_api(self):
         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                input0 = paddle.ones(shape=[2, 3], dtype='float32')
-                input1 = paddle.ones(shape=[2, 3], dtype='float32')
-                input0.stop_gradient = False
-                input1.stop_gradient = False
-                expected_result = np.empty((2, 3))
-                expected_result.fill(2)
-                sum_value = paddle.add_n([input0, input1])
-                self.assertEqual(
-                    (sum_value.numpy() == expected_result).all(), True
-                )
+            input0 = paddle.ones(shape=[2, 3], dtype='float32')
+            input1 = paddle.ones(shape=[2, 3], dtype='float32')
+            input0.stop_gradient = False
+            input1.stop_gradient = False
+            expected_result = np.empty((2, 3))
+            expected_result.fill(2)
+            sum_value = paddle.add_n([input0, input1])
+            self.assertEqual((sum_value.numpy() == expected_result).all(), True)

             expected_grad_result = np.empty((2, 3))
             expected_grad_result.fill(1)
             sum_value.backward()
             self.assertEqual(
                 (input0.grad.numpy() == expected_grad_result).all(), True
             )
             self.assertEqual(
                 (input1.grad.numpy() == expected_grad_result).all(), True
             )

     def test_add_n_and_add_and_grad(self):
         with fluid.dygraph.guard():
......
@@ -112,21 +112,11 @@ class TestDygraphAutoTuneStatus(TestAutoTune):
             expected_res = self.get_expected_res(i, enable_autotune)
             self.check_status(expected_res)

-    def func_enable_autotune(self):
-        self.run_program(enable_autotune=True)
-
     def test_enable_autotune(self):
-        with paddle.fluid.framework._test_eager_guard():
-            self.func_enable_autotune()
-        self.func_enable_autotune()
-
-    def func_disable_autotune(self):
-        self.run_program(enable_autotune=False)
+        self.run_program(enable_autotune=True)

     def test_disable_autotune(self):
-        with paddle.fluid.framework._test_eager_guard():
-            self.func_disable_autotune()
-        self.func_disable_autotune()
+        self.run_program(enable_autotune=False)


 class TestStaticAutoTuneStatus(TestAutoTune):
......
@@ -18,14 +18,13 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class TensorFill_Test(unittest.TestCase):
     def setUp(self):
         self.shape = [32, 32]

-    def func_test_tensor_fill_true(self):
+    def test_tensor_fill_true(self):
         typelist = ['float32', 'float64', 'int32', 'int64', 'float16']
         places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
@@ -49,12 +48,8 @@ class TensorFill_Test(unittest.TestCase):
                 tensor.fill_(var)  # var type is basic type in typelist
                 self.assertEqual((tensor.numpy() == target).all(), True)

-    def test_tensor_fill_true(self):
-        with _test_eager_guard():
-            self.func_test_tensor_fill_true()
-        self.func_test_tensor_fill_true()
-
-    def func_test_tensor_fill_backward(self):
+    def test_tensor_fill_backward(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         typelist = ['float32']
         places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
@@ -79,26 +74,15 @@ class TensorFill_Test(unittest.TestCase):
                 loss.backward()
                 self.assertEqual((y.grad.numpy() == 0).all().item(), True)

-    def test_tensor_fill_backward(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_test_tensor_fill_backward()
-        self.func_test_tensor_fill_backward()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_test_errors(self):
+    def test_errors(self):
         def test_list():
             x = paddle.to_tensor([2, 3, 4])
             x.fill_([1])

         self.assertRaises(TypeError, test_list)

-    def test_errors(self):
-        with _test_eager_guard():
-            self.func_test_errors()
-        self.func_test_errors()
-

 if __name__ == '__main__':
     unittest.main()
@@ -18,11 +18,11 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class TensorFillDiagonal_Test(unittest.TestCase):
-    def func_dim2_normal(self):
+    def test_dim2_normal(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1]]).astype(
             'float32'
         )
@@ -55,15 +55,10 @@ class TensorFillDiagonal_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim2_normal(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim2_normal()
-        self.func_dim2_normal()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_offset(self):
+    def test_offset(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array([[2, 2, 1], [2, 2, 2], [2, 2, 2]]).astype(
             'float32'
         )
@@ -96,15 +91,9 @@ class TensorFillDiagonal_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_offset(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_offset()
-        self.func_offset()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_bool(self):
+    def test_bool(self):
         expected_np = np.array(
             [[False, True, True], [True, False, True], [True, True, False]]
         )
@@ -126,12 +115,8 @@ class TensorFillDiagonal_Test(unittest.TestCase):
             self.assertEqual((x.numpy() == expected_np).all(), True)

-    def test_bool(self):
-        with _test_eager_guard():
-            self.func_bool()
-        self.func_bool()
-
-    def func_dim2_unnormal_wrap(self):
+    def test_dim2_unnormal_wrap(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [
                 [1, 2, 2],
@@ -180,15 +165,10 @@ class TensorFillDiagonal_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim2_unnormal_wrap(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim2_unnormal_wrap()
-        self.func_dim2_unnormal_wrap()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_dim2_unnormal_unwrap(self):
+    def test_dim2_unnormal_unwrap(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [
                 [1, 2, 2],
@@ -237,15 +217,10 @@ class TensorFillDiagonal_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim2_unnormal_unwrap(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim2_unnormal_unwrap()
-        self.func_dim2_unnormal_unwrap()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_dim_larger2_normal(self):
+    def test_dim_larger2_normal(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [
                 [[1, 2, 2], [2, 2, 2], [2, 2, 2]],
@@ -286,12 +261,6 @@ class TensorFillDiagonal_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim_larger2_normal(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim_larger2_normal()
-        self.func_dim_larger2_normal()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.nn.functional as F
-from paddle.fluid.framework import _test_eager_guard


 class TensorFillDiagTensor_Test(unittest.TestCase):
@@ -29,7 +28,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase):
         if fluid.core.is_compiled_with_cuda():
             self.places.append(fluid.CUDAPlace(0))

-    def func_dim2(self):
+    def test_dim2(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [[1, 2, 2], [2, 1, 2], [2, 2, 1], [2, 2, 2]]
         ).astype('float32')
@@ -59,15 +59,10 @@ class TensorFillDiagTensor_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim2(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim2()
-        self.func_dim2()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_dim2_offset_1(self):
+    def test_dim2_offset_1(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [[2, 2, 2], [1, 2, 2], [2, 1, 2], [2, 2, 1]]
         ).astype('float32')
@@ -97,15 +92,10 @@ class TensorFillDiagTensor_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim2_offset_1(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim2_offset_1()
-        self.func_dim2_offset_1()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_dim2_offset1(self):
+    def test_dim2_offset1(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [[2, 1, 2], [2, 2, 1], [2, 2, 2], [2, 2, 2]]
         ).astype('float32')
@@ -135,15 +125,10 @@ class TensorFillDiagTensor_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_dim2_offset1(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim2_offset1()
-        self.func_dim2_offset1()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_dim4(self):
+    def test_dim4(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         expected_np = np.array(
             [
                 [
@@ -201,15 +186,10 @@ class TensorFillDiagTensor_Test(unittest.TestCase):
                     (y.grad.numpy().astype('float32') == expected_grad).all(),
                     True,
                 )
-
-    def test_func_dim4(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_dim4()
-        self.func_dim4()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_largedim(self):
+    def test_largedim(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
         # large dim only test on gpu because the cpu version is too slow for ci test, and the memory is limited
         if len(self.places) > 1:
             bsdim = 1024
@@ -233,12 +213,6 @@ class TensorFillDiagTensor_Test(unittest.TestCase):
             self.assertEqual((y == expected_pred).all(), True)
             self.assertEqual((y.grad == expected_grad).all(), True)
-
-    def test_largedim(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_largedim()
-        self.func_largedim()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard


 class SimpleNet(nn.Layer):
@@ -66,7 +65,9 @@ class TestTensorRegisterHook(unittest.TestCase):
         if paddle.is_compiled_with_cuda():
             self.devices.append("gpu")

-    def func_hook_for_interior_var(self):
+    def test_hook_for_interior_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+
         def run_double_hook_for_interior_var(double_hook, removed=False):
             for device in self.devices:
                 paddle.set_device(device)
@@ -155,15 +156,11 @@ class TestTensorRegisterHook(unittest.TestCase):
         run_print_hook_for_interior_var(print_hook)
         # register hook and removed
         run_print_hook_for_interior_var(print_hook, removed=True)
-
-    def test_hook_for_interior_var(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_hook_for_interior_var()
-        self.func_hook_for_interior_var()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
-
-    def func_hook_for_leaf_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+
+    def test_hook_for_leaf_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+
         def run_double_hook_for_leaf_var(double_hook, removed=False):
             for device in self.devices:
                 paddle.set_device(device)
@@ -201,15 +198,11 @@ class TestTensorRegisterHook(unittest.TestCase):
         run_double_hook_for_leaf_var(lambda grad: grad * 2)
         # register hook and removed
         run_double_hook_for_leaf_var(lambda grad: grad * 2, removed=True)
-
-    def test_hook_for_leaf_var(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_hook_for_leaf_var()
-        self.func_hook_for_leaf_var()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
-
-    def func_hook_for_accumulated_grad_interior_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+
+    def test_hook_for_accumulated_grad_interior_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+
         def run_double_hook_for_accumulated_grad_interior_var(
             double_hook, removed=False
         ):
@@ -265,15 +258,11 @@ class TestTensorRegisterHook(unittest.TestCase):
         run_double_hook_for_accumulated_grad_interior_var(
             lambda grad: grad * 2, removed=True
         )
-
-    def test_hook_for_accumulated_grad_interior_var(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_hook_for_accumulated_grad_interior_var()
-        self.func_hook_for_accumulated_grad_interior_var()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
-
-    def func_hook_for_accumulated_grad_leaf_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+
+    def test_hook_for_accumulated_grad_leaf_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+
         def run_double_hook_for_accumulated_grad_leaf_var(
             double_hook, removed=False
         ):
@@ -315,13 +304,11 @@ class TestTensorRegisterHook(unittest.TestCase):
         run_double_hook_for_accumulated_grad_leaf_var(
             lambda grad: grad * 2, removed=True
         )
-
-    def test_hook_for_accumulated_grad_leaf_var(self):
-        with _test_eager_guard():
-            self.func_hook_for_accumulated_grad_leaf_var()
-        self.func_hook_for_accumulated_grad_leaf_var()
-
-    def func_hook_in_model(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+
+    def test_hook_in_model(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+
         def run_double_hook_in_model(
             data, label, hook=None, register=False, remove=False
         ):
@@ -378,15 +365,11 @@ class TestTensorRegisterHook(unittest.TestCase):
         np.testing.assert_array_equal(ret1_grad, ret1_grad_rm)
         np.testing.assert_array_equal(linear1_w_grad, linear1_w_grad_rm)
         np.testing.assert_array_equal(linear1_b_grad, linear1_b_grad_rm)
-
-    def test_func_hook_in_model(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_hook_in_model()
-        self.func_hook_in_model()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
-
-    def func_multiple_hooks_for_interior_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+
+    def test_multiple_hooks_for_interior_var(self):
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+
         def run_multiple_hooks_for_interior_var(
             device, hooks, remove1=False, remove2=False, remove3=False
         ):
@@ -466,15 +449,9 @@ class TestTensorRegisterHook(unittest.TestCase):
             np.testing.assert_array_equal(w_grad, z)
             np.testing.assert_array_equal(x_grad, z)
             np.testing.assert_array_equal(y_grad, z)
-
-    def test_multiple_hooks_for_interior_var(self):
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
-            self.func_multiple_hooks_for_interior_var()
-        self.func_multiple_hooks_for_interior_var()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

-    def func_hook_in_double_grad(self):
+    def test_hook_in_double_grad(self):
         def double_print_hook(grad):
             grad = grad * 2
             print(grad)
@@ -510,12 +487,7 @@ class TestTensorRegisterHook(unittest.TestCase):
         z.backward()
         np.testing.assert_array_equal(x.grad.numpy(), np.array([8.0]))

-    def test_hook_in_double_grad(self):
-        with _test_eager_guard():
-            self.func_hook_in_double_grad()
-        self.func_hook_in_double_grad()
-
-    def func_remove_one_hook_multiple_times(self):
+    def test_remove_one_hook_multiple_times(self):
         for device in self.devices:
             paddle.set_device(device)
@@ -526,12 +498,7 @@ class TestTensorRegisterHook(unittest.TestCase):
             self.assertTrue(h.remove())
             self.assertFalse(h.remove())

-    def test_remove_one_hook_multiple_times(self):
-        with _test_eager_guard():
-            self.func_remove_one_hook_multiple_times()
-        self.func_remove_one_hook_multiple_times()
-
-    def func_register_hook_for_stop_gradient_var(self):
+    def test_register_hook_for_stop_gradient_var(self):
         for device in self.devices:
             paddle.set_device(device)
@@ -540,11 +507,6 @@ class TestTensorRegisterHook(unittest.TestCase):
             with self.assertRaises(RuntimeError):
                 x.register_hook(lambda grad: grad * 2)

-    def test_register_hook_for_stop_gradient_var(self):
-        with _test_eager_guard():
-            self.func_register_hook_for_stop_gradient_var()
-        self.func_register_hook_for_stop_gradient_var()
-
     def test_register_hook_in_static_mode(self):
         paddle.enable_static()
@@ -562,7 +524,7 @@ class TestTensorRegisterHook(unittest.TestCase):
         paddle.disable_static()

-    def func_register_hook_in_dy2static_mode(self):
+    def test_register_hook_in_dy2static_mode(self):
         net = SimpleNetForStatic(self.in_size, self.out_size)
         jit_net = paddle.jit.to_static(
             net, input_spec=[paddle.static.InputSpec([None, self.in_size])]
@@ -576,11 +538,6 @@ class TestTensorRegisterHook(unittest.TestCase):
         with self.assertRaises(AssertionError):
             out = jit_net(data_t)

-    def test_register_hook_in_dy2static_mode(self):
-        with _test_eager_guard():
-            self.func_register_hook_in_dy2static_mode()
-        self.func_register_hook_in_dy2static_mode()
-

 HOOK_INIT_VALUE = 10
 HOOK_IS_CALLED = False
@@ -599,7 +556,7 @@ class TestTensorRegisterBackwardHook(unittest.TestCase):
         if paddle.is_compiled_with_cuda():
             self.devices.append("gpu")

-    def func_register_backward_hook(self):
+    def test_register_backward_hook(self):
         global HOOK_INIT_VALUE
         global HOOK_IS_CALLED
         for device in self.devices:
@@ -616,35 +573,20 @@ class TestTensorRegisterBackwardHook(unittest.TestCase):
         HOOK_INIT_VALUE = 10
         HOOK_IS_CALLED = False

-    def test_register_backward_hook(self):
-        with _test_eager_guard():
-            self.func_register_backward_hook()
-        self.func_register_backward_hook()
-
-    def func_register_backward_hook_for_interior_var(self):
+    def test_register_backward_hook_for_interior_var(self):
         x = paddle.to_tensor(5.0, stop_gradient=False)
         y = paddle.pow(x, 4.0)
         with self.assertRaises(ValueError):
             y._register_backward_hook(global_void_hook)

-    def test_register_backward_hook_for_interior_var(self):
-        with _test_eager_guard():
-            self.func_register_backward_hook_for_interior_var()
-        self.func_register_backward_hook_for_interior_var()
-
-    def func_register_backward_hook_for_var_without_gradient(self):
+    def test_register_backward_hook_for_var_without_gradient(self):
         x = paddle.to_tensor(5.0)
         y = paddle.pow(x, 4.0)
         with self.assertRaises(ValueError):
             x._register_backward_hook(global_void_hook)

-    def test_register_backward_hook_for_var_without_gradient(self):
-        with _test_eager_guard():
-            self.func_register_backward_hook_for_var_without_gradient()
-        self.func_register_backward_hook_for_var_without_gradient()
-

 class TestRegsiterBackwardFinalHook(unittest.TestCase):
     def setUp(self):
......
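As a reading aid for the hook-test changes above: the new test methods toggle FLAGS_retain_grad_for_all_tensor inside the test body instead of in a removed func_* wrapper, then exercise Tensor.register_hook on an interior (non-leaf) tensor. A rough standalone sketch of that pattern, with illustrative variable names not taken from the diff:

import paddle
import paddle.fluid as fluid


def interior_var_hook_sketch():
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
    y = x * 2                                # interior (non-leaf) tensor
    y.register_hook(lambda grad: grad * 2)   # double the gradient flowing through y
    y.sum().backward()
    print(x.grad.numpy())                    # [4., 4.]: the hook doubled dL/dy, and dy/dx == 2
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})


if __name__ == '__main__':
    interior_var_hook_sketch()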
@@ -17,7 +17,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard

 # Support types are ref from `paddle.tensor.math`
 # - Related paddle dtypes:
@@ -50,7 +49,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         self.assertEqual(c_rlt.dtype, c.dtype)
         np.testing.assert_array_equal(c_rlt.numpy(), c.numpy())

-    def func_tensor_add_scalar(self):
+    def test_tensor_add_scalar(self):
         # tensor(int64) + scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -81,12 +80,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 2.5, dtype="float32")
         self.check_operation(a, b, c, '+')

-    def test_tensor_add_scalar(self):
-        with _test_eager_guard():
-            self.func_tensor_add_scalar()
-        self.func_tensor_add_scalar()
-
-    def func_tensor_sub_scalar(self):
+    def test_tensor_sub_scalar(self):
         # tensor(int64) - scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -117,12 +111,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 0.5, dtype="float32")
         self.check_operation(a, b, c, '-')

-    def test_tensor_sub_scalar(self):
-        with _test_eager_guard():
-            self.func_tensor_sub_scalar()
-        self.func_tensor_sub_scalar()
-
-    def func_scalar_sub_tensor(self):
+    def test_scalar_sub_tensor(self):
         # scalar(int) - tensor(int64)
         a = 1
         b = paddle.ones([2, 2, 2], dtype='int64')
@@ -153,12 +142,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], -0.5, dtype="float32")
         self.check_operation(a, b, c, '-')

-    def test_scalar_sub_tensor(self):
-        with _test_eager_guard():
-            self.func_scalar_sub_tensor()
-        self.func_scalar_sub_tensor()
-
-    def func_tensor_mul_tensor(self):
+    def test_tensor_mul_tensor(self):
         # tensor(int64) * scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -189,12 +173,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 1.5, dtype="float32")
         self.check_operation(a, b, c, '*')

-    def test_tensor_mul_tensor(self):
-        with _test_eager_guard():
-            self.func_tensor_mul_tensor()
-        self.func_tensor_mul_tensor()
-
-    def func_tensor_div_scalar(self):
+    def test_tensor_div_scalar(self):
         # tensor(int64) / scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 2
@@ -225,12 +204,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 2, dtype="float32")
         self.check_operation(a, b, c, '/')

-    def test_tensor_div_scalar(self):
-        with _test_eager_guard():
-            self.func_tensor_div_scalar()
-        self.func_tensor_div_scalar()
-
-    def func_scalar_div_tensor(self):
+    def test_scalar_div_tensor(self):
         # scalar(int) / tensor(int64)
         a = 1
         b = paddle.full([2, 2, 2], 2, dtype='int64')
@@ -255,12 +229,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 2, dtype="float32")
         self.check_operation(a, b, c, '/')

-    def test_scalar_div_tensor(self):
-        with _test_eager_guard():
-            self.func_scalar_div_tensor()
-        self.func_scalar_div_tensor()
-
-    def func_tensor_pow_scalar(self):
+    def test_tensor_pow_scalar(self):
         # tensor(int64) ** scalar(int)
         a = paddle.full([2, 2, 2], 2, dtype='int64')
         b = 3
@@ -285,12 +254,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 8, dtype="float32")
         self.check_operation(a, b, c, '**')

-    def test_tensor_pow_scalar(self):
-        with _test_eager_guard():
-            self.func_tensor_pow_scalar()
-        self.func_tensor_pow_scalar()
-
-    def func_scalar_pow_tensor(self):
+    def test_scalar_pow_tensor(self):
         # scalar(int) ** tensor(int64)
         a = 3
         b = paddle.full([2, 2, 2], 2, dtype='int64')
@@ -315,25 +279,15 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 9, dtype="float32")
         self.check_operation(a, b, c, '**')

-    def test_scalar_pow_tensor(self):
-        with _test_eager_guard():
-            self.func_scalar_pow_tensor()
-        self.func_scalar_pow_tensor()
-
     # TODO: floordiv op kernel doesn't support float
-    def func_tensor_floordiv_scalar(self):
+    def test_tensor_floordiv_scalar(self):
         # tensor(int64) // scalar(int)
         a = paddle.full([2, 2, 2], 3, dtype='int64')
         b = 2
         c = paddle.full([2, 2, 2], 1, dtype="int64")
         self.check_operation(a, b, c, '//')

-    def test_tensor_floordiv_scalar(self):
-        with _test_eager_guard():
-            self.func_tensor_floordiv_scalar()
-        self.func_tensor_floordiv_scalar()
-
-    def func_tensor_mod_scalar(self):
+    def test_tensor_mod_scalar(self):
         # tensor(int64) % scalar(int)
         a = paddle.full([2, 2, 2], 3, dtype='int64')
         b = 2
@@ -358,11 +312,6 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 1, dtype="float32")
         self.check_operation(a, b, c, '%')

-    def test_tensor_mod_scalar(self):
-        with _test_eager_guard():
-            self.func_tensor_mod_scalar()
-        self.func_tensor_mod_scalar()
-

 if __name__ == '__main__':
     unittest.main()
@@ -16,7 +16,6 @@ import unittest
 import warnings

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestTensorTypePromotion(unittest.TestCase):
@@ -28,42 +27,23 @@ class TestTensorTypePromotion(unittest.TestCase):
         with warnings.catch_warnings(record=True) as context:
             warnings.simplefilter("always")
             self.x + self.y
-            self.assertTrue(
-                "The dtype of left and right variables are not the same"
-                in str(context[-1].message)
-            )

     def sub_operator(self):
         with warnings.catch_warnings(record=True) as context:
             warnings.simplefilter("always")
             self.x - self.y
-            self.assertTrue(
-                "The dtype of left and right variables are not the same"
-                in str(context[-1].message)
-            )

     def mul_operator(self):
         with warnings.catch_warnings(record=True) as context:
             warnings.simplefilter("always")
             self.x * self.y
-            self.assertTrue(
-                "The dtype of left and right variables are not the same"
-                in str(context[-1].message)
-            )

     def div_operator(self):
         with warnings.catch_warnings(record=True) as context:
             warnings.simplefilter("always")
             self.x / self.y
-            self.assertTrue(
-                "The dtype of left and right variables are not the same"
-                in str(context[-1].message)
-            )

     def test_operator(self):
-        with _test_eager_guard():
-            pass
+        # add / sub / mul / div has been sunk to cpp level, there is no warnings to catch by this test.
         self.setUp()
         self.add_operator()
         self.sub_operator()
......
@@ -18,11 +18,11 @@ import numpy as np
 import paddle
 from paddle.fluid import core
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph


 class TestTensorCopyFrom(unittest.TestCase):
-    def func_main(self):
+    def test_main(self):
         if paddle.fluid.core.is_compiled_with_cuda():
             place = paddle.CPUPlace()
             np_value = np.random.random(size=[10, 30]).astype('float32')
@@ -30,14 +30,9 @@ class TestTensorCopyFrom(unittest.TestCase):
             tensor._uva()
             self.assertTrue(tensor.place.is_gpu_place())

-    def test_main(self):
-        with _test_eager_guard():
-            self.func_main()
-        self.func_main()
-

 class TestUVATensorFromNumpy(unittest.TestCase):
-    def func_uva_tensor_creation(self):
+    def test_uva_tensor_creation(self):
         if paddle.fluid.core.is_compiled_with_cuda():
             dtype_list = [
                 "int32",
@@ -74,11 +69,6 @@ class TestUVATensorFromNumpy(unittest.TestCase):
                 tensor1.numpy(), tensor2.numpy(), rtol=1e-05
             )

-    def test_uva_tensor_creation(self):
-        with _test_eager_guard():
-            self.func_uva_tensor_creation()
-        self.func_uva_tensor_creation()
-

 if __name__ == "__main__":
     unittest.main()
@@ -18,14 +18,13 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class TensorFill_Test(unittest.TestCase):
     def setUp(self):
         self.shape = [32, 32]

-    def func_test_tensor_fill_true(self):
+    def test_tensor_fill_true(self):
         typelist = ['float32', 'float64', 'int32', 'int64', 'float16']
         places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
@@ -44,11 +43,6 @@ class TensorFill_Test(unittest.TestCase):
                 tensor.zero_()
                 self.assertEqual((tensor.numpy() == target).all().item(), True)

-    def test_tensor_fill_true(self):
-        with _test_eager_guard():
-            self.func_test_tensor_fill_true()
-        self.func_test_tensor_fill_true()
-

 if __name__ == '__main__':
     unittest.main()