Unverified  Commit 3125733a  authored by W Weilong Wu, committed by GitHub

rm _disable_legacy_dygraph and disable one mkldnn test file (#48721)

* rm _disable_legacy_dygraph

* disable test_flags_mkldnn_ops_on_off test
Parent d050c188
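For context: `_enable_legacy_dygraph` and `_disable_legacy_dygraph` were private `paddle.fluid.framework` switches that flipped a process between the legacy dygraph engine and the eager engine. Eager mode is now the default dygraph implementation, so the toggles are dead code; the sketch below is illustrative only (the commented-out helpers are not public API):

```python
# Minimal sketch of the pattern this commit deletes (illustrative, not public API).
import paddle

paddle.disable_static()  # dygraph (imperative) mode, eager engine by default

# Before: tests bracketed eager-only code with private engine toggles.
#   from paddle.fluid import framework
#   framework._enable_legacy_dygraph()   # switch to the legacy engine
#   ...legacy-engine assertions...
#   framework._disable_legacy_dygraph()  # switch back to eager
#
# After: the body just runs; eager mode needs no guard.
x = paddle.to_tensor([1.0, 2.0, 3.0])
assert (x * 2).numpy().tolist() == [2.0, 4.0, 6.0]
```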
@@ -5,6 +5,7 @@ file(
 string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
 list(REMOVE_ITEM TEST_OPS "test_onnx_format_quantization_mobilenetv1")
+list(REMOVE_ITEM TEST_OPS "test_flags_mkldnn_ops_on_off")
 if(WITH_MKLDNN AND NOT WIN32)
   list(APPEND TEST_OPS "test_onnx_format_quantization_mobilenetv1")
@@ -19,4 +20,4 @@ if(WITH_MKLDNN AND NOT WIN32)
   set_tests_properties(test_onnx_format_quantization_mobilenetv1
                        PROPERTIES TIMEOUT 300)
 endif()
-set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
+# set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
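Note that disabling the test takes both changes: removing it from `TEST_OPS` keeps it from being registered with CTest, and the matching `set_tests_properties` call must be commented out as well, because setting properties on a test that was never added fails at CMake configure time.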
@@ -18,11 +18,9 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _enable_legacy_dygraph, _global_flags
+from paddle.fluid.framework import _global_flags
 from paddle.fluid.layer_helper import LayerHelper
 
-_enable_legacy_dygraph()
 
 def check():
     print(
......
@@ -22,7 +22,6 @@ from decorator_helper import prog_scope
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.framework as framework
 import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
 from paddle.fluid.backward import append_backward
@@ -42,7 +41,6 @@ class TestAssignOp(op_test.OpTest):
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()
 
     def test_backward(self):
         paddle.enable_static()
@@ -50,7 +48,6 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()
 
 
 class TestAssignFP16Op(op_test.OpTest):
@@ -67,7 +64,6 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()
 
     def test_backward(self):
         paddle.enable_static()
@@ -75,7 +71,6 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()
 
 
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
......
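The recurring pattern in these assign-op tests — run an OpTest check under the static graph, then restore dynamic mode and reset the gradient-retention flag — reduces to the following sketch (flag name taken from the diff above; the test body is a placeholder):

```python
# Sketch of the enable/restore pattern used by the assign-op tests.
import paddle
import paddle.fluid as fluid

paddle.enable_static()  # OpTest checks run under the static graph
try:
    pass  # e.g. self.check_output(check_eager=True)
finally:
    # Reset flag state and return to dynamic mode so later tests are unaffected.
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
    paddle.disable_static()
```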
@@ -22,85 +22,77 @@ import paddle.fluid.core as core
 from paddle.fluid.framework import (
     EagerParamBase,
     _current_expected_place,
-    _disable_legacy_dygraph,
-    _test_eager_guard,
     in_dygraph_mode,
 )
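`_test_eager_guard` was a test-only context manager that forced eager mode on for its body; with eager mode the only dygraph engine, the guard (and its import) is redundant, which is why every guarded block below is simply dedented. A quick check of the invariant the removal relies on:

```python
# Dygraph now IS eager mode: in_dygraph_mode() is true whenever static mode is off.
import numpy as np
import paddle
from paddle.fluid.framework import in_dygraph_mode

paddle.disable_static()
assert in_dygraph_mode()  # no _test_eager_guard needed

tensor = paddle.to_tensor(np.ones([2, 3], dtype='float32'))
assert tensor.shape == [2, 3]
```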
 class EagerScaleTestCase(unittest.TestCase):
     def test_scale_base(self):
-        with _test_eager_guard():
-            paddle.set_device("cpu")
-            arr = np.ones([4, 16, 16, 32]).astype('float32')
-            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
-            print(tensor)
-            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
-            for i in range(0, 100):
-                tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
-            print(tensor)
-            self.assertEqual(tensor.shape, [4, 16, 16, 32])
-            self.assertEqual(tensor.stop_gradient, True)
+        paddle.set_device("cpu")
+        arr = np.ones([4, 16, 16, 32]).astype('float32')
+        tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
+        print(tensor)
+        tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
+        for i in range(0, 100):
+            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
+        print(tensor)
+        self.assertEqual(tensor.shape, [4, 16, 16, 32])
+        self.assertEqual(tensor.stop_gradient, True)
     def test_retain_grad_and_run_backward(self):
-        with _test_eager_guard():
-            paddle.set_device("cpu")
-
-            input_data = np.ones([4, 16, 16, 32]).astype('float32')
-            data_eager = paddle.to_tensor(
-                input_data, 'float32', core.CPUPlace(), False
-            )
-
-            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
-            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
-
-            data_eager.retain_grads()
-
-            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
-            self.assertIsNone(data_eager.grad)
-            out_eager.backward(grad_eager, False)
-            self.assertIsNotNone(data_eager.grad)
-            np.testing.assert_array_equal(data_eager.grad.numpy(), input_data)
+        paddle.set_device("cpu")
+
+        input_data = np.ones([4, 16, 16, 32]).astype('float32')
+        data_eager = paddle.to_tensor(
+            input_data, 'float32', core.CPUPlace(), False
+        )
+
+        grad_data = np.ones([4, 16, 16, 32]).astype('float32')
+        grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
+
+        data_eager.retain_grads()
+
+        out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
+        self.assertIsNone(data_eager.grad)
+        out_eager.backward(grad_eager, False)
+        self.assertIsNotNone(data_eager.grad)
+        np.testing.assert_array_equal(data_eager.grad.numpy(), input_data)
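The public-API counterpart of what this test exercises — retaining a non-leaf gradient and seeding `backward` with an explicit gradient tensor — is roughly (documented `paddle.Tensor` methods, not the private `core.eager.scale`):

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.ones([2, 3], dtype='float32'), stop_gradient=False)
y = x * 2.0
y.retain_grads()  # keep the gradient of a non-leaf tensor

grad = paddle.to_tensor(np.full([2, 3], 0.5, dtype='float32'))
y.backward(grad)  # seed the backward pass with an explicit gradient

np.testing.assert_allclose(y.grad.numpy(), grad.numpy())
np.testing.assert_allclose(x.grad.numpy(), 2.0 * grad.numpy())
```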
     def test_retain_grad_and_run_backward_raises(self):
-        with _test_eager_guard():
-            paddle.set_device("cpu")
-
-            input_data = np.ones([4, 16, 16, 32]).astype('float32')
-            data_eager = paddle.to_tensor(
-                input_data, 'float32', core.CPUPlace(), False
-            )
-
-            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
-            grad_data2 = np.ones([4, 16]).astype('float32')
-            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
-            grad_eager2 = paddle.to_tensor(
-                grad_data2, 'float32', core.CPUPlace()
-            )
-
-            data_eager.retain_grads()
-
-            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
-            self.assertIsNone(data_eager.grad)
-            with self.assertRaisesRegexp(
-                AssertionError, "The type of grad_tensor must be paddle.Tensor"
-            ):
-                out_eager.backward(grad_data, False)
-
-            with self.assertRaisesRegexp(
-                AssertionError,
-                "Tensor shape not match, Tensor of grad_tensor /*",
-            ):
-                out_eager.backward(grad_eager2, False)
+        paddle.set_device("cpu")
+
+        input_data = np.ones([4, 16, 16, 32]).astype('float32')
+        data_eager = paddle.to_tensor(
+            input_data, 'float32', core.CPUPlace(), False
+        )
+
+        grad_data = np.ones([4, 16, 16, 32]).astype('float32')
+        grad_data2 = np.ones([4, 16]).astype('float32')
+        grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
+        grad_eager2 = paddle.to_tensor(grad_data2, 'float32', core.CPUPlace())
+
+        data_eager.retain_grads()
+
+        out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
+        self.assertIsNone(data_eager.grad)
+        with self.assertRaisesRegexp(
+            AssertionError, "The type of grad_tensor must be paddle.Tensor"
+        ):
+            out_eager.backward(grad_data, False)
+
+        with self.assertRaisesRegexp(
+            AssertionError,
+            "Tensor shape not match, Tensor of grad_tensor /*",
+        ):
+            out_eager.backward(grad_eager2, False)
 class EagerDtypeTestCase(unittest.TestCase):
     def check_to_tesnsor_and_numpy(self, dtype, proto_dtype):
-        with _test_eager_guard():
-            arr = np.random.random([4, 16, 16, 32]).astype(dtype)
-            tensor = paddle.to_tensor(arr, dtype)
-            self.assertEqual(tensor.dtype, proto_dtype)
-            np.testing.assert_array_equal(arr, tensor.numpy())
+        arr = np.random.random([4, 16, 16, 32]).astype(dtype)
+        tensor = paddle.to_tensor(arr, dtype)
+        self.assertEqual(tensor.dtype, proto_dtype)
+        np.testing.assert_array_equal(arr, tensor.numpy())
 
     def test_dtype_base(self):
         print("Test_dtype")
@@ -315,9 +307,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         place_list = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
             place_list.append(core.CUDAPlace(0))
-        with _test_eager_guard():
-            for p in place_list:
-                self.constructor(p)
+        for p in place_list:
+            self.constructor(p)
 
     def constructor_with_kwargs(self, place):
         # init Tensor by Python array
@@ -639,180 +631,171 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         place_list = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
             place_list.append(core.CUDAPlace(0))
-        with _test_eager_guard():
-            for p in place_list:
-                self.constructor_with_kwargs(p)
+        for p in place_list:
+            self.constructor_with_kwargs(p)
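The `place_list` pattern above has a compact public-API equivalent for writing device-portable code:

```python
import paddle

# Pick a device the same way the test builds its place_list.
place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace()
x = paddle.to_tensor([1.0, 2.0], place=place)
print(x.place)
```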
     def test_copy_and_copy_to(self):
         print("Test_copy_and_copy_to")
-        with _test_eager_guard():
-            paddle.set_device("cpu")
-            arr = np.ones([4, 16, 16, 32]).astype('float32')
-            arr1 = np.zeros([4, 16]).astype('float32')
-            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
-                [4, 16, 16, 32]
-            ).astype('float32')
-            tensor = paddle.to_tensor(
-                arr, core.VarDesc.VarType.FP32, core.CPUPlace()
-            )
-            self.assertEqual(tensor.stop_gradient, True)
-            tensor.stop_gradient = False
-            print("Set persistable")
-            tensor.persistable = False
-            tensor1 = paddle.to_tensor(
-                arr1, core.VarDesc.VarType.FP32, core.CPUPlace()
-            )
-            tensor1.persistable = True
-            self.assertEqual(tensor1.stop_gradient, True)
-            np.testing.assert_array_equal(tensor.numpy(), arr)
-            print("Test copy_")
-            tensor.copy_(tensor1, True)
-            self.assertEqual(tensor.persistable, False)
-            self.assertEqual(tensor.shape, [4, 16])
-            self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
-            np.testing.assert_array_equal(tensor.numpy(), arr1)
-
-            print("Test _copy_to")
-            tensor2 = paddle.to_tensor(
-                arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
-            )
-            np.testing.assert_array_equal(tensor2.numpy(), arr2)
-            self.assertTrue(tensor2.place.is_cpu_place())
-            tensor2.persistable = True
-            tensor2.stop_gradient = False
-            if core.is_compiled_with_cuda():
-                tensor3 = tensor2._copy_to(core.CUDAPlace(0), True)
-                np.testing.assert_array_equal(tensor3.numpy(), arr2)
-                self.assertEqual(tensor3.persistable, True)
-                self.assertEqual(tensor3.stop_gradient, True)
-                self.assertTrue(tensor3.place.is_gpu_place())
-
-                tensor4 = tensor2.cuda(0, True)
-                np.testing.assert_array_equal(tensor4.numpy(), arr2)
-                self.assertEqual(tensor4.persistable, True)
-                self.assertEqual(tensor4.stop_gradient, False)
-                self.assertTrue(tensor4.place.is_gpu_place())
-
-                tensor5 = tensor4.cpu()
-                np.testing.assert_array_equal(tensor5.numpy(), arr2)
-                self.assertEqual(tensor5.persistable, True)
-                self.assertEqual(tensor5.stop_gradient, False)
-                self.assertTrue(tensor5.place.is_cpu_place())
-
-                tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
-                tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
-                np.testing.assert_array_equal(
-                    tensor10.numpy(), tensor11.numpy()
-                )
-            else:
-                tensor3 = tensor2._copy_to(core.CPUPlace(), True)
-                np.testing.assert_array_equal(tensor3.numpy(), arr2)
-                self.assertEqual(tensor3.persistable, True)
-                self.assertEqual(tensor3.stop_gradient, True)
-                self.assertTrue(tensor3.place.is_cpu_place())
-
-                tensor4 = tensor2.cpu()
-                np.testing.assert_array_equal(tensor4.numpy(), arr2)
-                self.assertEqual(tensor4.persistable, True)
-                self.assertEqual(tensor4.stop_gradient, False)
-                self.assertTrue(tensor4.place.is_cpu_place())
+        paddle.set_device("cpu")
+        arr = np.ones([4, 16, 16, 32]).astype('float32')
+        arr1 = np.zeros([4, 16]).astype('float32')
+        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
+            [4, 16, 16, 32]
+        ).astype('float32')
+        tensor = paddle.to_tensor(
+            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
+        )
+        self.assertEqual(tensor.stop_gradient, True)
+        tensor.stop_gradient = False
+        print("Set persistable")
+        tensor.persistable = False
+        tensor1 = paddle.to_tensor(
+            arr1, core.VarDesc.VarType.FP32, core.CPUPlace()
+        )
+        tensor1.persistable = True
+        self.assertEqual(tensor1.stop_gradient, True)
+        np.testing.assert_array_equal(tensor.numpy(), arr)
+        print("Test copy_")
+        tensor.copy_(tensor1, True)
+        self.assertEqual(tensor.persistable, False)
+        self.assertEqual(tensor.shape, [4, 16])
+        self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
+        np.testing.assert_array_equal(tensor.numpy(), arr1)
+
+        print("Test _copy_to")
+        tensor2 = paddle.to_tensor(
+            arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
+        )
+        np.testing.assert_array_equal(tensor2.numpy(), arr2)
+        self.assertTrue(tensor2.place.is_cpu_place())
+        tensor2.persistable = True
+        tensor2.stop_gradient = False
+        if core.is_compiled_with_cuda():
+            tensor3 = tensor2._copy_to(core.CUDAPlace(0), True)
+            np.testing.assert_array_equal(tensor3.numpy(), arr2)
+            self.assertEqual(tensor3.persistable, True)
+            self.assertEqual(tensor3.stop_gradient, True)
+            self.assertTrue(tensor3.place.is_gpu_place())
+
+            tensor4 = tensor2.cuda(0, True)
+            np.testing.assert_array_equal(tensor4.numpy(), arr2)
+            self.assertEqual(tensor4.persistable, True)
+            self.assertEqual(tensor4.stop_gradient, False)
+            self.assertTrue(tensor4.place.is_gpu_place())
+
+            tensor5 = tensor4.cpu()
+            np.testing.assert_array_equal(tensor5.numpy(), arr2)
+            self.assertEqual(tensor5.persistable, True)
+            self.assertEqual(tensor5.stop_gradient, False)
+            self.assertTrue(tensor5.place.is_cpu_place())
+
+            tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
+            tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
+            np.testing.assert_array_equal(tensor10.numpy(), tensor11.numpy())
+        else:
+            tensor3 = tensor2._copy_to(core.CPUPlace(), True)
+            np.testing.assert_array_equal(tensor3.numpy(), arr2)
+            self.assertEqual(tensor3.persistable, True)
+            self.assertEqual(tensor3.stop_gradient, True)
+            self.assertTrue(tensor3.place.is_cpu_place())
+
+            tensor4 = tensor2.cpu()
+            np.testing.assert_array_equal(tensor4.numpy(), arr2)
+            self.assertEqual(tensor4.persistable, True)
+            self.assertEqual(tensor4.stop_gradient, False)
+            self.assertTrue(tensor4.place.is_cpu_place())
 
     def test_share_buffer_to(self):
-        with _test_eager_guard():
-            arr = np.ones([4, 16, 16, 32]).astype('float32')
-            arr1 = np.zeros([4, 16]).astype('float32')
-            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
-                [4, 16, 16, 32]
-            ).astype('float32')
-            tensor = None
-            tensor2 = None
-            tensor = paddle.to_tensor(
-                arr, core.VarDesc.VarType.FP32, core.CPUPlace()
-            )
-            tensor3 = core.eager.Tensor(value=tensor, place=core.CPUPlace())
-            if core.is_compiled_with_cuda():
-                tensor2 = paddle.to_tensor(
-                    arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)
-                )
-            else:
-                tensor2 = paddle.to_tensor(
-                    arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
-                )
-            np.testing.assert_array_equal(tensor.numpy(), arr)
-            np.testing.assert_array_equal(tensor2.numpy(), arr2)
-            tensor2._share_buffer_to(tensor)
-            np.testing.assert_array_equal(tensor.numpy(), arr2)
-            np.testing.assert_array_equal(tensor2.numpy(), arr2)
-            self.assertTrue(tensor._is_shared_buffer_with(tensor2))
-            self.assertTrue(tensor2._is_shared_buffer_with(tensor))
-            tensor._share_buffer_to(tensor3)
-            np.testing.assert_array_equal(tensor3.numpy(), arr2)
-            self.assertTrue(tensor3._is_shared_buffer_with(tensor))
+        arr = np.ones([4, 16, 16, 32]).astype('float32')
+        arr1 = np.zeros([4, 16]).astype('float32')
+        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
+            [4, 16, 16, 32]
+        ).astype('float32')
+        tensor = None
+        tensor2 = None
+        tensor = paddle.to_tensor(
+            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
+        )
+        tensor3 = core.eager.Tensor(value=tensor, place=core.CPUPlace())
+        if core.is_compiled_with_cuda():
+            tensor2 = paddle.to_tensor(
+                arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)
+            )
+        else:
+            tensor2 = paddle.to_tensor(
+                arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
+            )
+        np.testing.assert_array_equal(tensor.numpy(), arr)
+        np.testing.assert_array_equal(tensor2.numpy(), arr2)
+        tensor2._share_buffer_to(tensor)
+        np.testing.assert_array_equal(tensor.numpy(), arr2)
+        np.testing.assert_array_equal(tensor2.numpy(), arr2)
+        self.assertTrue(tensor._is_shared_buffer_with(tensor2))
+        self.assertTrue(tensor2._is_shared_buffer_with(tensor))
+        tensor._share_buffer_to(tensor3)
+        np.testing.assert_array_equal(tensor3.numpy(), arr2)
+        self.assertTrue(tensor3._is_shared_buffer_with(tensor))
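`_copy_to`, `_share_buffer_to` and friends are private `core.eager.Tensor` methods; the documented device-transfer surface exercised alongside them looks like this (the `cuda()` branch assumes a CUDA build):

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
y = x.cpu()  # copy (or no-op) to CPU
if paddle.is_compiled_with_cuda():
    z = x.cuda()  # copy to the current CUDA device
    print(z.place)
print(y.place)
```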
     def test_share_underline_tensor_to(self):
-        with _test_eager_guard():
-            arr = np.ones([4, 16, 16, 32]).astype('float32')
-            arr1 = np.zeros([4, 16]).astype('float32')
-            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
-                [4, 16, 16, 32]
-            ).astype('float32')
-            tensor = None
-            tensor2 = None
-            tensor = paddle.to_tensor(
-                arr, core.VarDesc.VarType.FP32, core.CPUPlace()
-            )
-            tensor3 = core.eager.Tensor()
-            if core.is_compiled_with_cuda():
-                tensor2 = paddle.to_tensor(
-                    arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)
-                )
-            else:
-                tensor2 = paddle.to_tensor(
-                    arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
-                )
-            np.testing.assert_array_equal(tensor.numpy(), arr)
-            np.testing.assert_array_equal(tensor2.numpy(), arr2)
-            tensor2._share_underline_tensor_to(tensor)
-            np.testing.assert_array_equal(tensor.numpy(), arr2)
-            np.testing.assert_array_equal(tensor2.numpy(), arr2)
-            self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2))
-            self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor))
-            tensor._share_underline_tensor_to(tensor3)
-            np.testing.assert_array_equal(tensor3.numpy(), arr2)
-            self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor))
+        arr = np.ones([4, 16, 16, 32]).astype('float32')
+        arr1 = np.zeros([4, 16]).astype('float32')
+        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
+            [4, 16, 16, 32]
+        ).astype('float32')
+        tensor = None
+        tensor2 = None
+        tensor = paddle.to_tensor(
+            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
+        )
+        tensor3 = core.eager.Tensor()
+        if core.is_compiled_with_cuda():
+            tensor2 = paddle.to_tensor(
+                arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)
+            )
+        else:
+            tensor2 = paddle.to_tensor(
+                arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
+            )
+        np.testing.assert_array_equal(tensor.numpy(), arr)
+        np.testing.assert_array_equal(tensor2.numpy(), arr2)
+        tensor2._share_underline_tensor_to(tensor)
+        np.testing.assert_array_equal(tensor.numpy(), arr2)
+        np.testing.assert_array_equal(tensor2.numpy(), arr2)
+        self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2))
+        self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor))
+        tensor._share_underline_tensor_to(tensor3)
+        np.testing.assert_array_equal(tensor3.numpy(), arr2)
+        self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor))
     def test_properties(self):
         print("Test_properties")
-        with _test_eager_guard():
-            paddle.set_device("cpu")
-            arr = np.ones([4, 16, 16, 32]).astype('float32')
-            tensor = paddle.to_tensor(
-                arr, core.VarDesc.VarType.FP32, core.CPUPlace()
-            )
-            self.assertEqual(tensor.shape, [4, 16, 16, 32])
-            tensor.name = 'tensor_name_test'
-            self.assertEqual(tensor.name, 'tensor_name_test')
-            self.assertEqual(tensor.persistable, False)
-            tensor.persistable = True
-            self.assertEqual(tensor.persistable, True)
-            tensor.persistable = False
-            self.assertEqual(tensor.persistable, False)
-            self.assertTrue(tensor.place.is_cpu_place())
-            self.assertEqual(tensor._place_str, 'Place(cpu)')
-            self.assertEqual(tensor.stop_gradient, True)
-            tensor.stop_gradient = False
-            self.assertEqual(tensor.stop_gradient, False)
-            tensor.stop_gradient = True
-            self.assertEqual(tensor.stop_gradient, True)
-            self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR)
+        paddle.set_device("cpu")
+        arr = np.ones([4, 16, 16, 32]).astype('float32')
+        tensor = paddle.to_tensor(
+            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
+        )
+        self.assertEqual(tensor.shape, [4, 16, 16, 32])
+        tensor.name = 'tensor_name_test'
+        self.assertEqual(tensor.name, 'tensor_name_test')
+        self.assertEqual(tensor.persistable, False)
+        tensor.persistable = True
+        self.assertEqual(tensor.persistable, True)
+        tensor.persistable = False
+        self.assertEqual(tensor.persistable, False)
+        self.assertTrue(tensor.place.is_cpu_place())
+        self.assertEqual(tensor._place_str, 'Place(cpu)')
+        self.assertEqual(tensor.stop_gradient, True)
+        tensor.stop_gradient = False
+        self.assertEqual(tensor.stop_gradient, False)
+        tensor.stop_gradient = True
+        self.assertEqual(tensor.stop_gradient, True)
+        self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR)
     def test_global_properties(self):
         print("Test_global_properties")
-        _disable_legacy_dygraph()
         self.assertTrue(in_dygraph_mode())
-        with _test_eager_guard():
-            self.assertTrue(in_dygraph_mode())
-        self.assertFalse(in_dygraph_mode())
     def test_place_guard(self):
         if core.is_compiled_with_cuda():
@@ -829,109 +812,97 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         )
     def test_value(self):
-        with _test_eager_guard():
-            arr = np.random.rand(4, 16, 16, 32).astype('float64')
-            egr_tensor0 = core.eager.Tensor(value=arr)
-            self.assertEqual(egr_tensor0.persistable, False)
-            self.assertTrue("generated" in egr_tensor0.name)
-            self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
-            self.assertTrue(
-                egr_tensor0.place._equals(
-                    paddle.fluid.framework._current_expected_place()
-                )
-            )
-            self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64)
-            self.assertEqual(egr_tensor0.stop_gradient, True)
-            self.assertTrue(
-                egr_tensor0.value().get_tensor()._dtype(),
-                core.VarDesc.VarType.FP64,
-            )
-            self.assertTrue(
-                egr_tensor0.value().get_tensor()._place(),
-                paddle.fluid.framework._current_expected_place(),
-            )
-            self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())
+        arr = np.random.rand(4, 16, 16, 32).astype('float64')
+        egr_tensor0 = core.eager.Tensor(value=arr)
+        self.assertEqual(egr_tensor0.persistable, False)
+        self.assertTrue("generated" in egr_tensor0.name)
+        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
+        self.assertTrue(
+            egr_tensor0.place._equals(
+                paddle.fluid.framework._current_expected_place()
+            )
+        )
+        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64)
+        self.assertEqual(egr_tensor0.stop_gradient, True)
+        self.assertTrue(
+            egr_tensor0.value().get_tensor()._dtype(),
+            core.VarDesc.VarType.FP64,
+        )
+        self.assertTrue(
+            egr_tensor0.value().get_tensor()._place(),
+            paddle.fluid.framework._current_expected_place(),
+        )
+        self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())
     def test_set_value(self):
-        with _test_eager_guard():
-            ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
-            egr_tensor = core.eager.Tensor(value=ori_arr)
-            self.assertEqual(egr_tensor.stop_gradient, True)
-            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
-            np.testing.assert_array_equal(egr_tensor.numpy(), ori_arr)
-            ori_place = egr_tensor.place
-
-            new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
-            self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))
-
-            egr_tensor.set_value(new_arr)
-            self.assertEqual(egr_tensor.stop_gradient, True)
-            self.assertTrue(egr_tensor.place._equals(ori_place))
-            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
-            np.testing.assert_array_equal(egr_tensor.numpy(), new_arr)
+        ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
+        egr_tensor = core.eager.Tensor(value=ori_arr)
+        self.assertEqual(egr_tensor.stop_gradient, True)
+        self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
+        np.testing.assert_array_equal(egr_tensor.numpy(), ori_arr)
+        ori_place = egr_tensor.place
+
+        new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
+        self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))
+
+        egr_tensor.set_value(new_arr)
+        self.assertEqual(egr_tensor.stop_gradient, True)
+        self.assertTrue(egr_tensor.place._equals(ori_place))
+        self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
+        np.testing.assert_array_equal(egr_tensor.numpy(), new_arr)
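`set_value` is also a public dygraph method; in user code the in-place update looks like this (shapes must match, and place/dtype are preserved, as the assertions above check):

```python
import numpy as np
import paddle

t = paddle.to_tensor(np.zeros([2, 2], dtype='float32'))
t.set_value(np.ones([2, 2], dtype='float32'))  # in-place; keeps place and dtype
np.testing.assert_array_equal(t.numpy(), np.ones([2, 2], dtype='float32'))
```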
     def test_sharding_related_api(self):
-        with _test_eager_guard():
-            arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
-            egr_tensor1 = core.eager.Tensor(
-                arr0, core.CPUPlace(), True, False, "numpy_tensor1", False
-            )
-            self.assertEqual(egr_tensor1._numel(), 32768)
-            self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)
+        arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
+        egr_tensor1 = core.eager.Tensor(
+            arr0, core.CPUPlace(), True, False, "numpy_tensor1", False
+        )
+        self.assertEqual(egr_tensor1._numel(), 32768)
+        self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)
     def test_copy_gradient_from(self):
-        with _test_eager_guard():
-            np_x = np.random.random((2, 2))
-            np_y = np.random.random((2, 2))
-            x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
-            y = paddle.to_tensor(np_y, dtype="float64")
-            out = x + x
-            out.backward()
-            x._copy_gradient_from(y)
-            np.testing.assert_array_equal(x.grad.numpy(), np_y)
+        np_x = np.random.random((2, 2))
+        np_y = np.random.random((2, 2))
+        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
+        y = paddle.to_tensor(np_y, dtype="float64")
+        out = x + x
+        out.backward()
+        x._copy_gradient_from(y)
+        np.testing.assert_array_equal(x.grad.numpy(), np_y)
     def test_clear(self):
-        with _test_eager_guard():
-            np_x = np.random.random((3, 8, 8))
-            x = paddle.to_tensor(np_x, dtype="float64")
-            self.assertTrue(x._is_initialized())
-            x._clear()
-            self.assertFalse(x._is_initialized())
+        np_x = np.random.random((3, 8, 8))
+        x = paddle.to_tensor(np_x, dtype="float64")
+        self.assertTrue(x._is_initialized())
+        x._clear()
+        self.assertFalse(x._is_initialized())
     def test_use_gpudnn(self):
         np_x = np.random.random((3, 8, 8))
-        with _test_eager_guard():
-            self.assertTrue(in_dygraph_mode())
-            x = paddle.to_tensor(np_x, dtype="float64")
-            y = x._use_gpudnn(False)
-            np.testing.assert_array_equal(x.numpy(), y.numpy())
-            y = x._use_gpudnn(True)
-            np.testing.assert_array_equal(x.numpy(), y.numpy())
-        self.assertFalse(in_dygraph_mode())
-        x = paddle.to_tensor(np_x, dtype="float64")
-        with self.assertRaises(AttributeError):
-            x = x._use_gpudnn(False)
+        self.assertTrue(in_dygraph_mode())
+        x = paddle.to_tensor(np_x, dtype="float64")
+        y = x._use_gpudnn(False)
+        np.testing.assert_array_equal(x.numpy(), y.numpy())
+        y = x._use_gpudnn(True)
+        np.testing.assert_array_equal(x.numpy(), y.numpy())
 class EagerParamBaseUsageTestCase(unittest.TestCase):
     def test_print(self):
-        with _test_eager_guard():
-            linear = paddle.nn.Linear(3, 3, bias_attr=False)
-            print(linear.weight)
+        linear = paddle.nn.Linear(3, 3, bias_attr=False)
+        print(linear.weight)
     def test_copy(self):
-        with _test_eager_guard():
-            linear = paddle.nn.Linear(1, 3)
-            linear_copy = copy.deepcopy(linear)
-            linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
-            np.testing.assert_array_equal(
-                linear.weight.numpy(), linear_copy.weight.numpy()
-            )
-            np.testing.assert_array_equal(
-                linear.weight.numpy(), linear_copy2.numpy()
-            )
+        linear = paddle.nn.Linear(1, 3)
+        linear_copy = copy.deepcopy(linear)
+        linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
+        np.testing.assert_array_equal(
+            linear.weight.numpy(), linear_copy.weight.numpy()
+        )
+        np.testing.assert_array_equal(
+            linear.weight.numpy(), linear_copy2.numpy()
+        )
     def func_fp16_initilaizer(self):
         paddle.set_default_dtype("float16")
@@ -963,18 +934,6 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         paddle.set_default_dtype("float32")
         return res
-    def test_fp16_initializer(self):
-        res1 = list()
-        res2 = list()
-        paddle.seed(102)
-        paddle.framework.random._manual_program_seed(102)
-        with _test_eager_guard():
-            res1 = self.func_fp16_initilaizer()
-        res2 = self.func_fp16_initilaizer()
-
-        for i in range(len(res1)):
-            np.testing.assert_array_equal(res1[i], res2[i])
     def func_layer_helper_base(self, value):
         base = paddle.fluid.layer_helper_base.LayerHelperBase(
             "test_layer", "test_layer"
@@ -984,53 +943,32 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
     def func_base_to_variable(self, value):
         paddle.fluid.dygraph.base.to_variable(value)
-    def test_to_variable(self):
-        value = np.random.rand(4, 16, 16, 32).astype('float32')
-        res1 = None
-        res3 = None
-        with _test_eager_guard():
-            res1 = self.func_layer_helper_base(value)
-            res3 = self.func_base_to_variable(value)
-        res2 = self.func_layer_helper_base(value)
-        res4 = self.func_base_to_variable(value)
-        np.testing.assert_array_equal(res1, res2)
-        np.testing.assert_array_equal(res3, res4)
     def test_backward_with_single_tensor(self):
-        with _test_eager_guard():
-            arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
-            egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
-            egr_tensor12.retain_grads()
-            arr = np.ones([4, 16, 16, 32]).astype('float32')
-            self.assertEqual(egr_tensor12.persistable, False)
-            self.assertTrue("generated_tensor" in egr_tensor12.name)
-            self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
-            self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
-            self.assertEqual(egr_tensor12.stop_gradient, True)
-            self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
-            np.testing.assert_array_equal(egr_tensor12.numpy(), arr4)
-            np.testing.assert_array_equal(egr_tensor12.gradient(), None)
-            egr_tensor12.stop_gradient = False
-            egr_tensor12.backward()
-            np.testing.assert_array_equal(egr_tensor12.gradient(), arr)
+        arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
+        egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
+        egr_tensor12.retain_grads()
+        arr = np.ones([4, 16, 16, 32]).astype('float32')
+        self.assertEqual(egr_tensor12.persistable, False)
+        self.assertTrue("generated_tensor" in egr_tensor12.name)
+        self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
+        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
+        self.assertEqual(egr_tensor12.stop_gradient, True)
+        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
+        np.testing.assert_array_equal(egr_tensor12.numpy(), arr4)
+        np.testing.assert_array_equal(egr_tensor12.gradient(), None)
+        egr_tensor12.stop_gradient = False
+        egr_tensor12.backward()
+        np.testing.assert_array_equal(egr_tensor12.gradient(), arr)
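The last assertion relies on `backward()` seeding a gradient of ones when no explicit gradient is given; rendered with the public API (mirroring exactly what the test above asserts):

```python
import numpy as np
import paddle

t = paddle.to_tensor(np.random.rand(2, 3).astype('float32'))
t.stop_gradient = False
t.retain_grads()
t.backward()  # no loss/grad argument: the seed gradient is ones of t's shape
np.testing.assert_array_equal(t.gradient(), np.ones([2, 3], dtype='float32'))
```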
     def test_set_value(self):
-        with _test_eager_guard():
-            linear = paddle.nn.Linear(1, 3)
-            ori_place = linear.weight.place
-            new_weight = np.ones([1, 3]).astype('float32')
-            self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
-            linear.weight.set_value(new_weight)
-            np.testing.assert_array_equal(linear.weight.numpy(), new_weight)
-            self.assertTrue(linear.weight.place._equals(ori_place))
-
-
-class EagerGuardTestCase(unittest.TestCase):
-    def test__test_eager_guard(self):
-        tracer = paddle.fluid.dygraph.tracer.Tracer()
-        with _test_eager_guard(tracer):
-            self.assertTrue(in_dygraph_mode())
+        linear = paddle.nn.Linear(1, 3)
+        ori_place = linear.weight.place
+        new_weight = np.ones([1, 3]).astype('float32')
+        self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
+        linear.weight.set_value(new_weight)
+        np.testing.assert_array_equal(linear.weight.numpy(), new_weight)
+        self.assertTrue(linear.weight.place._equals(ori_place))
 
 if __name__ == "__main__":
......
@@ -73,7 +73,6 @@ class LinalgLstsqTestCase(unittest.TestCase):
     def test_eager_dygraph(self):
         paddle.disable_static()
-        paddle.fluid.framework._disable_legacy_dygraph()
         for dev in self.devices:
             paddle.set_device(dev)
             place = paddle.CPUPlace() if dev == "cpu" else paddle.CUDAPlace(0)
......
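For reference, `LinalgLstsqTestCase` drives the public least-squares solver, whose basic usage is (assuming the documented `paddle.linalg.lstsq` signature):

```python
import paddle

# Fit w in x @ w ≈ y in the least-squares sense.
x = paddle.to_tensor([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
y = paddle.to_tensor([[1.0], [2.0], [2.0]])
solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y)
print(solution)  # intercept and slope of the fitted line
```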