Unverified commit 3125733a, authored by Weilong Wu, committed by GitHub

rm _disable_legacy_dygraph and disable one mkldnn test file (#48721)

* rm _disable_legacy_dygraph

* disable test_flags_mkldnn_ops_on_off test
Parent d050c188
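Note: `_disable_legacy_dygraph()` only mattered while Paddle shipped two dygraph implementations side by side; now that eager mode is the default, calling it after `paddle.disable_static()` is redundant in these tests, which is why the calls below can simply be deleted. A minimal sketch of the pattern being removed (illustrative only; `in_dygraph_mode` is imported exactly as in the diff below):

    import paddle
    from paddle.fluid.framework import in_dygraph_mode

    # Old test boilerplate, removed by this commit:
    #     paddle.disable_static()
    #     paddle.fluid.framework._disable_legacy_dygraph()  # force eager mode
    # New behavior: eager dygraph is already the default dygraph implementation.
    paddle.disable_static()
    assert in_dygraph_mode()  # holds without any extra call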
@@ -5,6 +5,7 @@ file(
 string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
 list(REMOVE_ITEM TEST_OPS "test_onnx_format_quantization_mobilenetv1")
+list(REMOVE_ITEM TEST_OPS "test_flags_mkldnn_ops_on_off")
 if(WITH_MKLDNN AND NOT WIN32)
   list(APPEND TEST_OPS "test_onnx_format_quantization_mobilenetv1")
@@ -19,4 +20,4 @@ if(WITH_MKLDNN AND NOT WIN32)
   set_tests_properties(test_onnx_format_quantization_mobilenetv1
                        PROPERTIES TIMEOUT 300)
 endif()
-set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
+# set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
@@ -18,11 +18,9 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _enable_legacy_dygraph, _global_flags
+from paddle.fluid.framework import _global_flags
 from paddle.fluid.layer_helper import LayerHelper

-_enable_legacy_dygraph()

 def check():
     print(
......
@@ -22,7 +22,6 @@ from decorator_helper import prog_scope
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.framework as framework
 import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
 from paddle.fluid.backward import append_backward
@@ -42,7 +41,6 @@ class TestAssignOp(op_test.OpTest):
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()

     def test_backward(self):
         paddle.enable_static()
@@ -50,7 +48,6 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()


 class TestAssignFP16Op(op_test.OpTest):
@@ -67,7 +64,6 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()

     def test_backward(self):
         paddle.enable_static()
@@ -75,7 +71,6 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()


 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
......
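Note: the same reasoning covers `_test_eager_guard` in the eager-tensor tests below. The guard existed so a test body could opt in to eager mode while legacy dygraph was still the default; with eager mode now the default, the wrapper can be deleted and the body runs unchanged. A hedged before/after sketch (illustrative test name and data, not the actual test code):

    import unittest

    import numpy as np
    import paddle
    from paddle.fluid.framework import in_dygraph_mode

    class GuardRemovalSketch(unittest.TestCase):
        def test_to_tensor_runs_eagerly(self):
            # Before: the body sat inside `with _test_eager_guard():`.
            # After: the wrapper is gone and eager mode is simply asserted.
            tensor = paddle.to_tensor(np.ones([2, 2]), 'float32')
            self.assertTrue(in_dygraph_mode())
            np.testing.assert_array_equal(tensor.numpy(), np.ones([2, 2]))

    if __name__ == "__main__":
        unittest.main()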
@@ -22,15 +22,12 @@ import paddle.fluid.core as core
 from paddle.fluid.framework import (
     EagerParamBase,
     _current_expected_place,
-    _disable_legacy_dygraph,
-    _test_eager_guard,
     in_dygraph_mode,
 )


 class EagerScaleTestCase(unittest.TestCase):
     def test_scale_base(self):
-        with _test_eager_guard():
         paddle.set_device("cpu")
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
@@ -43,7 +40,6 @@ class EagerScaleTestCase(unittest.TestCase):
         self.assertEqual(tensor.stop_gradient, True)

     def test_retain_grad_and_run_backward(self):
-        with _test_eager_guard():
         paddle.set_device("cpu")
         input_data = np.ones([4, 16, 16, 32]).astype('float32')
@@ -63,7 +59,6 @@ class EagerScaleTestCase(unittest.TestCase):
         np.testing.assert_array_equal(data_eager.grad.numpy(), input_data)

     def test_retain_grad_and_run_backward_raises(self):
-        with _test_eager_guard():
         paddle.set_device("cpu")
         input_data = np.ones([4, 16, 16, 32]).astype('float32')
@@ -74,9 +69,7 @@ class EagerScaleTestCase(unittest.TestCase):
         grad_data = np.ones([4, 16, 16, 32]).astype('float32')
         grad_data2 = np.ones([4, 16]).astype('float32')
         grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
-        grad_eager2 = paddle.to_tensor(
-            grad_data2, 'float32', core.CPUPlace()
-        )
+        grad_eager2 = paddle.to_tensor(grad_data2, 'float32', core.CPUPlace())

         data_eager.retain_grads()
@@ -96,7 +89,6 @@ class EagerScaleTestCase(unittest.TestCase):

 class EagerDtypeTestCase(unittest.TestCase):
     def check_to_tesnsor_and_numpy(self, dtype, proto_dtype):
-        with _test_eager_guard():
         arr = np.random.random([4, 16, 16, 32]).astype(dtype)
         tensor = paddle.to_tensor(arr, dtype)
         self.assertEqual(tensor.dtype, proto_dtype)
@@ -315,7 +307,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         place_list = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
             place_list.append(core.CUDAPlace(0))
-        with _test_eager_guard():
         for p in place_list:
             self.constructor(p)
@@ -639,13 +631,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         place_list = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
             place_list.append(core.CUDAPlace(0))
-        with _test_eager_guard():
         for p in place_list:
             self.constructor_with_kwargs(p)

     def test_copy_and_copy_to(self):
         print("Test_copy_and_copy_to")
-        with _test_eager_guard():
         paddle.set_device("cpu")
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         arr1 = np.zeros([4, 16]).astype('float32')
@@ -701,9 +693,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
             tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
             tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
-            np.testing.assert_array_equal(
-                tensor10.numpy(), tensor11.numpy()
-            )
+            np.testing.assert_array_equal(tensor10.numpy(), tensor11.numpy())
         else:
             tensor3 = tensor2._copy_to(core.CPUPlace(), True)
             np.testing.assert_array_equal(tensor3.numpy(), arr2)
@@ -718,7 +708,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(tensor4.place.is_cpu_place())

     def test_share_buffer_to(self):
-        with _test_eager_guard():
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         arr1 = np.zeros([4, 16]).astype('float32')
         arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
@@ -750,7 +739,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(tensor3._is_shared_buffer_with(tensor))

     def test_share_underline_tensor_to(self):
-        with _test_eager_guard():
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         arr1 = np.zeros([4, 16]).astype('float32')
         arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
@@ -783,7 +771,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
     def test_properties(self):
         print("Test_properties")
-        with _test_eager_guard():
         paddle.set_device("cpu")
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         tensor = paddle.to_tensor(
@@ -808,11 +795,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
     def test_global_properties(self):
         print("Test_global_properties")
-        _disable_legacy_dygraph()
         self.assertTrue(in_dygraph_mode())
-        with _test_eager_guard():
-            self.assertTrue(in_dygraph_mode())
-        self.assertFalse(in_dygraph_mode())

     def test_place_guard(self):
         if core.is_compiled_with_cuda():
@@ -829,7 +812,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         )

     def test_value(self):
-        with _test_eager_guard():
         arr = np.random.rand(4, 16, 16, 32).astype('float64')
         egr_tensor0 = core.eager.Tensor(value=arr)
@@ -854,7 +836,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())

     def test_set_value(self):
-        with _test_eager_guard():
         ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
         egr_tensor = core.eager.Tensor(value=ori_arr)
         self.assertEqual(egr_tensor.stop_gradient, True)
@@ -872,7 +853,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         np.testing.assert_array_equal(egr_tensor.numpy(), new_arr)

     def test_sharding_related_api(self):
-        with _test_eager_guard():
         arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
         egr_tensor1 = core.eager.Tensor(
             arr0, core.CPUPlace(), True, False, "numpy_tensor1", False
@@ -881,7 +861,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)

     def test_copy_gradient_from(self):
-        with _test_eager_guard():
         np_x = np.random.random((2, 2))
         np_y = np.random.random((2, 2))
         x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
@@ -892,7 +871,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         np.testing.assert_array_equal(x.grad.numpy(), np_y)

     def test_clear(self):
-        with _test_eager_guard():
         np_x = np.random.random((3, 8, 8))
         x = paddle.to_tensor(np_x, dtype="float64")
         self.assertTrue(x._is_initialized())
@@ -901,7 +879,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
     def test_use_gpudnn(self):
         np_x = np.random.random((3, 8, 8))
-        with _test_eager_guard():
-            self.assertTrue(in_dygraph_mode())
+        self.assertTrue(in_dygraph_mode())
         x = paddle.to_tensor(np_x, dtype="float64")
         y = x._use_gpudnn(False)
@@ -909,20 +887,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         y = x._use_gpudnn(True)
         np.testing.assert_array_equal(x.numpy(), y.numpy())
-        self.assertFalse(in_dygraph_mode())
-        x = paddle.to_tensor(np_x, dtype="float64")
-        with self.assertRaises(AttributeError):
-            x = x._use_gpudnn(False)


 class EagerParamBaseUsageTestCase(unittest.TestCase):
     def test_print(self):
-        with _test_eager_guard():
         linear = paddle.nn.Linear(3, 3, bias_attr=False)
         print(linear.weight)

     def test_copy(self):
-        with _test_eager_guard():
         linear = paddle.nn.Linear(1, 3)
         linear_copy = copy.deepcopy(linear)
         linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
@@ -963,18 +934,6 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         paddle.set_default_dtype("float32")
         return res

-    def test_fp16_initializer(self):
-        res1 = list()
-        res2 = list()
-        paddle.seed(102)
-        paddle.framework.random._manual_program_seed(102)
-        with _test_eager_guard():
-            res1 = self.func_fp16_initilaizer()
-        res2 = self.func_fp16_initilaizer()
-
-        for i in range(len(res1)):
-            np.testing.assert_array_equal(res1[i], res2[i])
-
     def func_layer_helper_base(self, value):
         base = paddle.fluid.layer_helper_base.LayerHelperBase(
             "test_layer", "test_layer"
@@ -984,20 +943,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
     def func_base_to_variable(self, value):
         paddle.fluid.dygraph.base.to_variable(value)

-    def test_to_variable(self):
-        value = np.random.rand(4, 16, 16, 32).astype('float32')
-        res1 = None
-        res3 = None
-        with _test_eager_guard():
-            res1 = self.func_layer_helper_base(value)
-            res3 = self.func_base_to_variable(value)
-        res2 = self.func_layer_helper_base(value)
-        res4 = self.func_base_to_variable(value)
-        np.testing.assert_array_equal(res1, res2)
-        np.testing.assert_array_equal(res3, res4)
-
     def test_backward_with_single_tensor(self):
-        with _test_eager_guard():
         arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
         egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
         egr_tensor12.retain_grads()
@@ -1015,7 +961,6 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         np.testing.assert_array_equal(egr_tensor12.gradient(), arr)

     def test_set_value(self):
-        with _test_eager_guard():
         linear = paddle.nn.Linear(1, 3)
         ori_place = linear.weight.place
         new_weight = np.ones([1, 3]).astype('float32')
@@ -1026,12 +971,5 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         self.assertTrue(linear.weight.place._equals(ori_place))


-class EagerGuardTestCase(unittest.TestCase):
-    def test__test_eager_guard(self):
-        tracer = paddle.fluid.dygraph.tracer.Tracer()
-        with _test_eager_guard(tracer):
-            self.assertTrue(in_dygraph_mode())
-

 if __name__ == "__main__":
     unittest.main()
@@ -73,7 +73,6 @@ class LinalgLstsqTestCase(unittest.TestCase):
     def test_eager_dygraph(self):
         paddle.disable_static()
-        paddle.fluid.framework._disable_legacy_dygraph()
         for dev in self.devices:
             paddle.set_device(dev)
             place = paddle.CPUPlace() if dev == "cpu" else paddle.CUDAPlace(0)
......