Unverified commit a72a0da0, authored by 姜永久, committed by GitHub

rm legacy unittest part5 (#49282)

* rm legacy unittest part5

* add custom op
Parent c8f76337
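Every hunk below removes the same pattern: a branch on the retired legacy dygraph mode, keeping only the eager-mode path. A rough before/after sketch of that pattern, built from the assign_group_by_size helper that appears in this diff (illustrative only, not an exact file excerpt):

    # Before: each call site dispatched on the runtime mode.
    def assign_group_by_size(self, *args):
        if in_dygraph_mode():
            return core.eager_assign_group_by_size(*args)  # eager path, kept
        elif _in_legacy_dygraph():
            return core.assign_group_by_size(*args)        # legacy path, removed

    # After: eager mode is the only dygraph mode, so the guard collapses.
    def assign_group_by_size(self, *args):
        if in_dygraph_mode():
            return core.eager_assign_group_by_size(*args)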
@@ -21,7 +21,6 @@ from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
 import paddle
 from paddle import nn
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd
@@ -123,14 +122,6 @@ class TestDygraphModel(unittest.TestCase):
         # for train
         origin_relu_train_out = self.train_model(use_custom_op=False)
         custom_relu_train_out = self.train_model(use_custom_op=True)
-        # open this when dy2stat is ready for eager
-        if _in_legacy_dygraph():
-            custom_relu_dy2stat_train_out = self.train_model(
-                use_custom_op=True, dy2stat=True
-            )  # for to_static
-            np.testing.assert_array_equal(
-                origin_relu_train_out, custom_relu_dy2stat_train_out
-            )
         np.testing.assert_array_equal(
             origin_relu_train_out, custom_relu_train_out
@@ -139,13 +130,6 @@ class TestDygraphModel(unittest.TestCase):
         # for eval
         origin_relu_eval_out = self.eval_model(use_custom_op=False)
         custom_relu_eval_out = self.eval_model(use_custom_op=True)
-        if _in_legacy_dygraph():
-            custom_relu_dy2stat_eval_out = self.eval_model(
-                use_custom_op=True, dy2stat=True
-            )  # for to_static
-            np.testing.assert_array_equal(
-                origin_relu_eval_out, custom_relu_dy2stat_eval_out
-            )
         np.testing.assert_array_equal(
             origin_relu_eval_out, custom_relu_eval_out
...
@@ -24,7 +24,7 @@ from paddle.fluid.executor import (
     _is_dy2st_enable_standalone_executor,
     _is_enable_standalone_executor,
 )
-from paddle.fluid.framework import Variable, _in_legacy_dygraph
+from paddle.fluid.framework import Variable
 from paddle.fluid.layers.utils import _hash_with_id
@@ -63,15 +63,6 @@ def _create_out(var):
     assert isinstance(var, Variable)
     var_desc = var.desc
     varbase = None
-    if _in_legacy_dygraph():
-        var_base = core.VarBase(
-            var_desc.dtype(),
-            var_desc.shape(),
-            var_desc.name(),
-            var_desc.type(),
-            False,
-        )
-    else:
-        var_base = core.eager.Tensor(
-            var_desc.dtype(),
-            var_desc.shape(),
+    var_base = core.eager.Tensor(
+        var_desc.dtype(),
+        var_desc.shape(),
...
@@ -16,7 +16,7 @@ import unittest
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode
 class TestDataParallelGroup(unittest.TestCase):
@@ -26,8 +26,6 @@ class TestDataParallelGroup(unittest.TestCase):
     def assign_group_by_size(self, *args):
         if in_dygraph_mode():
             return core.eager_assign_group_by_size(*args)
-        elif _in_legacy_dygraph():
-            return core.assign_group_by_size(*args)
     def test_construct_group0(self):
         # one dtype & one limit capability
...
@@ -18,7 +18,6 @@ import warnings
 import numpy as np
 import paddle.fluid as fluid
-from paddle.fluid.framework import _in_legacy_dygraph
 class TestImperativeNumpyBridge(unittest.TestCase):
@@ -44,11 +43,6 @@ class TestImperativeNumpyBridge(unittest.TestCase):
         np.testing.assert_array_equal(var2.numpy(), data_np)
         data_np[0][0] = -1
         self.assertEqual(data_np[0][0], -1)
-        if not _in_legacy_dygraph():
-            # eager_mode, var2 is Tensor, is not subscriptable
-            # TODO(wuweilong): to support slice in eager mode later
-            self.assertNotEqual(var2.numpy()[0][0], -1)
-        else:
-            self.assertNotEqual(var2[0][0].numpy()[0], -1)
+        self.assertNotEqual(var2[0][0].numpy()[0], -1)
         self.assertFalse(np.array_equal(var2.numpy(), data_np))
...
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
 import paddle.nn.functional as F
 from paddle.fluid import Layer, core
 from paddle.fluid.dygraph import guard, to_variable
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.nn import Linear
 np.set_printoptions(suppress=True)
@@ -1193,18 +1192,6 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
             dy_param_updated,
         )
-        with guard():
-            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            if _in_legacy_dygraph():
-                (
-                    dy_avg_cost_value,
-                    dy_sum_cost_value,
-                    dy_predict_value,
-                    dy_token_num_value,
-                    dy_param_init,
-                    dy_param_updated,
-                ) = run_dygraph()
         with new_program_scope():
             paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
@@ -1296,24 +1283,6 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
                 static_param_updated[
                     static_param_name_list[k - 4]
                 ] = out[k]
-        if _in_legacy_dygraph():
-            np.testing.assert_array_equal(
-                static_avg_cost_value, dy_avg_cost_value
-            )
-            np.testing.assert_array_equal(
-                static_sum_cost_value, dy_sum_cost_value
-            )
-            np.testing.assert_array_equal(
-                static_predict_value, dy_predict_value
-            )
-            np.testing.assert_array_equal(
-                static_token_num_value, dy_token_num_value
-            )
-            for key, value in static_param_init.items():
-                np.testing.assert_array_equal(value, dy_param_init[key])
-            for key, value in static_param_updated.items():
-                np.testing.assert_array_equal(value, dy_param_updated[key])
         # compare eager result with imperative result
         with guard():
...
@@ -20,8 +20,8 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle import _C_ops
+from paddle.fluid.framework import in_dygraph_mode
 # hack method for test p_norm final state
@@ -30,20 +30,6 @@ def p_norm_python_api(
 ):
     if in_dygraph_mode():
         return _C_ops.p_norm(x, p, axis, epsilon, keepdim, as_vector)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.p_norm(
-            x,
-            'axis',
-            axis,
-            'porder',
-            float(p),
-            'keepdim',
-            keepdim,
-            'epsilon',
-            epsilon,
-            'as_vector',
-            as_vector,
-        )
 def p_norm(x, axis, porder, keepdims=False, reduce_all=False):
...
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 import paddle.optimizer as optimizer
-from paddle.fluid.framework import _in_legacy_dygraph
 class TestOptimizerForVarBase(unittest.TestCase):
@@ -89,15 +88,6 @@ class TestOptimizerForVarBase(unittest.TestCase):
         optimizer.Adam(learning_rate=self.lr, parameters=x)
     def test_create_param_lr_with_1_for_coverage(self):
-        if _in_legacy_dygraph():
-            x = paddle.fluid.framework.ParamBase(
-                dtype="float32",
-                shape=[5, 10],
-                lod_level=0,
-                name="x",
-                optimize_attr={'learning_rate': 1.0},
-            )
-        else:
-            x = paddle.fluid.framework.EagerParamBase(
-                dtype="float32",
-                shape=[5, 10],
+        x = paddle.fluid.framework.EagerParamBase(
+            dtype="float32",
+            shape=[5, 10],
@@ -117,15 +107,6 @@ class TestOptimizerForVarBase(unittest.TestCase):
         opt.step()
     def test_create_param_lr_with_no_1_value_for_coverage(self):
-        if _in_legacy_dygraph():
-            x = paddle.fluid.framework.ParamBase(
-                dtype="float32",
-                shape=[5, 10],
-                lod_level=0,
-                name="x",
-                optimize_attr={'learning_rate': 0.12},
-            )
-        else:
-            x = paddle.fluid.framework.EagerParamBase(
-                dtype="float32",
-                shape=[5, 10],
+        x = paddle.fluid.framework.EagerParamBase(
+            dtype="float32",
+            shape=[5, 10],
...
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.nn.functional as F
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.wrapped_decorator import wrap_decorator
@@ -239,22 +238,6 @@ class TestDygraphDoubleGrad(TestCase):
         ).astype('float32')
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
-        if not _in_legacy_dygraph():
-            pass
-        else:
-            loss = paddle.mean(dx_actual * dx_actual + x * x)
-            loss.backward()
-            x_grad_actual = x.gradient()
-            x_grad_expected = (
-                2.0
-                / float(numel)
-                * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))
-            ).astype('float32')
-            np.testing.assert_allclose(
-                x_grad_actual, x_grad_expected, rtol=1e-05
-            )
     @dygraph_guard
     def test_example_with_gradient_accumulation_and_no_grad_vars(self):
         x = random_var(self.shape)
@@ -286,22 +269,6 @@ class TestDygraphDoubleGrad(TestCase):
         ).astype('float32')
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
-        if not _in_legacy_dygraph():
-            pass
-        else:
-            loss = paddle.mean(dx_actual * dx_actual + x * x)
-            loss.backward()
-            x_grad_actual = x.gradient()
-            x_grad_expected = (
-                2.0
-                / float(numel)
-                * (x_np + dx_expected * (x_np > 0) * 4 / float(numel))
-            ).astype('float32')
-            np.testing.assert_allclose(
-                x_grad_actual, x_grad_expected, rtol=1e-05
-            )
     @dygraph_guard
     def test_example_with_gradient_accumulation_and_not_create_graph(self):
         x = random_var(self.shape)
@@ -327,18 +294,6 @@ class TestDygraphDoubleGrad(TestCase):
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
-        if not _in_legacy_dygraph():
-            pass
-        else:
-            loss = paddle.mean(dx_actual * dx_actual + x * x)
-            loss.backward()
-            x_grad_actual = x.gradient()
-            x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
-            np.testing.assert_allclose(
-                x_grad_actual, x_grad_expected, rtol=1e-05
-            )
 class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
     def setUp(self):
...
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 from paddle.fluid import core
-from paddle.fluid.framework import _in_legacy_dygraph
 class TestTensorCopyFrom(unittest.TestCase):
@@ -46,10 +45,6 @@ class TestUVATensorFromNumpy(unittest.TestCase):
         ]
         for dtype in dtype_list:
             data = np.random.randint(10, size=[4, 5]).astype(dtype)
-            if _in_legacy_dygraph():
-                tensor = paddle.fluid.core.to_uva_tensor(data, 0)
-                tensor2 = paddle.fluid.core.to_uva_tensor(data)
-            else:
-                tensor = core.eager.to_uva_tensor(data, 0)
-                tensor2 = core.eager.to_uva_tensor(data)
+            tensor = core.eager.to_uva_tensor(data, 0)
+            tensor2 = core.eager.to_uva_tensor(data)
...
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
-from paddle.fluid.framework import _in_legacy_dygraph
 class TestVarBase(unittest.TestCase):
@@ -443,9 +442,6 @@ class TestVarBase(unittest.TestCase):
     def test_deep_copy(self):
         with fluid.dygraph.guard():
-            if _in_legacy_dygraph():
-                empty_var = core.VarBase()
-            else:
-                empty_var = core.eager.Tensor()
+            empty_var = core.eager.Tensor()
             empty_var_copy = copy.deepcopy(empty_var)
             self.assertEqual(
@@ -484,15 +480,6 @@ class TestVarBase(unittest.TestCase):
             self.assertEqual(id(y_copy), id(y_copy2))
             # test copy selected rows
-            if _in_legacy_dygraph():
-                x = core.VarBase(
-                    core.VarDesc.VarType.FP32,
-                    [3, 100],
-                    "selected_rows",
-                    core.VarDesc.VarType.SELECTED_ROWS,
-                    True,
-                )
-            else:
-                x = core.eager.Tensor(
-                    core.VarDesc.VarType.FP32,
-                    [3, 100],
+            x = core.eager.Tensor(
+                core.VarDesc.VarType.FP32,
+                [3, 100],
@@ -1247,14 +1234,8 @@ class TestVarBaseSetitem(unittest.TestCase):
         self.dtype = "int32"
     def _test(self, value):
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 0)
         id_origin = id(self.tensor_x)
         self.tensor_x[0] = value
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 1)
         if isinstance(value, (int, float)):
             result = np.zeros((2, 3)).astype(self.dtype) + value
@@ -1265,14 +1246,10 @@ class TestVarBaseSetitem(unittest.TestCase):
         self.assertEqual(id_origin, id(self.tensor_x))
         self.tensor_x[1:2] = value
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 2)
         np.testing.assert_array_equal(self.tensor_x[1].numpy(), result)
         self.assertEqual(id_origin, id(self.tensor_x))
         self.tensor_x[...] = value
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 3)
         np.testing.assert_array_equal(self.tensor_x[3].numpy(), result)
         self.assertEqual(id_origin, id(self.tensor_x))
@@ -1476,9 +1453,6 @@ class TestVarBaseShareBufferTo(unittest.TestCase):
         np_src = np.random.random((3, 8, 8))
         src = paddle.to_tensor(np_src, dtype="float64")
         # empty_var
-        if _in_legacy_dygraph():
-            dst = core.VarBase()
-        else:
-            dst = core.eager.Tensor()
+        dst = core.eager.Tensor()
         src._share_buffer_to(dst)
         self.assertEqual(src._is_shared_buffer_with(dst), True)
@@ -1553,17 +1527,11 @@ class TestVarBaseInitVarBaseFromTensorWithDevice(unittest.TestCase):
         if paddle.fluid.is_compiled_with_cuda():
             device = paddle.CUDAPlace(0)
-            if _in_legacy_dygraph():
-                tmp = fluid.core.VarBase(t, device)
-            else:
-                tmp = fluid.core.eager.Tensor(t, device)
+            tmp = fluid.core.eager.Tensor(t, device)
             self.assertTrue(tmp.place.is_gpu_place())
             self.assertEqual(tmp.numpy().all(), np_x.all())
         device = paddle.CPUPlace()
-        if _in_legacy_dygraph():
-            tmp = fluid.core.VarBase(t, device)
-        else:
-            tmp = fluid.core.eager.Tensor(t, device)
+        tmp = fluid.core.eager.Tensor(t, device)
         self.assertEqual(tmp.numpy().all(), np_x.all())
@@ -1579,9 +1547,6 @@ class TestVarBaseNumel(unittest.TestCase):
     def test_numel_without_holder(self):
         paddle.disable_static()
-        if _in_legacy_dygraph():
-            x_without_holder = core.VarBase()
-        else:
-            x_without_holder = core.eager.Tensor()
+        x_without_holder = core.eager.Tensor()
         x_actual_numel = x_without_holder._numel()
         self.assertEqual(x_actual_numel, 0)
...