Unverified commit a72a0da0, authored by 姜永久, committed by GitHub

rm legacy unittest part5 (#49282)

* rm legacy unittest part5

* add custom op
Parent c8f76337
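The change is mechanical across every touched test: wherever code forked on `_in_legacy_dygraph()` and kept a `core.VarBase` fallback, only the eager-mode branch survives. A minimal sketch of the before/after pattern, with constructor arguments taken from the `_create_out` hunk below (the helper name `make_out_tensor` is ours, not Paddle's):

```python
# Minimal sketch of the pattern this commit removes, assuming an
# eager-mode Paddle 2.x build; `make_out_tensor` is a hypothetical name.
from paddle.fluid import core

def make_out_tensor(var_desc):
    # Before this commit, an _in_legacy_dygraph() branch constructed
    # core.VarBase with the same five arguments; only the eager
    # constructor remains afterwards.
    return core.eager.Tensor(
        var_desc.dtype(),
        var_desc.shape(),
        var_desc.name(),
        var_desc.type(),
        False,  # persistable flag, as passed in the diff
    )
```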
@@ -21,7 +21,6 @@ from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
 
 import paddle
 from paddle import nn
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd
@@ -123,14 +122,6 @@ class TestDygraphModel(unittest.TestCase):
         # for train
         origin_relu_train_out = self.train_model(use_custom_op=False)
         custom_relu_train_out = self.train_model(use_custom_op=True)
-        # open this when dy2stat is ready for eager
-        if _in_legacy_dygraph():
-            custom_relu_dy2stat_train_out = self.train_model(
-                use_custom_op=True, dy2stat=True
-            )  # for to_static
-            np.testing.assert_array_equal(
-                origin_relu_train_out, custom_relu_dy2stat_train_out
-            )
 
         np.testing.assert_array_equal(
             origin_relu_train_out, custom_relu_train_out
@@ -139,13 +130,6 @@ class TestDygraphModel(unittest.TestCase):
         # for eval
         origin_relu_eval_out = self.eval_model(use_custom_op=False)
         custom_relu_eval_out = self.eval_model(use_custom_op=True)
-        if _in_legacy_dygraph():
-            custom_relu_dy2stat_eval_out = self.eval_model(
-                use_custom_op=True, dy2stat=True
-            )  # for to_static
-            np.testing.assert_array_equal(
-                origin_relu_eval_out, custom_relu_dy2stat_eval_out
-            )
 
         np.testing.assert_array_equal(
             origin_relu_eval_out, custom_relu_eval_out
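The deleted branch above exercised the custom op through dy2stat (`to_static`), a path that was only wired up for the legacy mode, hence the removal. For reference, a hedged sketch of what a dy2stat round trip looks like (the network and input shape are illustrative, not taken from the deleted test):

```python
# Hedged sketch of a dy2stat (to_static) round trip; the model and
# shapes are illustrative, not from the removed test.
import paddle
from paddle import nn

net = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
static_net = paddle.jit.to_static(net)  # convert the dygraph layer to static graph
out = static_net(paddle.rand([4, 8]))
print(out.shape)  # [4, 8]
```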
@@ -24,7 +24,7 @@ from paddle.fluid.executor import (
     _is_dy2st_enable_standalone_executor,
     _is_enable_standalone_executor,
 )
-from paddle.fluid.framework import Variable, _in_legacy_dygraph
+from paddle.fluid.framework import Variable
 from paddle.fluid.layers.utils import _hash_with_id
@@ -63,22 +63,13 @@ def _create_out(var):
     assert isinstance(var, Variable)
     var_desc = var.desc
     varbase = None
-    if _in_legacy_dygraph():
-        var_base = core.VarBase(
-            var_desc.dtype(),
-            var_desc.shape(),
-            var_desc.name(),
-            var_desc.type(),
-            False,
-        )
-    else:
-        var_base = core.eager.Tensor(
-            var_desc.dtype(),
-            var_desc.shape(),
-            var_desc.name(),
-            var_desc.type(),
-            False,
-        )
+    var_base = core.eager.Tensor(
+        var_desc.dtype(),
+        var_desc.shape(),
+        var_desc.name(),
+        var_desc.type(),
+        False,
+    )
     return var_base
@@ -16,7 +16,7 @@ import unittest
 
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode
 
 
 class TestDataParallelGroup(unittest.TestCase):
@@ -26,8 +26,6 @@ class TestDataParallelGroup(unittest.TestCase):
     def assign_group_by_size(self, *args):
         if in_dygraph_mode():
             return core.eager_assign_group_by_size(*args)
-        elif _in_legacy_dygraph():
-            return core.assign_group_by_size(*args)
 
     def test_construct_group0(self):
         # one dtype & one limit capability
@@ -18,7 +18,6 @@ import warnings
 
 import numpy as np
 
 import paddle.fluid as fluid
-from paddle.fluid.framework import _in_legacy_dygraph
 
 
 class TestImperativeNumpyBridge(unittest.TestCase):
@@ -44,12 +43,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
             np.testing.assert_array_equal(var2.numpy(), data_np)
             data_np[0][0] = -1
             self.assertEqual(data_np[0][0], -1)
-            if not _in_legacy_dygraph():
-                # eager_mode, var2 is Tensor, is not subscriptable
-                # TODO(wuweilong): to support slice in eager mode later
-                self.assertNotEqual(var2.numpy()[0][0], -1)
-            else:
-                self.assertNotEqual(var2[0][0].numpy()[0], -1)
+            self.assertNotEqual(var2[0][0].numpy()[0], -1)
             self.assertFalse(np.array_equal(var2.numpy(), data_np))
@@ -22,7 +22,6 @@ import paddle.fluid as fluid
 import paddle.nn.functional as F
 from paddle.fluid import Layer, core
 from paddle.fluid.dygraph import guard, to_variable
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.nn import Linear
 
 np.set_printoptions(suppress=True)
@@ -1193,18 +1192,6 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
                 dy_param_updated,
             )
 
-        with guard():
-            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            if _in_legacy_dygraph():
-                (
-                    dy_avg_cost_value,
-                    dy_sum_cost_value,
-                    dy_predict_value,
-                    dy_token_num_value,
-                    dy_param_init,
-                    dy_param_updated,
-                ) = run_dygraph()
-
         with new_program_scope():
             paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
@@ -1296,24 +1283,6 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
                     static_param_updated[
                         static_param_name_list[k - 4]
                     ] = out[k]
 
-        if _in_legacy_dygraph():
-            np.testing.assert_array_equal(
-                static_avg_cost_value, dy_avg_cost_value
-            )
-            np.testing.assert_array_equal(
-                static_sum_cost_value, dy_sum_cost_value
-            )
-            np.testing.assert_array_equal(
-                static_predict_value, dy_predict_value
-            )
-            np.testing.assert_array_equal(
-                static_token_num_value, dy_token_num_value
-            )
-            for key, value in static_param_init.items():
-                np.testing.assert_array_equal(value, dy_param_init[key])
-            for key, value in static_param_updated.items():
-                np.testing.assert_array_equal(value, dy_param_updated[key])
-
         # compare eager result with imperative result
         with guard():
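The removed block re-ran the transformer under legacy dygraph with sorted gradient accumulation and compared it against the static-graph results; the remaining test performs the same comparison in eager mode only. A hedged sketch of toggling that mode (the flag name comes from the removed hunk; the tiny computation is illustrative):

```python
# Hedged sketch: enabling sorted sum-gradient accumulation in dygraph.
# The flag name is taken from the removed hunk; the math is illustrative.
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    fluid.set_flags({'FLAGS_sort_sum_gradient': True})
    x = paddle.rand([2, 3])
    x.stop_gradient = False
    loss = (x * x).sum()
    loss.backward()
    print(x.gradient())  # elementwise 2 * x
```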
@@ -20,8 +20,8 @@ from op_test import OpTest, convert_float_to_uint16
 
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle import _C_ops
+from paddle.fluid.framework import in_dygraph_mode
 
 
 # hack method for test p_norm final state
@@ -30,20 +30,6 @@ def p_norm_python_api(
 ):
     if in_dygraph_mode():
         return _C_ops.p_norm(x, p, axis, epsilon, keepdim, as_vector)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.p_norm(
-            x,
-            'axis',
-            axis,
-            'porder',
-            float(p),
-            'keepdim',
-            keepdim,
-            'epsilon',
-            epsilon,
-            'as_vector',
-            as_vector,
-        )
 
 
 def p_norm(x, axis, porder, keepdims=False, reduce_all=False):
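With the attribute-style legacy call gone, `p_norm_python_api` dispatches only through the new op. A hedged usage sketch (tensor contents are illustrative; the positional argument order is exactly the one on the surviving line above):

```python
# Hedged sketch of the surviving eager p_norm path; argument order
# (x, p, axis, epsilon, keepdim, as_vector) mirrors the wrapper above.
import paddle
from paddle import _C_ops

paddle.disable_static()
x = paddle.rand([3, 4], dtype='float32')
out = _C_ops.p_norm(x, 2.0, 1, 1e-12, False, False)  # L2 norm along axis 1
print(out.shape)  # [3]
```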
@@ -18,7 +18,6 @@ import numpy as np
 
 import paddle
 import paddle.optimizer as optimizer
-from paddle.fluid.framework import _in_legacy_dygraph
 
 
 class TestOptimizerForVarBase(unittest.TestCase):
@@ -89,22 +88,13 @@ class TestOptimizerForVarBase(unittest.TestCase):
             optimizer.Adam(learning_rate=self.lr, parameters=x)
 
     def test_create_param_lr_with_1_for_coverage(self):
-        if _in_legacy_dygraph():
-            x = paddle.fluid.framework.ParamBase(
-                dtype="float32",
-                shape=[5, 10],
-                lod_level=0,
-                name="x",
-                optimize_attr={'learning_rate': 1.0},
-            )
-        else:
-            x = paddle.fluid.framework.EagerParamBase(
-                dtype="float32",
-                shape=[5, 10],
-                lod_level=0,
-                name="x",
-                optimize_attr={'learning_rate': 1.0},
-            )
+        x = paddle.fluid.framework.EagerParamBase(
+            dtype="float32",
+            shape=[5, 10],
+            lod_level=0,
+            name="x",
+            optimize_attr={'learning_rate': 1.0},
+        )
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place(),
@@ -117,22 +107,13 @@ class TestOptimizerForVarBase(unittest.TestCase):
         opt.step()
 
     def test_create_param_lr_with_no_1_value_for_coverage(self):
-        if _in_legacy_dygraph():
-            x = paddle.fluid.framework.ParamBase(
-                dtype="float32",
-                shape=[5, 10],
-                lod_level=0,
-                name="x",
-                optimize_attr={'learning_rate': 0.12},
-            )
-        else:
-            x = paddle.fluid.framework.EagerParamBase(
-                dtype="float32",
-                shape=[5, 10],
-                lod_level=0,
-                name="x",
-                optimize_attr={'learning_rate': 0.12},
-            )
+        x = paddle.fluid.framework.EagerParamBase(
+            dtype="float32",
+            shape=[5, 10],
+            lod_level=0,
+            name="x",
+            optimize_attr={'learning_rate': 0.12},
+        )
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place(),
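Both coverage tests now construct the parameter the same way; only the `learning_rate` in `optimize_attr` differs. A sketch of the surviving construction, with every argument copied from the hunk above:

```python
# Sketch of the surviving branch: an eager parameter carrying a
# per-parameter learning rate; all arguments appear in the hunk above.
import numpy as np
import paddle

x = paddle.fluid.framework.EagerParamBase(
    dtype="float32",
    shape=[5, 10],
    lod_level=0,
    name="x",
    optimize_attr={'learning_rate': 1.0},
)
x.value().get_tensor().set(
    np.random.random((5, 10)).astype('float32'),
    paddle.fluid.framework._current_expected_place(),
)
```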
@@ -20,7 +20,6 @@ import numpy as np
 
 import paddle
 import paddle.fluid as fluid
 import paddle.nn.functional as F
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.wrapped_decorator import wrap_decorator
@@ -239,22 +238,6 @@ class TestDygraphDoubleGrad(TestCase):
         ).astype('float32')
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
 
-        if not _in_legacy_dygraph():
-            pass
-        else:
-            loss = paddle.mean(dx_actual * dx_actual + x * x)
-            loss.backward()
-
-            x_grad_actual = x.gradient()
-            x_grad_expected = (
-                2.0
-                / float(numel)
-                * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))
-            ).astype('float32')
-            np.testing.assert_allclose(
-                x_grad_actual, x_grad_expected, rtol=1e-05
-            )
-
     @dygraph_guard
     def test_example_with_gradient_accumulation_and_no_grad_vars(self):
         x = random_var(self.shape)
@@ -286,22 +269,6 @@ class TestDygraphDoubleGrad(TestCase):
         ).astype('float32')
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
 
-        if not _in_legacy_dygraph():
-            pass
-        else:
-            loss = paddle.mean(dx_actual * dx_actual + x * x)
-            loss.backward()
-
-            x_grad_actual = x.gradient()
-            x_grad_expected = (
-                2.0
-                / float(numel)
-                * (x_np + dx_expected * (x_np > 0) * 4 / float(numel))
-            ).astype('float32')
-            np.testing.assert_allclose(
-                x_grad_actual, x_grad_expected, rtol=1e-05
-            )
-
     @dygraph_guard
     def test_example_with_gradient_accumulation_and_not_create_graph(self):
         x = random_var(self.shape)
@@ -327,18 +294,6 @@ class TestDygraphDoubleGrad(TestCase):
         np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
 
-        if not _in_legacy_dygraph():
-            pass
-        else:
-            loss = paddle.mean(dx_actual * dx_actual + x * x)
-            loss.backward()
-
-            x_grad_actual = x.gradient()
-            x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
-            np.testing.assert_allclose(
-                x_grad_actual, x_grad_expected, rtol=1e-05
-            )
-
 
 class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
     def setUp(self):
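Each deleted block was the legacy-only tail of a double-grad test: build a loss from the first-order gradient, backprop again, and check `x.gradient()` analytically. A hedged sketch of that pattern in eager mode (shape and values are illustrative):

```python
# Hedged sketch of the double-grad pattern these tests exercise,
# run in eager mode; the shape is illustrative.
import paddle
import paddle.nn.functional as F

x = paddle.rand([4, 5])
x.stop_gradient = False
y = F.relu(x)
# create_graph=True keeps dx differentiable for the second backward pass
(dx,) = paddle.grad(outputs=[y], inputs=[x], create_graph=True)
loss = paddle.mean(dx * dx + x * x)
loss.backward()
print(x.gradient().shape)  # (4, 5)
```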
@@ -18,7 +18,6 @@ import numpy as np
 
 import paddle
 from paddle.fluid import core
-from paddle.fluid.framework import _in_legacy_dygraph
 
 
 class TestTensorCopyFrom(unittest.TestCase):
@@ -46,12 +45,8 @@ class TestUVATensorFromNumpy(unittest.TestCase):
         ]
         for dtype in dtype_list:
             data = np.random.randint(10, size=[4, 5]).astype(dtype)
-            if _in_legacy_dygraph():
-                tensor = paddle.fluid.core.to_uva_tensor(data, 0)
-                tensor2 = paddle.fluid.core.to_uva_tensor(data)
-            else:
-                tensor = core.eager.to_uva_tensor(data, 0)
-                tensor2 = core.eager.to_uva_tensor(data)
+            tensor = core.eager.to_uva_tensor(data, 0)
+            tensor2 = core.eager.to_uva_tensor(data)
             self.assertTrue(tensor.place.is_gpu_place())
             self.assertTrue(tensor2.place.is_gpu_place())
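`to_uva_tensor` now exists only on the eager side. A hedged usage sketch, grounded in the two call forms the test keeps (requires a CUDA-enabled Paddle build; the dtype is one of those the test iterates over):

```python
# Hedged sketch of the eager-only UVA (unified virtual addressing)
# entry point kept above; requires a CUDA-enabled Paddle build.
import numpy as np
from paddle.fluid import core

data = np.random.randint(10, size=[4, 5]).astype('int32')
tensor = core.eager.to_uva_tensor(data, 0)  # explicit device id 0
tensor2 = core.eager.to_uva_tensor(data)    # default device
assert tensor.place.is_gpu_place() and tensor2.place.is_gpu_place()
```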
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
-from paddle.fluid.framework import _in_legacy_dygraph
 
 
 class TestVarBase(unittest.TestCase):
@@ -443,10 +442,7 @@ class TestVarBase(unittest.TestCase):
     def test_deep_copy(self):
         with fluid.dygraph.guard():
-            if _in_legacy_dygraph():
-                empty_var = core.VarBase()
-            else:
-                empty_var = core.eager.Tensor()
+            empty_var = core.eager.Tensor()
             empty_var_copy = copy.deepcopy(empty_var)
             self.assertEqual(
                 empty_var.stop_gradient, empty_var_copy.stop_gradient
@@ -484,22 +480,13 @@ class TestVarBase(unittest.TestCase):
             self.assertEqual(id(y_copy), id(y_copy2))
 
             # test copy selected rows
-            if _in_legacy_dygraph():
-                x = core.VarBase(
-                    core.VarDesc.VarType.FP32,
-                    [3, 100],
-                    "selected_rows",
-                    core.VarDesc.VarType.SELECTED_ROWS,
-                    True,
-                )
-            else:
-                x = core.eager.Tensor(
-                    core.VarDesc.VarType.FP32,
-                    [3, 100],
-                    "selected_rows",
-                    core.VarDesc.VarType.SELECTED_ROWS,
-                    True,
-                )
+            x = core.eager.Tensor(
+                core.VarDesc.VarType.FP32,
+                [3, 100],
+                "selected_rows",
+                core.VarDesc.VarType.SELECTED_ROWS,
+                True,
+            )
 
             selected_rows = x.value().get_selected_rows()
             selected_rows.get_tensor().set(
@@ -1247,14 +1234,8 @@ class TestVarBaseSetitem(unittest.TestCase):
         self.dtype = "int32"
 
     def _test(self, value):
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 0)
-
         id_origin = id(self.tensor_x)
         self.tensor_x[0] = value
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 1)
 
         if isinstance(value, (int, float)):
             result = np.zeros((2, 3)).astype(self.dtype) + value
@@ -1265,14 +1246,10 @@ class TestVarBaseSetitem(unittest.TestCase):
         self.assertEqual(id_origin, id(self.tensor_x))
 
         self.tensor_x[1:2] = value
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 2)
         np.testing.assert_array_equal(self.tensor_x[1].numpy(), result)
         self.assertEqual(id_origin, id(self.tensor_x))
 
         self.tensor_x[...] = value
-        if _in_legacy_dygraph():
-            self.assertEqual(self.tensor_x.inplace_version, 3)
         np.testing.assert_array_equal(self.tensor_x[3].numpy(), result)
         self.assertEqual(id_origin, id(self.tensor_x))
@@ -1476,10 +1453,7 @@ class TestVarBaseShareBufferTo(unittest.TestCase):
         np_src = np.random.random((3, 8, 8))
         src = paddle.to_tensor(np_src, dtype="float64")
         # empty_var
-        if _in_legacy_dygraph():
-            dst = core.VarBase()
-        else:
-            dst = core.eager.Tensor()
+        dst = core.eager.Tensor()
         src._share_buffer_to(dst)
         self.assertEqual(src._is_shared_buffer_with(dst), True)
@@ -1553,18 +1527,12 @@ class TestVarBaseInitVarBaseFromTensorWithDevice(unittest.TestCase):
         if paddle.fluid.is_compiled_with_cuda():
             device = paddle.CUDAPlace(0)
-            if _in_legacy_dygraph():
-                tmp = fluid.core.VarBase(t, device)
-            else:
-                tmp = fluid.core.eager.Tensor(t, device)
+            tmp = fluid.core.eager.Tensor(t, device)
             self.assertTrue(tmp.place.is_gpu_place())
             self.assertEqual(tmp.numpy().all(), np_x.all())
 
         device = paddle.CPUPlace()
-        if _in_legacy_dygraph():
-            tmp = fluid.core.VarBase(t, device)
-        else:
-            tmp = fluid.core.eager.Tensor(t, device)
+        tmp = fluid.core.eager.Tensor(t, device)
         self.assertEqual(tmp.numpy().all(), np_x.all())
@@ -1579,10 +1547,7 @@ class TestVarBaseNumel(unittest.TestCase):
     def test_numel_without_holder(self):
         paddle.disable_static()
-        if _in_legacy_dygraph():
-            x_without_holder = core.VarBase()
-        else:
-            x_without_holder = core.eager.Tensor()
+        x_without_holder = core.eager.Tensor()
         x_actual_numel = x_without_holder._numel()
         self.assertEqual(x_actual_numel, 0)