Unverified Commit 1b8d1d36 authored by 姜永久, committed by GitHub

remove unittests eager guard tests part2 (#48798)

* remove unittests eager guard tests part2

* rm bicubic eager guard tests

* review

* fix

* fix dtype bug in bce_loss_with_logits
Parent fc0e72fe
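Most of the diff below deletes a single recurring pattern: an extra test method that re-ran existing test bodies under the retired _test_eager_guard context manager. With eager mode now the default dygraph mode, the guarded pass duplicates the normal run. A minimal, self-contained sketch of the shape being deleted; the class and method names are illustrative, not taken from any one file:

import unittest
from contextlib import nullcontext

try:
    # the guard this PR retires; newer Paddle builds no longer ship it
    from paddle.fluid.framework import _test_eager_guard
except ImportError:
    _test_eager_guard = nullcontext

class TestSomeAPI(unittest.TestCase):
    def test_dygraph_api(self):
        self.assertTrue(True)  # stand-in for a real dygraph test body

    def test_api_eager_dygraph(self):  # the method this PR deletes
        with _test_eager_guard():
            self.test_dygraph_api()

if __name__ == "__main__":
    unittest.main()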
@@ -23,7 +23,6 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
paddle.enable_static()
@@ -2100,11 +2099,6 @@ class TestHardswishAPI(unittest.TestCase):
)
F.hardswish(x_fp16)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_dygraph_api()
self.test_errors()
class TestSoftRelu(TestActivation):
def setUp(self):
@@ -2355,11 +2349,6 @@ class TestCELUAPI(unittest.TestCase):
)
self.celu(x_fp16)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_dygraph_api()
self.test_errors()
class TestReciprocal(TestActivation):
def setUp(self):
@@ -3361,7 +3350,7 @@ class TestSwishAPI(unittest.TestCase):
for r in res:
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def func_test_dygraph_api(self):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.swish(x)
@@ -3372,11 +3361,6 @@ class TestSwishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_dygraph_api(self):
with _test_eager_guard():
self.func_test_dygraph_api()
self.func_test_dygraph_api()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
......
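The activation-op file above also shows the second recurring shape: the real body lives in a func_test_* method, and a test_* wrapper runs it twice, once under the guard and once without. The PR renames func_test_* back to test_* and drops the wrapper, so the body runs once in the default eager mode. A sketch of the before and after, with illustrative names:

import unittest

class TestSwishAPILike(unittest.TestCase):  # illustrative class name
    # Before this PR:
    #     def func_test_dygraph_api(self): ...
    #     def test_dygraph_api(self):
    #         with _test_eager_guard():
    #             self.func_test_dygraph_api()  # eager-mode pass
    #         self.func_test_dygraph_api()      # legacy-mode pass
    # After: a single method, run once.
    def test_dygraph_api(self):
        self.assertTrue(True)  # stand-in for the real body

if __name__ == "__main__":
    unittest.main()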
@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.op import Operator
@@ -187,10 +186,6 @@ class TestAdamOpMultipleSteps(OpTest):
"float32"
)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_check_output()
def adam_step(inputs, attributes):
'''
@@ -750,14 +745,6 @@ class TestAdamOpV2(unittest.TestCase):
adam.step()
paddle.enable_static()
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_adam_op_dygraph()
self.test_adam_op_with_state_dict()
self.test_adam_with_grad_clip()
self.test_adam_op_with_set_lr()
self.test_adam_op_with_sparse_input_and_weight_decay()
class TestAdamOptimizer(unittest.TestCase):
def _test(
@@ -1281,10 +1268,6 @@ class TestMultiTensorAdam(unittest.TestCase):
self._check_with_param_arrt(place, use_amp)
self._check_with_param_group(place, use_amp)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_main()
if __name__ == "__main__":
paddle.enable_static()
......
@@ -18,11 +18,10 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
class TestAdamaxAPI(unittest.TestCase):
def func_adamax_api_dygraph(self):
def test_adamax_api_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
@@ -37,12 +36,7 @@ class TestAdamaxAPI(unittest.TestCase):
adam.step()
adam.clear_gradients()
def test_adamax_api_dygraph(self):
with _test_eager_guard():
self.func_adamax_api_dygraph()
self.func_adamax_api_dygraph()
def func_adamax_api(self):
def test_adamax_api(self):
paddle.enable_static()
place = fluid.CPUPlace()
shape = [2, 3, 8, 8]
@@ -70,14 +64,9 @@ class TestAdamaxAPI(unittest.TestCase):
rets = exe.run(train_prog, feed={"data": data_np}, fetch_list=[loss])
assert rets[0] is not None
def test_adamax_api(self):
with _test_eager_guard():
self.func_adamax_api()
self.func_adamax_api()
class TestAdamaxAPIGroup(TestAdamaxAPI):
def func_adamax_api_dygraph(self):
def test_adamax_api_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
@@ -103,11 +92,6 @@ class TestAdamaxAPIGroup(TestAdamaxAPI):
adam.step()
adam.clear_gradients()
def test_adamax_api_dygraph(self):
with _test_eager_guard():
self.func_adamax_api_dygraph()
self.func_adamax_api_dygraph()
if __name__ == "__main__":
unittest.main()
@@ -21,7 +21,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.framework import core
@@ -250,11 +249,6 @@ class TestAdamWOp(unittest.TestCase):
0.1, epsilon=-1, parameters=linear.parameters()
)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_adamw_op_dygraph()
self.test_adamw_op_invalid_input()
class TestAdamWOpGroup(TestAdamWOp):
def test_adamw_op_dygraph(self):
......
@@ -181,7 +181,7 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase):
assert np.allclose(res_5, self.res_5_np)
def func_dynamic_graph(self):
def test_dynamic_graph(self):
for use_cuda in (
[False, True] if core.is_compiled_with_cuda() else [False]
):
@@ -216,11 +216,6 @@
assert np.allclose(out_5.numpy(), self.res_5_np)
def test_dynamic_graph(self):
with paddle.fluid.framework._test_eager_guard():
self.func_dynamic_graph()
self.func_dynamic_graph()
class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
def setUp(self):
......
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
class TestAllcloseLayer(unittest.TestCase):
@@ -101,7 +100,7 @@
with fluid.program_guard(main, startup):
self.allclose_check(use_cuda=True, dtype='float64')
def func_dygraph_mode(self):
def test_dygraph_mode(self):
x_1 = np.array([10000.0, 1e-07]).astype("float32")
y_1 = np.array([10000.1, 1e-08]).astype("float32")
x_2 = np.array([10000.0, 1e-08]).astype("float32")
@@ -188,11 +187,6 @@
)
self.assertEqual(ret_5.numpy()[0], True)
def test_dygraph_mode(self):
with _test_eager_guard():
self.func_dygraph_mode()
self.func_dygraph_mode()
if __name__ == "__main__":
unittest.main()
@@ -20,7 +20,6 @@ import op_test
import paddle
import paddle.fluid.core as core
from paddle.distributed.models.moe import utils
from paddle.fluid.framework import _test_eager_guard
def assign_pos(x, _cum_count):
@@ -118,7 +117,7 @@
)
assert_allclose(res[0], self.out, self.cum_count)
def func_api_dygraph(self):
def test_api_dygraph(self):
paddle.disable_static()
x = paddle.to_tensor(self.x)
cum_count = paddle.to_tensor(self.cum_count).astype(x.dtype)
@@ -126,11 +125,6 @@
out = utils._assign_pos(x, cum_count)
assert_allclose(out.numpy(), self.out, self.cum_count)
def test_api_dygraph(self):
with _test_eager_guard():
self.func_api_dygraph()
self.func_api_dygraph()
if __name__ == '__main__':
paddle.enable_static()
......
@@ -19,12 +19,7 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.framework import (
EagerParamBase,
ParamBase,
_test_eager_guard,
in_dygraph_mode,
)
from paddle.fluid.framework import EagerParamBase, ParamBase, in_dygraph_mode
from paddle.jit import ProgramTranslator
@@ -66,7 +61,7 @@ class L3(fluid.Layer):
class TestBaseLayer(unittest.TestCase):
def func_test_one_level(self):
def test_one_level(self):
with fluid.dygraph.guard():
l = L1()
ret = l()
@@ -79,12 +74,7 @@ class TestBaseLayer(unittest.TestCase):
ret.numpy(), 0.2 * np.ones([2, 2]), rtol=1e-05
)
def test_one_level(self):
with _test_eager_guard():
self.func_test_one_level()
self.func_test_one_level()
def func_test_three_level(self):
def test_three_level(self):
with fluid.dygraph.guard():
l = L3()
expected_names = [
@@ -106,12 +96,7 @@ class TestBaseLayer(unittest.TestCase):
ret.numpy(), 0.8 * np.ones([2, 2]), rtol=1e-05
)
def test_three_level(self):
with _test_eager_guard():
self.func_test_three_level()
self.func_test_three_level()
def func_test_add_parameter_with_error(self):
def test_add_parameter_with_error(self):
with fluid.dygraph.guard():
net = fluid.Layer()
param = net.create_parameter(shape=[1])
@@ -136,11 +121,6 @@ class TestBaseLayer(unittest.TestCase):
net._loaddict_holder[load_param.name] = load_param
net.add_parameter("load_param", load_param)
def test_add_parameter_with_error(self):
with _test_eager_guard():
self.func_test_add_parameter_with_error()
self.func_test_add_parameter_with_error()
class BufferLayer(fluid.Layer):
def __init__(self):
@@ -169,7 +149,7 @@ class BufferNet(fluid.Layer):
class TestBuffer(unittest.TestCase):
def func_test_buffers_and_named_buffers(self):
def test_buffers_and_named_buffers(self):
def names(named_buffers):
return [name for name, _ in named_buffers]
@@ -192,12 +172,7 @@ class TestBuffer(unittest.TestCase):
['net_buffer', 'new_buffer'],
)
def test_buffers_and_named_buffers(self):
with _test_eager_guard():
self.func_test_buffers_and_named_buffers()
self.func_test_buffers_and_named_buffers()
def func_test_register_buffer_with_error(self):
def test_register_buffer_with_error(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var = to_variable(np.zeros([1]))
@@ -241,12 +216,7 @@ class TestBuffer(unittest.TestCase):
with self.assertRaisesRegexp(KeyError, "already exists"):
net.register_buffer("attr_name", var)
def test_register_buffer_with_error(self):
with _test_eager_guard():
self.func_test_register_buffer_with_error()
self.func_test_register_buffer_with_error()
def func_test_register_buffer_same_name(self):
def test_register_buffer_same_name(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
@@ -260,12 +230,7 @@ class TestBuffer(unittest.TestCase):
net.register_buffer("buffer_name", var3)
self.assert_var_base_equal(net.buffer_name, var3)
def test_register_buffer_same_name(self):
with _test_eager_guard():
self.func_test_register_buffer_same_name()
self.func_test_register_buffer_same_name()
def func_test_buffer_not_persistable(self):
def test_buffer_not_persistable(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
@@ -274,12 +239,7 @@ class TestBuffer(unittest.TestCase):
self.assertEqual(len(net.buffers()), 1)
self.assertEqual(len(net.state_dict()), 0)
def test_buffer_not_persistable(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable()
self.func_test_buffer_not_persistable()
def func_test_buffer_not_persistable_del(self):
def test_buffer_not_persistable_del(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
@@ -287,12 +247,7 @@ class TestBuffer(unittest.TestCase):
del net.buffer_name
self.assertEqual(len(net.buffers()), 0)
def test_buffer_not_persistable_del(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_del()
self.func_test_buffer_not_persistable_del()
def func_test_buffer_not_persistable_overwrite(self):
def test_buffer_not_persistable_overwrite(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
@@ -308,12 +263,7 @@ class TestBuffer(unittest.TestCase):
self.assertEqual(len(net.buffers()), 1)
self.assertEqual(len(net.state_dict()), 0)
def test_buffer_not_persistable_overwrite(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_overwrite()
self.func_test_buffer_not_persistable_overwrite()
def func_test_buffer_not_persistable_assign(self):
def test_buffer_not_persistable_assign(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
@@ -337,24 +287,14 @@ class TestBuffer(unittest.TestCase):
self.assertEqual(len(net.buffers()), 0)
self.assertEqual(len(net.state_dict()), 1)
def test_buffer_not_persistable_assign(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_assign()
self.func_test_buffer_not_persistable_assign()
def func_test_buffer_not_persistable_load(self):
def test_buffer_not_persistable_load(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([1]))
net.register_buffer("buffer_name", var1, persistable=False)
net.load_dict({})
def test_buffer_not_persistable_load(self):
with _test_eager_guard():
self.func_test_buffer_not_persistable_load()
self.func_test_buffer_not_persistable_load()
def func_test_buffer_state_dict(self):
def test_buffer_state_dict(self):
with fluid.dygraph.guard():
net = fluid.Layer()
var1 = to_variable(np.zeros([2, 3]))
@@ -375,11 +315,6 @@
self.assert_var_base_equal(net_load.buffer_var1, var1)
def test_buffer_state_dict(self):
with _test_eager_guard():
self.func_test_buffer_state_dict()
self.func_test_buffer_state_dict()
def assert_var_base_equal(self, var1, var2):
np.testing.assert_array_equal(var1.numpy(), var2.numpy())
@@ -416,7 +351,7 @@ class TestModifiedBuffer(unittest.TestCase):
return out, net.buffer1, net.buffer2
def func_test_modified(self):
def test_modified(self):
self.funcsetUp()
dy_outs = self._run(False)
st_outs = self._run(True)
@@ -426,11 +361,6 @@
dy_outs[i].numpy(), st_outs[i].numpy()
)
def test_modified(self):
with _test_eager_guard():
self.func_test_modified()
self.func_test_modified()
class TestLayerTo(unittest.TestCase):
def funcsetUp(self):
@@ -614,12 +544,6 @@ class TestLayerTo(unittest.TestCase):
self.assertIsNone(model._buffers['buf_name'])
def test_main(self):
with _test_eager_guard():
self.funcsetUp()
self.func_test_to_api()
self.func_test_to_api_paddle_dtype()
self.func_test_to_api_numpy_dtype()
self.func_test_to_api_none_buffer()
self.funcsetUp()
self.func_test_to_api()
self.func_test_to_api_paddle_dtype()
......
@@ -20,7 +20,6 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestBatchNorm(unittest.TestCase):
@@ -91,13 +90,12 @@ class TestBatchNorm(unittest.TestCase):
def compute_1d(x):
with fluid.dygraph.guard(p):
with _test_eager_guard():
bn = paddle.nn.BatchNorm1D(shape[1])
x1 = paddle.to_tensor(x)
x1.stop_gradient = False
y = bn(x1)
y.backward()
return y.numpy(), x1.gradient()
bn = paddle.nn.BatchNorm1D(shape[1])
x1 = paddle.to_tensor(x)
x1.stop_gradient = False
y = bn(x1)
y.backward()
return y.numpy(), x1.gradient()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
@@ -138,14 +136,13 @@
def compute_v2(x):
with fluid.dygraph.guard(p):
with _test_eager_guard():
print("v2")
bn = paddle.nn.BatchNorm2D(shape[1])
x1 = paddle.to_tensor(x)
x1.stop_gradient = False
y = bn(x1)
y.backward()
return y.numpy(), x1.gradient()
print("v2")
bn = paddle.nn.BatchNorm2D(shape[1])
x1 = paddle.to_tensor(x)
x1.stop_gradient = False
y = bn(x1)
y.backward()
return y.numpy(), x1.gradient()
x = np.random.randn(*shape).astype("float32")
y1, g1 = compute_v1(x)
@@ -175,9 +172,8 @@
bn = paddle.nn.BatchNorm2D(shape[1])
y = bn(paddle.to_tensor(x))
with _test_eager_guard():
bn = paddle.nn.BatchNorm2D(shape[1])
eag_y = bn(paddle.to_tensor(x))
bn = paddle.nn.BatchNorm2D(shape[1])
eag_y = bn(paddle.to_tensor(x))
assert np.allclose(eag_y.numpy(), y.numpy())
return y.numpy()
......
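In the batch-norm tests above, the guard wrapped computations inside local helper functions rather than whole test methods, so there the change is a pure de-indent: the body moves out of the with block unchanged. A runnable sketch of the resulting helper; the input shape is illustrative:

import numpy as np
import paddle

def compute_2d(x):
    # this whole body previously sat under "with _test_eager_guard():"
    bn = paddle.nn.BatchNorm2D(x.shape[1])
    x1 = paddle.to_tensor(x)
    x1.stop_gradient = False
    y = bn(x1)
    y.backward()  # Paddle uses a ones-filled grad for non-scalar outputs
    return y.numpy(), x1.gradient()

y, g = compute_2d(np.random.randn(2, 3, 4, 4).astype("float32"))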
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
def call_bce_layer(
@@ -172,15 +171,6 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
functional=True,
)
with _test_eager_guard():
eager_functional = test_dygraph(
place,
logit_np,
label_np,
reduction=reduction,
functional=True,
)
np.testing.assert_allclose(
static_functional, expected, rtol=1e-05
)
@@ -188,9 +178,6 @@
static_functional, dy_functional, rtol=1e-05
)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
np.testing.assert_allclose(
eager_functional, expected, rtol=1e-05
)
def test_BCEWithLogitsLoss_weight(self):
logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
......
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard
from paddle.incubate.optimizer.functional.bfgs import minimize_bfgs
np.random.seed(123)
@@ -125,7 +124,7 @@ class TestBfgs(unittest.TestCase):
results = test_static_graph(func, x0, dtype='float64')
np.testing.assert_allclose(0.8, results[2], rtol=1e-05, atol=1e-8)
def func_rosenbrock(self):
def test_rosenbrock(self):
# The Rosenbrock function is a standard optimization test case.
a = np.random.random(size=[1]).astype('float32')
minimum = [a.item(), (a**2).item()]
@@ -144,11 +143,6 @@
results = test_dynamic_graph(func, x0)
np.testing.assert_allclose(minimum, results[2], rtol=1e-05, atol=1e-8)
def test_rosenbrock(self):
with _test_eager_guard():
self.func_rosenbrock()
self.func_rosenbrock()
def test_exception(self):
def func(x):
return paddle.dot(x, x)
......
@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import interpolate
@@ -363,12 +362,7 @@ class TestBicubicInterpDataLayout(TestBicubicInterpOp):
class TestBicubicInterpOpAPI(unittest.TestCase):
def test_imperative_case(self):
with _test_eager_guard():
self.func_case()
self.func_case()
def func_case(self):
def test_case(self):
np.random.seed(200)
x_data = np.random.random((2, 3, 6, 6)).astype("float32")
dim_data = np.array([12]).astype("int32")
......
@@ -813,7 +813,7 @@ def binary_cross_entropy_with_logits(
one = _C_ops.full(
[1],
float(1.0),
core.VarDesc.VarType.FP32,
logit.dtype,
_current_expected_place(),
)
out = _C_ops.sigmoid_cross_entropy_with_logits(
......
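This last hunk is the dtype fix named in the commit message: binary_cross_entropy_with_logits created the constant named one with a hard-coded FP32 dtype, and that constant is later combined with pos_weight and the logits, so non-FP32 inputs hit a dtype mismatch; creating it with logit.dtype keeps the operands consistent. A hedged repro through the public API, assuming float64 inputs are what exercised the old mismatch:

import paddle
import paddle.nn.functional as F

logit = paddle.rand([4], dtype="float64")
label = paddle.ones([4], dtype="float64")
pos_weight = paddle.ones([4], dtype="float64")

# pos_weight routes the computation through the internal one-constant;
# after the fix that constant is created as float64, matching logit.dtype
out = F.binary_cross_entropy_with_logits(logit, label, pos_weight=pos_weight)
print(out.dtype)  # paddle.float64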