From 1b8d1d3681131bbaaec263d4c5f2981d52f14ef6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A7=9C=E6=B0=B8=E4=B9=85?= <34344716+yjjiang11@users.noreply.github.com> Date: Tue, 20 Dec 2022 13:52:25 +0800 Subject: [PATCH] remove unittests eager guard tests part2 (#48798) * remove unittests eager guard tests part2 * rm bicubic eager guard tests * review * fix * fix dtype bug in bce_loss_with_logits --- .../tests/unittests/test_activation_op.py | 18 +-- .../fluid/tests/unittests/test_adam_op.py | 17 --- .../fluid/tests/unittests/test_adamax_api.py | 22 +--- .../fluid/tests/unittests/test_adamw_op.py | 6 - .../unittests/test_adaptive_max_pool3d.py | 7 +- .../tests/unittests/test_allclose_layer.py | 8 +- .../tests/unittests/test_assign_pos_op.py | 8 +- .../fluid/tests/unittests/test_base_layer.py | 104 +++--------------- .../tests/unittests/test_batch_norm_op_v2.py | 34 +++--- .../unittests/test_bce_with_logits_loss.py | 13 --- .../paddle/fluid/tests/unittests/test_bfgs.py | 8 +- .../unittests/test_bicubic_interp_v2_op.py | 8 +- python/paddle/nn/functional/loss.py | 2 +- 13 files changed, 39 insertions(+), 216 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index db3bb976b8..2f0c594746 100755 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -23,7 +23,6 @@ import paddle.fluid as fluid import paddle.fluid.core as core import paddle.nn.functional as F from paddle.fluid import Program, program_guard -from paddle.fluid.framework import _test_eager_guard paddle.enable_static() @@ -2100,11 +2099,6 @@ class TestHardswishAPI(unittest.TestCase): ) F.hardswish(x_fp16) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_dygraph_api() - self.test_errors() - class TestSoftRelu(TestActivation): def setUp(self): @@ -2355,11 +2349,6 @@ class TestCELUAPI(unittest.TestCase): ) self.celu(x_fp16) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_dygraph_api() - self.test_errors() - class TestReciprocal(TestActivation): def setUp(self): @@ -3361,7 +3350,7 @@ class TestSwishAPI(unittest.TestCase): for r in res: np.testing.assert_allclose(out_ref, r, rtol=1e-05) - def func_test_dygraph_api(self): + def test_dygraph_api(self): paddle.disable_static(self.place) x = paddle.to_tensor(self.x_np) out1 = F.swish(x) @@ -3372,11 +3361,6 @@ class TestSwishAPI(unittest.TestCase): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) paddle.enable_static() - def test_dygraph_api(self): - with _test_eager_guard(): - self.func_test_dygraph_api() - self.func_test_dygraph_api() - def test_fluid_api(self): paddle.enable_static() with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index a8d41a3d85..56057513b6 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -20,7 +20,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.framework import _test_eager_guard from paddle.fluid.op import Operator @@ -187,10 +186,6 @@ class TestAdamOpMultipleSteps(OpTest): "float32" ) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_check_output() - def adam_step(inputs, attributes): ''' @@ -750,14 +745,6 @@ class TestAdamOpV2(unittest.TestCase): 
adam.step() paddle.enable_static() - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_adam_op_dygraph() - self.test_adam_op_with_state_dict() - self.test_adam_with_grad_clip() - self.test_adam_op_with_set_lr() - self.test_adam_op_with_sparse_input_and_weight_decay() - class TestAdamOptimizer(unittest.TestCase): def _test( @@ -1281,10 +1268,6 @@ class TestMultiTensorAdam(unittest.TestCase): self._check_with_param_arrt(place, use_amp) self._check_with_param_group(place, use_amp) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_main() - if __name__ == "__main__": paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_adamax_api.py b/python/paddle/fluid/tests/unittests/test_adamax_api.py index 47c2791e46..9aeeb9e0d4 100644 --- a/python/paddle/fluid/tests/unittests/test_adamax_api.py +++ b/python/paddle/fluid/tests/unittests/test_adamax_api.py @@ -18,11 +18,10 @@ import numpy as np import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard class TestAdamaxAPI(unittest.TestCase): - def func_adamax_api_dygraph(self): + def test_adamax_api_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) @@ -37,12 +36,7 @@ class TestAdamaxAPI(unittest.TestCase): adam.step() adam.clear_gradients() - def test_adamax_api_dygraph(self): - with _test_eager_guard(): - self.func_adamax_api_dygraph() - self.func_adamax_api_dygraph() - - def func_adamax_api(self): + def test_adamax_api(self): paddle.enable_static() place = fluid.CPUPlace() shape = [2, 3, 8, 8] @@ -70,14 +64,9 @@ class TestAdamaxAPI(unittest.TestCase): rets = exe.run(train_prog, feed={"data": data_np}, fetch_list=[loss]) assert rets[0] is not None - def test_adamax_api(self): - with _test_eager_guard(): - self.func_adamax_api() - self.func_adamax_api() - class TestAdamaxAPIGroup(TestAdamaxAPI): - def func_adamax_api_dygraph(self): + def test_adamax_api_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) @@ -103,11 +92,6 @@ class TestAdamaxAPIGroup(TestAdamaxAPI): adam.step() adam.clear_gradients() - def test_adamax_api_dygraph(self): - with _test_eager_guard(): - self.func_adamax_api_dygraph() - self.func_adamax_api_dygraph() - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py index acee63d1ae..f227affca0 100644 --- a/python/paddle/fluid/tests/unittests/test_adamw_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py @@ -21,7 +21,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard from paddle.framework import core @@ -250,11 +249,6 @@ class TestAdamWOp(unittest.TestCase): 0.1, epsilon=-1, parameters=linear.parameters() ) - def test_api_eager_dygraph(self): - with _test_eager_guard(): - self.test_adamw_op_dygraph() - self.test_adamw_op_invalid_input() - class TestAdamWOpGroup(TestAdamWOp): def test_adamw_op_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py index d3f76ba395..0edd50158a 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py @@ -181,7 +181,7 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): 
assert np.allclose(res_5, self.res_5_np) - def func_dynamic_graph(self): + def test_dynamic_graph(self): for use_cuda in ( [False, True] if core.is_compiled_with_cuda() else [False] ): @@ -216,11 +216,6 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): assert np.allclose(out_5.numpy(), self.res_5_np) - def test_dynamic_graph(self): - with paddle.fluid.framework._test_eager_guard(): - self.func_dynamic_graph() - self.func_dynamic_graph() - class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): def setUp(self): diff --git a/python/paddle/fluid/tests/unittests/test_allclose_layer.py b/python/paddle/fluid/tests/unittests/test_allclose_layer.py index 79497985b0..fc9672ec03 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_layer.py @@ -18,7 +18,6 @@ import numpy as np import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard class TestAllcloseLayer(unittest.TestCase): @@ -101,7 +100,7 @@ class TestAllcloseLayer(unittest.TestCase): with fluid.program_guard(main, startup): self.allclose_check(use_cuda=True, dtype='float64') - def func_dygraph_mode(self): + def test_dygraph_mode(self): x_1 = np.array([10000.0, 1e-07]).astype("float32") y_1 = np.array([10000.1, 1e-08]).astype("float32") x_2 = np.array([10000.0, 1e-08]).astype("float32") @@ -188,11 +187,6 @@ class TestAllcloseLayer(unittest.TestCase): ) self.assertEqual(ret_5.numpy()[0], True) - def test_dygraph_mode(self): - with _test_eager_guard(): - self.func_dygraph_mode() - self.func_dygraph_mode() - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py index 9478efd51a..13cb3eccf8 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py @@ -20,7 +20,6 @@ import op_test import paddle import paddle.fluid.core as core from paddle.distributed.models.moe import utils -from paddle.fluid.framework import _test_eager_guard def assign_pos(x, _cum_count): @@ -118,7 +117,7 @@ class TestAssignPosAPI(unittest.TestCase): ) assert_allclose(res[0], self.out, self.cum_count) - def func_api_dygraph(self): + def test_api_dygraph(self): paddle.disable_static() x = paddle.to_tensor(self.x) cum_count = paddle.to_tensor(self.cum_count).astype(x.dtype) @@ -126,11 +125,6 @@ class TestAssignPosAPI(unittest.TestCase): out = utils._assign_pos(x, cum_count) assert_allclose(out.numpy(), self.out, self.cum_count) - def test_api_dygraph(self): - with _test_eager_guard(): - self.func_api_dygraph() - self.func_api_dygraph() - if __name__ == '__main__': paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index 4ec5ddd9dd..bc5c333750 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -19,12 +19,7 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid.dygraph import to_variable -from paddle.fluid.framework import ( - EagerParamBase, - ParamBase, - _test_eager_guard, - in_dygraph_mode, -) +from paddle.fluid.framework import EagerParamBase, ParamBase, in_dygraph_mode from paddle.jit import ProgramTranslator @@ -66,7 +61,7 @@ class L3(fluid.Layer): class TestBaseLayer(unittest.TestCase): - def func_test_one_level(self): + def test_one_level(self): with 
fluid.dygraph.guard(): l = L1() ret = l() @@ -79,12 +74,7 @@ class TestBaseLayer(unittest.TestCase): ret.numpy(), 0.2 * np.ones([2, 2]), rtol=1e-05 ) - def test_one_level(self): - with _test_eager_guard(): - self.func_test_one_level() - self.func_test_one_level() - - def func_test_three_level(self): + def test_three_level(self): with fluid.dygraph.guard(): l = L3() expected_names = [ @@ -106,12 +96,7 @@ class TestBaseLayer(unittest.TestCase): ret.numpy(), 0.8 * np.ones([2, 2]), rtol=1e-05 ) - def test_three_level(self): - with _test_eager_guard(): - self.func_test_three_level() - self.func_test_three_level() - - def func_test_add_parameter_with_error(self): + def test_add_parameter_with_error(self): with fluid.dygraph.guard(): net = fluid.Layer() param = net.create_parameter(shape=[1]) @@ -136,11 +121,6 @@ class TestBaseLayer(unittest.TestCase): net._loaddict_holder[load_param.name] = load_param net.add_parameter("load_param", load_param) - def test_add_parameter_with_error(self): - with _test_eager_guard(): - self.func_test_add_parameter_with_error() - self.func_test_add_parameter_with_error() - class BufferLayer(fluid.Layer): def __init__(self): @@ -169,7 +149,7 @@ class BufferNet(fluid.Layer): class TestBuffer(unittest.TestCase): - def func_test_buffers_and_named_buffers(self): + def test_buffers_and_named_buffers(self): def names(named_buffers): return [name for name, _ in named_buffers] @@ -192,12 +172,7 @@ class TestBuffer(unittest.TestCase): ['net_buffer', 'new_buffer'], ) - def test_buffers_and_named_buffers(self): - with _test_eager_guard(): - self.func_test_buffers_and_named_buffers() - self.func_test_buffers_and_named_buffers() - - def func_test_register_buffer_with_error(self): + def test_register_buffer_with_error(self): with fluid.dygraph.guard(): net = fluid.Layer() var = to_variable(np.zeros([1])) @@ -241,12 +216,7 @@ class TestBuffer(unittest.TestCase): with self.assertRaisesRegexp(KeyError, "already exists"): net.register_buffer("attr_name", var) - def test_register_buffer_with_error(self): - with _test_eager_guard(): - self.func_test_register_buffer_with_error() - self.func_test_register_buffer_with_error() - - def func_test_register_buffer_same_name(self): + def test_register_buffer_same_name(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([1])) @@ -260,12 +230,7 @@ class TestBuffer(unittest.TestCase): net.register_buffer("buffer_name", var3) self.assert_var_base_equal(net.buffer_name, var3) - def test_register_buffer_same_name(self): - with _test_eager_guard(): - self.func_test_register_buffer_same_name() - self.func_test_register_buffer_same_name() - - def func_test_buffer_not_persistable(self): + def test_buffer_not_persistable(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([1])) @@ -274,12 +239,7 @@ class TestBuffer(unittest.TestCase): self.assertEqual(len(net.buffers()), 1) self.assertEqual(len(net.state_dict()), 0) - def test_buffer_not_persistable(self): - with _test_eager_guard(): - self.func_test_buffer_not_persistable() - self.func_test_buffer_not_persistable() - - def func_test_buffer_not_persistable_del(self): + def test_buffer_not_persistable_del(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([1])) @@ -287,12 +247,7 @@ class TestBuffer(unittest.TestCase): del net.buffer_name self.assertEqual(len(net.buffers()), 0) - def test_buffer_not_persistable_del(self): - with _test_eager_guard(): - self.func_test_buffer_not_persistable_del() - 
self.func_test_buffer_not_persistable_del() - - def func_test_buffer_not_persistable_overwrite(self): + def test_buffer_not_persistable_overwrite(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([1])) @@ -308,12 +263,7 @@ class TestBuffer(unittest.TestCase): self.assertEqual(len(net.buffers()), 1) self.assertEqual(len(net.state_dict()), 0) - def test_buffer_not_persistable_overwrite(self): - with _test_eager_guard(): - self.func_test_buffer_not_persistable_overwrite() - self.func_test_buffer_not_persistable_overwrite() - - def func_test_buffer_not_persistable_assign(self): + def test_buffer_not_persistable_assign(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([1])) @@ -337,24 +287,14 @@ class TestBuffer(unittest.TestCase): self.assertEqual(len(net.buffers()), 0) self.assertEqual(len(net.state_dict()), 1) - def test_buffer_not_persistable_assign(self): - with _test_eager_guard(): - self.func_test_buffer_not_persistable_assign() - self.func_test_buffer_not_persistable_assign() - - def func_test_buffer_not_persistable_load(self): + def test_buffer_not_persistable_load(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([1])) net.register_buffer("buffer_name", var1, persistable=False) net.load_dict({}) - def test_buffer_not_persistable_load(self): - with _test_eager_guard(): - self.func_test_buffer_not_persistable_load() - self.func_test_buffer_not_persistable_load() - - def func_test_buffer_state_dict(self): + def test_buffer_state_dict(self): with fluid.dygraph.guard(): net = fluid.Layer() var1 = to_variable(np.zeros([2, 3])) @@ -375,11 +315,6 @@ class TestBuffer(unittest.TestCase): self.assert_var_base_equal(net_load.buffer_var1, var1) - def test_buffer_state_dict(self): - with _test_eager_guard(): - self.func_test_buffer_state_dict() - self.func_test_buffer_state_dict() - def assert_var_base_equal(self, var1, var2): np.testing.assert_array_equal(var1.numpy(), var2.numpy()) @@ -416,7 +351,7 @@ class TestModifiedBuffer(unittest.TestCase): return out, net.buffer1, net.buffer2 - def func_test_modified(self): + def test_modified(self): self.funcsetUp() dy_outs = self._run(False) st_outs = self._run(True) @@ -426,11 +361,6 @@ class TestModifiedBuffer(unittest.TestCase): dy_outs[i].numpy(), st_outs[i].numpy() ) - def test_modified(self): - with _test_eager_guard(): - self.func_test_modified() - self.func_test_modified() - class TestLayerTo(unittest.TestCase): def funcsetUp(self): @@ -614,12 +544,6 @@ class TestLayerTo(unittest.TestCase): self.assertIsNone(model._buffers['buf_name']) def test_main(self): - with _test_eager_guard(): - self.funcsetUp() - self.func_test_to_api() - self.func_test_to_api_paddle_dtype() - self.func_test_to_api_numpy_dtype() - self.func_test_to_api_none_buffer() self.funcsetUp() self.func_test_to_api() self.func_test_to_api_paddle_dtype() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index 74edcd61d3..ece07889df 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -20,7 +20,6 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard -from paddle.fluid.framework import _test_eager_guard class TestBatchNorm(unittest.TestCase): @@ -91,13 +90,12 @@ class TestBatchNorm(unittest.TestCase): def compute_1d(x): with 
fluid.dygraph.guard(p): - with _test_eager_guard(): - bn = paddle.nn.BatchNorm1D(shape[1]) - x1 = paddle.to_tensor(x) - x1.stop_gradient = False - y = bn(x1) - y.backward() - return y.numpy(), x1.gradient() + bn = paddle.nn.BatchNorm1D(shape[1]) + x1 = paddle.to_tensor(x) + x1.stop_gradient = False + y = bn(x1) + y.backward() + return y.numpy(), x1.gradient() places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -138,14 +136,13 @@ class TestBatchNorm(unittest.TestCase): def compute_v2(x): with fluid.dygraph.guard(p): - with _test_eager_guard(): - print("v2") - bn = paddle.nn.BatchNorm2D(shape[1]) - x1 = paddle.to_tensor(x) - x1.stop_gradient = False - y = bn(x1) - y.backward() - return y.numpy(), x1.gradient() + print("v2") + bn = paddle.nn.BatchNorm2D(shape[1]) + x1 = paddle.to_tensor(x) + x1.stop_gradient = False + y = bn(x1) + y.backward() + return y.numpy(), x1.gradient() x = np.random.randn(*shape).astype("float32") y1, g1 = compute_v1(x) @@ -175,9 +172,8 @@ class TestBatchNorm(unittest.TestCase): bn = paddle.nn.BatchNorm2D(shape[1]) y = bn(paddle.to_tensor(x)) - with _test_eager_guard(): - bn = paddle.nn.BatchNorm2D(shape[1]) - eag_y = bn(paddle.to_tensor(x)) + bn = paddle.nn.BatchNorm2D(shape[1]) + eag_y = bn(paddle.to_tensor(x)) assert np.allclose(eag_y.numpy(), y.numpy()) return y.numpy() diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py index 788c7418f7..91c818eba7 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py @@ -18,7 +18,6 @@ import numpy as np import paddle import paddle.fluid as fluid -from paddle.fluid.framework import _test_eager_guard def call_bce_layer( @@ -172,15 +171,6 @@ class TestBCEWithLogitsLoss(unittest.TestCase): functional=True, ) - with _test_eager_guard(): - eager_functional = test_dygraph( - place, - logit_np, - label_np, - reduction=reduction, - functional=True, - ) - np.testing.assert_allclose( static_functional, expected, rtol=1e-05 ) @@ -188,9 +178,6 @@ class TestBCEWithLogitsLoss(unittest.TestCase): static_functional, dy_functional, rtol=1e-05 ) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) - np.testing.assert_allclose( - eager_functional, expected, rtol=1e-05 - ) def test_BCEWithLogitsLoss_weight(self): logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( diff --git a/python/paddle/fluid/tests/unittests/test_bfgs.py b/python/paddle/fluid/tests/unittests/test_bfgs.py index 0e02f18ad0..e709eca7ab 100644 --- a/python/paddle/fluid/tests/unittests/test_bfgs.py +++ b/python/paddle/fluid/tests/unittests/test_bfgs.py @@ -18,7 +18,6 @@ import numpy as np import paddle import paddle.nn.functional as F -from paddle.fluid.framework import _test_eager_guard from paddle.incubate.optimizer.functional.bfgs import minimize_bfgs np.random.seed(123) @@ -125,7 +124,7 @@ class TestBfgs(unittest.TestCase): results = test_static_graph(func, x0, dtype='float64') np.testing.assert_allclose(0.8, results[2], rtol=1e-05, atol=1e-8) - def func_rosenbrock(self): + def test_rosenbrock(self): # The Rosenbrock function is a standard optimization test case. 
a = np.random.random(size=[1]).astype('float32') minimum = [a.item(), (a**2).item()] @@ -144,11 +143,6 @@ class TestBfgs(unittest.TestCase): results = test_dynamic_graph(func, x0) np.testing.assert_allclose(minimum, results[2], rtol=1e-05, atol=1e-8) - def test_rosenbrock(self): - with _test_eager_guard(): - self.func_rosenbrock() - self.func_rosenbrock() - def test_exception(self): def func(x): return paddle.dot(x, x) diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py index 7046d78fff..fed25ad18d 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py @@ -20,7 +20,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid from paddle.fluid import Program, program_guard -from paddle.fluid.framework import _test_eager_guard from paddle.nn.functional import interpolate @@ -363,12 +362,7 @@ class TestBicubicInterpDataLayout(TestBicubicInterpOp): class TestBicubicInterpOpAPI(unittest.TestCase): - def test_imperative_case(self): - with _test_eager_guard(): - self.func_case() - self.func_case() - - def func_case(self): + def test_case(self): np.random.seed(200) x_data = np.random.random((2, 3, 6, 6)).astype("float32") dim_data = np.array([12]).astype("int32") diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 77fd38b44d..b70c1cf249 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -813,7 +813,7 @@ def binary_cross_entropy_with_logits( one = _C_ops.full( [1], float(1.0), - core.VarDesc.VarType.FP32, + logit.dtype, _current_expected_place(), ) out = _C_ops.sigmoid_cross_entropy_with_logits( -- GitLab
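
Note on the one functional change in this patch (the final hunk, in python/paddle/nn/functional/loss.py): the internal constant `one` created just before the `sigmoid_cross_entropy_with_logits` call was previously built with a hard-coded `core.VarDesc.VarType.FP32` dtype regardless of the input's dtype; deriving it from `logit.dtype` keeps the later arithmetic (presumably the `pos_weight` weighting, judging from where `one` sits in the function) dtype-consistent for non-float32 logits. Below is a minimal sketch that exercises this path; the shapes, values, and the float64 choice are illustrative and not taken from the patch:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    # float64 inputs follow the code path touched by the fix: the internal
    # constant `one` is now created with logit.dtype rather than a
    # hard-coded FP32, so the pos_weight computation no longer mixes
    # float32 and float64 operands.
    logit = paddle.to_tensor(
        np.random.uniform(0.1, 0.8, size=(2, 3)), dtype='float64'
    )
    label = paddle.to_tensor(
        np.random.randint(0, 2, size=(2, 3)), dtype='float64'
    )
    pos_weight = paddle.ones([3], dtype='float64')

    loss = F.binary_cross_entropy_with_logits(
        logit, label, pos_weight=pos_weight, reduction='mean'
    )
    print(loss.numpy())

Since the mismatch could only surface for non-float32 inputs, the float32-only tests touched elsewhere in this patch would not have caught it, which is presumably why the fix ships alongside the test cleanup.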