diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py
index ab836b088b09fbeafa11123b9b446b3df0fd9bc5..872d419ff8928df767ac44b502ef354f3c53b43c 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py
@@ -20,9 +20,6 @@ import unittest
 
 from simnet_dygraph_model_v2 import BOW, HingeLoss
 
-from paddle.fluid.framework import _enable_legacy_dygraph
-_enable_legacy_dygraph()
-
 SEED = 102
 random.seed(SEED)
 
diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py
index 20abeaec7268c7331b2ef3149ab5bbcf05d7c7e8..e8d4fc260b87a65a4924592c55d49353d5bea7d6 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py
@@ -23,7 +23,6 @@ import paddle.static as static
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.fluid.framework import _test_eager_guard, _enable_legacy_dygraph
-_enable_legacy_dygraph()
 import os
 
 from paddle import _C_ops
@@ -979,6 +978,7 @@ class TestDropoutBackward(unittest.TestCase):
                 ), self.cal_grad_downscale_in_infer(mask.numpy())))
 
     def test_backward_upscale_train(self):
+        _enable_legacy_dygraph()
         for place in self.places:
             with fluid.dygraph.guard(place):
 
@@ -1010,6 +1010,7 @@ class TestDropoutBackward(unittest.TestCase):
                 ), self.cal_grad_upscale_train(mask.numpy(), prob)))
 
     def test_backward_upscale_train_2(self):
+        _enable_legacy_dygraph()
         for place in self.places:
             with fluid.dygraph.guard(place):
 
@@ -1025,6 +1026,23 @@ class TestDropoutBackward(unittest.TestCase):
                 np.allclose(input.gradient(
                 ), self.cal_grad_upscale_train(mask.numpy(), prob)))
 
+    def test_backward_upscale_train_2_eager(self):
+        for place in self.places:
+            with fluid.dygraph.guard(place):
+                with _test_eager_guard():
+
+                    prob = 0.3
+                    input = paddle.uniform([40, 40], dtype="float32")
+                    input.stop_gradient = False
+                    out, mask = _C_ops.final_state_dropout(
+                        input, None, 0.3, False, "upscale_in_train", 0, False)
+
+                    out.backward()
+
+                    self.assertTrue(
+                        np.allclose(input.gradient(
+                        ), self.cal_grad_upscale_train(mask.numpy(), prob)))
+
 
 class TestRandomValue(unittest.TestCase):
     def test_fixed_random_number(self):
diff --git a/python/paddle/fluid/tests/unittests/test_lbfgs.py b/python/paddle/fluid/tests/unittests/test_lbfgs.py
index 2cad4822b28b14ecc6536829528da156a2ab5f29..bb3818747601fd7a097def4ffafdadabfb671c48 100644
--- a/python/paddle/fluid/tests/unittests/test_lbfgs.py
+++ b/python/paddle/fluid/tests/unittests/test_lbfgs.py
@@ -21,9 +21,6 @@ import paddle.nn.functional as F
 
 from paddle.incubate.optimizer.functional.lbfgs import minimize_lbfgs
 
-from paddle.fluid.framework import _enable_legacy_dygraph
-_enable_legacy_dygraph()
-
 np.random.seed(123)
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_nan_inf.py b/python/paddle/fluid/tests/unittests/test_nan_inf.py
index 9b11f6711afc19aaeee7533e978372ae68c781fc..84559048a2b8a919f96ca3cbbd1ea7bec3c56ffb 100644
--- a/python/paddle/fluid/tests/unittests/test_nan_inf.py
+++ b/python/paddle/fluid/tests/unittests/test_nan_inf.py
@@ -20,8 +20,6 @@ import os
 import sys
 import subprocess
 import paddle
-from paddle.fluid.framework import _enable_legacy_dygraph
-_enable_legacy_dygraph()
 
 paddle.enable_static()