diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py
index ae772818cbc6d057b1ca3b520226f157397b4e98..d677d3b34c64c8e97f9cf551d5876d7a0f0e17f5 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py
@@ -170,7 +170,7 @@ class TestUniformRandomInplaceGrad(unittest.TestCase):
     def setUp(self):
         self.shape = (1000, 784)
 
-    def test_uniform_random_inplace_grad(self):
+    def run_(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
 
         def test_grad():
@@ -191,33 +191,12 @@ class TestUniformRandomInplaceGrad(unittest.TestCase):
             test_grad()
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
 
-
-class TestUniformRandomInplaceGradOldDygraph(unittest.TestCase):
-
-    def setUp(self):
-        self.shape = (1000, 784)
-
     def test_uniform_random_inplace_grad(self):
-        _enable_legacy_dygraph()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-
-        def test_grad():
-            tensor_a = paddle.ones(self.shape)
-            tensor_a.stop_gradient = False
-            tensor_b = tensor_a * 0.5
-            tensor_b.uniform_(min=-2, max=2)
-            loss = tensor_b.sum()
-            loss.backward()
-            uniform_grad = tensor_b.grad.numpy()
-            self.assertTrue((uniform_grad == 0).all())
+        self.run_()
 
-        places = ['cpu']
-        if fluid.core.is_compiled_with_cuda():
-            places.append('gpu')
-        for place in places:
-            paddle.set_device(place)
-            test_grad()
-        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+    def test_uniform_random_inplace_grad_old_dygraph(self):
+        _enable_legacy_dygraph()
+        self.run_()
+        _disable_legacy_dygraph()
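
For review convenience, here is a minimal sketch of how the consolidated test class should read once both hunks apply. The imports and the body of `test_grad()` are assumptions reconstructed from the removed duplicate class and the file's surrounding context; the diff itself does not show them in full.

```python
# Sketch of the consolidated test class after this patch (not the full file).
# Imports are assumed from the existing test module, not shown in the diff.
import unittest

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import (_disable_legacy_dygraph,
                                    _enable_legacy_dygraph)


class TestUniformRandomInplaceGrad(unittest.TestCase):

    def setUp(self):
        self.shape = (1000, 784)

    def run_(self):
        # Shared body: formerly duplicated between this class and the
        # removed TestUniformRandomInplaceGradOldDygraph class.
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})

        def test_grad():
            tensor_a = paddle.ones(self.shape)
            tensor_a.stop_gradient = False
            tensor_b = tensor_a * 0.5
            tensor_b.uniform_(min=-2, max=2)
            loss = tensor_b.sum()
            loss.backward()
            uniform_grad = tensor_b.grad.numpy()
            # The test expects the in-place uniform_ op to cut the gradient,
            # so the retained grad on tensor_b should be all zeros.
            self.assertTrue((uniform_grad == 0).all())

        places = ['cpu']
        if fluid.core.is_compiled_with_cuda():
            places.append('gpu')
        for place in places:
            paddle.set_device(place)
            test_grad()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

    def test_uniform_random_inplace_grad(self):
        # New-dygraph path: runs the shared body directly.
        self.run_()

    def test_uniform_random_inplace_grad_old_dygraph(self):
        # Old-dygraph path: same body, wrapped in the legacy-dygraph switch.
        _enable_legacy_dygraph()
        self.run_()
        _disable_legacy_dygraph()
```

The trailing underscore in `run_` presumably keeps unittest from collecting the helper as a test and avoids shadowing `TestCase.run`; the net effect of the patch is one shared body exercised under both dygraph modes instead of two near-identical classes.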