From 7a245b7a6aa665ec08db816aba50eb51d0e4219b Mon Sep 17 00:00:00 2001
From: zhulei <563755780@qq.com>
Date: Wed, 28 Apr 2021 14:31:10 +0800
Subject: [PATCH] [Rocm] fix test_var_base (#32639)

---
 paddle/fluid/imperative/tracer.cc                    |  4 ++--
 python/paddle/fluid/tests/unittests/test_var_base.py | 10 ++++++----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index 742514c0910..41ad70e5a57 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -84,7 +84,7 @@ paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
   if (gcs_.count(place) == 0) {
     std::unique_ptr<framework::GarbageCollector> gc;
     if (platform::is_gpu_place(place)) {
-#ifdef PADDLE_WITH_CUDA
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
       gc.reset(new framework::DefaultStreamGarbageCollector(
           BOOST_GET_CONST(platform::CUDAPlace, place), 0));
 
@@ -95,7 +95,7 @@ paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
           "Please recompile or reinstall Paddle with GPU support."));
 #endif
     } else if (platform::is_cuda_pinned_place(place)) {
-#ifdef PADDLE_WITH_CUDA
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
       gc.reset(new framework::CUDAPinnedGarbageCollector(
           BOOST_GET_CONST(platform::CUDAPinnedPlace, place), 0));
 
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index a65308c84e7..8bf42390d1e 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -256,19 +256,21 @@ class TestVarBase(unittest.TestCase):
             detach_x = x.detach()
             self.assertTrue(detach_x.stop_gradient, True)
+            cmp_float = np.allclose if core.is_compiled_with_rocm(
+            ) else np.array_equal
             detach_x[:] = 10.0
-            self.assertTrue(np.array_equal(x.numpy(), [10.0]))
+            self.assertTrue(cmp_float(x.numpy(), [10.0]))
 
             y = x**2
             y.backward()
-            self.assertTrue(np.array_equal(x.grad.numpy(), [20.0]))
+            self.assertTrue(cmp_float(x.grad.numpy(), [20.0]))
             self.assertEqual(detach_x.grad, None)
 
             detach_x.stop_gradient = False
             # Set stop_gradient to be False, supported auto-grad
             z = 3 * detach_x**2
             z.backward()
-            self.assertTrue(np.array_equal(x.grad.numpy(), [20.0]))
-            self.assertTrue(np.array_equal(detach_x.grad.numpy(), [60.0]))
+            self.assertTrue(cmp_float(x.grad.numpy(), [20.0]))
+            self.assertTrue(cmp_float(detach_x.grad.numpy(), [60.0]))
 
             # Due to sharing of data with origin Tensor, There are some unsafe operations:
             with self.assertRaises(RuntimeError):
--
GitLab
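
A minimal sketch (not part of the patch) of the comparison-selection pattern
the test change above relies on. The helper pick_comparator and the hard-coded
compiled_with_rocm flag are illustrative assumptions; the real test calls
core.is_compiled_with_rocm() from paddle.fluid.core. On ROCm, HIP kernels can
produce results that differ from CUDA by small floating-point error, so exact
equality is relaxed to a tolerance-based check:

    import numpy as np

    def pick_comparator(compiled_with_rocm):
        # np.allclose passes when |a - b| <= atol + rtol * |b|
        # (defaults: rtol=1e-05, atol=1e-08);
        # np.array_equal demands exact element-wise equality.
        if compiled_with_rocm:
            return np.allclose
        return np.array_equal

    cmp_float = pick_comparator(compiled_with_rocm=True)  # assumed ROCm build
    a = np.array([10.0 + 1e-9])
    b = np.array([10.0])
    assert cmp_float(a, b)           # tiny FP drift passes under np.allclose
    assert not np.array_equal(a, b)  # but fails an exact comparison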