diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index 742514c0910a23c99ab5286c23071bfcf2db0385..41ad70e5a5741b78acbf6403bf9cd6b1a390ecf0 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -84,7 +84,7 @@ paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
   if (gcs_.count(place) == 0) {
     std::unique_ptr<framework::GarbageCollector> gc;
     if (platform::is_gpu_place(place)) {
-#ifdef PADDLE_WITH_CUDA
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
       gc.reset(new framework::DefaultStreamGarbageCollector(
           BOOST_GET_CONST(platform::CUDAPlace, place), 0));
 
@@ -95,7 +95,7 @@ paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
           "Please recompile or reinstall Paddle with GPU support."));
 #endif
     } else if (platform::is_cuda_pinned_place(place)) {
-#ifdef PADDLE_WITH_CUDA
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
       gc.reset(new framework::CUDAPinnedGarbageCollector(
           BOOST_GET_CONST(platform::CUDAPinnedPlace, place), 0));
 
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index a65308c84e7193f567e5c17e870e850441daabd0..8bf42390d1ea899b64829e4e109b3f3f56ff6e82 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -256,19 +256,21 @@ class TestVarBase(unittest.TestCase):
         detach_x = x.detach()
         self.assertTrue(detach_x.stop_gradient, True)
 
+        cmp_float = np.allclose if core.is_compiled_with_rocm(
+        ) else np.array_equal
         detach_x[:] = 10.0
-        self.assertTrue(np.array_equal(x.numpy(), [10.0]))
+        self.assertTrue(cmp_float(x.numpy(), [10.0]))
 
         y = x**2
         y.backward()
-        self.assertTrue(np.array_equal(x.grad.numpy(), [20.0]))
+        self.assertTrue(cmp_float(x.grad.numpy(), [20.0]))
         self.assertEqual(detach_x.grad, None)
 
         detach_x.stop_gradient = False  # Set stop_gradient to be False, supported auto-grad
         z = 3 * detach_x**2
         z.backward()
-        self.assertTrue(np.array_equal(x.grad.numpy(), [20.0]))
-        self.assertTrue(np.array_equal(detach_x.grad.numpy(), [60.0]))
+        self.assertTrue(cmp_float(x.grad.numpy(), [20.0]))
+        self.assertTrue(cmp_float(detach_x.grad.numpy(), [60.0]))
 
         # Due to sharing of data with origin Tensor, There are some unsafe operations:
         with self.assertRaises(RuntimeError):