diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index 9b2aaa9308e5df7a1527d0fa217ab12ae1ecc156..4d0e485285146e5668793d29fd8effc789fcc339 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -493,7 +493,8 @@ class AllocatorFacadePrivate {
           "support allocating managed memory.\n"
           "If you don't actually need to use managed memory, please disable "
           "it with command `export FLAGS_use_cuda_managed_memory=false`.\n"
-          "Or you must use the gpu device that supports managed memory."));
+          "Or you must use the gpu device that supports managed memory.",
+          p.device));
     }
     return std::make_shared<CUDAManagedAllocator>(p);
   }
diff --git a/paddle/fluid/memory/cuda_managed_memory_test.cu b/paddle/fluid/memory/cuda_managed_memory_test.cu
index 4243c5fa90f7fad4f7a98a9d87545ef66cbe9875..f8c9ff82f57127d43bba8e7e03770dd3280832a8 100644
--- a/paddle/fluid/memory/cuda_managed_memory_test.cu
+++ b/paddle/fluid/memory/cuda_managed_memory_test.cu
@@ -128,6 +128,9 @@ TEST(ManagedMemoryTest, OversubscribeGPUMemoryTest) {
 }
 
 TEST(ManagedMemoryTest, OOMExceptionTest) {
+  if (!platform::IsGPUManagedMemorySupported(0)) {
+    return;
+  }
   EXPECT_THROW(Alloc(platform::CUDAPlace(0), size_t(1) << 60),
                memory::allocation::BadAlloc);
 }