未验证 提交 b34933d9 编写于 作者: Z Zeng Jinle 提交者: GitHub

fix retry allocator bug, test=develop (#19794)

上级 c67c8758
......@@ -37,6 +37,7 @@ Allocation* CUDAAllocator::AllocateImpl(size_t size) {
void* ptr;
auto status = cudaMalloc(&ptr, size);
if (UNLIKELY(status != cudaSuccess)) {
PADDLE_ENFORCE_NE(cudaGetLastError(), cudaSuccess);
PADDLE_THROW_BAD_ALLOC("Cannot allocate %d on GPU %d, cuda status %d, %s",
size, place_.device, status,
cudaGetErrorString(status));
......
......@@ -118,6 +118,7 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
gpu_alloc_size_ += size;
return p;
} else {
PADDLE_ENFORCE_NE(cudaGetLastError(), cudaSuccess);
PADDLE_THROW_BAD_ALLOC(
"Cannot malloc " + std::to_string(size / 1024.0 / 1024.0) +
" MB GPU memory. Please shrink "
......
......@@ -19,6 +19,7 @@ limitations under the License. */
#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator.h"
DECLARE_bool(use_pinned_memory);
......@@ -68,4 +69,16 @@ TEST(CUDAPinnedAllocator, Alloc) {
TestAllocator(&a, 2048);
TestAllocator(&a, 0);
}
// Requesting an absurdly large allocation must raise BadAlloc, and the
// allocator must clear the sticky CUDA error state afterwards so that
// later CUDA calls do not spuriously fail.
TEST(GPUAllocator, AllocFailure) {
  paddle::memory::detail::GPUAllocator allocator(0);
  size_t index;
  size_t invalid_size = static_cast<size_t>(-1);  // far beyond any GPU capacity
  bool caught_bad_alloc = false;
  try {
    allocator.Alloc(&index, invalid_size);
  } catch (paddle::memory::allocation::BadAlloc&) {
    caught_bad_alloc = true;
    // The failed cudaMalloc must not leave a pending error behind.
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError());
  }
  // If Alloc returned normally, the expected exception was never thrown.
  ASSERT_TRUE(caught_bad_alloc);
}
#endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册