diff --git a/paddle/fluid/memory/allocation/cuda_allocator.cc b/paddle/fluid/memory/allocation/cuda_allocator.cc
index c1b12f5c0ecbb6e4b367be0eb0ea9730b9f14ea6..b1a45afa99d9a565bfc3b8b3e6192eca7d2ccd05 100644
--- a/paddle/fluid/memory/allocation/cuda_allocator.cc
+++ b/paddle/fluid/memory/allocation/cuda_allocator.cc
@@ -54,6 +54,7 @@ Allocation* CUDAAllocator::AllocateImpl(size_t size) {
   size_t avail, total, actual_avail, actual_total;
   bool is_limited = platform::RecordedCudaMemGetInfo(
       &avail, &total, &actual_avail, &actual_total, place_.device);
+  size_t allocated = total - avail;
 
   std::string err_msg;
   if (is_limited) {
@@ -68,13 +69,14 @@ Allocation* CUDAAllocator::AllocateImpl(size_t size) {
 
   PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
       "\n\nOut of memory error on GPU %d. "
-      "Cannot allocate %s memory on GPU %d, "
+      "Cannot allocate %s memory on GPU %d, %s memory has been allocated and "
       "available memory is only %s.\n\n"
       "Please check whether there is any other process using GPU %d.\n"
       "1. If yes, please stop them, or start PaddlePaddle on another GPU.\n"
       "2. If no, please decrease the batch size of your model. %s\n\n",
       place_.device, string::HumanReadableSize(size), place_.device,
-      string::HumanReadableSize(avail), place_.device, err_msg));
+      string::HumanReadableSize(allocated), string::HumanReadableSize(avail),
+      place_.device, err_msg));
 }
 
 }  // namespace allocation
diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index c733ba5c68c9bd8623acbc57bd248ebab449ef4c..0d7065d8bfba0e4ba6f443a3f9e87ee0e1a825a6 100644
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -125,6 +125,7 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
     size_t avail, total, actual_avail, actual_total;
     bool is_limited = platform::RecordedCudaMemGetInfo(
         &avail, &total, &actual_avail, &actual_total, gpu_id_);
+    size_t allocated = total - avail;
 
     std::string err_msg;
     if (is_limited) {
@@ -139,7 +140,7 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
 
     PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
         "\n\nOut of memory error on GPU %d. "
-        "Cannot allocate %s memory on GPU %d, "
+        "Cannot allocate %s memory on GPU %d, %s memory has been allocated and "
         "available memory is only %s.\n\n"
        "Please check whether there is any other process using GPU %d.\n"
         "1. If yes, please stop them, or start PaddlePaddle on another GPU.\n"
@@ -150,8 +151,8 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
         " The command is "
         "`export FLAGS_fraction_of_gpu_memory_to_use=xxx`.%s\n\n",
         gpu_id_, string::HumanReadableSize(size), gpu_id_,
-        string::HumanReadableSize(avail), gpu_id_,
-        FLAGS_fraction_of_gpu_memory_to_use, err_msg));
+        string::HumanReadableSize(allocated), string::HumanReadableSize(avail),
+        gpu_id_, FLAGS_fraction_of_gpu_memory_to_use, err_msg));
   }
 }
 