Unverified commit 37a09539 authored by Ruibiao Chen, committed by GitHub

Remove managed memory msg in cuda allocator (#53263)

Parent 9f9cd919
@@ -70,29 +70,20 @@ phi::Allocation* CUDAAllocator::AllocateImpl(size_t size) {
         limit_size);
   }
-  std::string managed_memory_msg;
-  if (platform::IsGPUManagedMemoryOversubscriptionSupported(place_.device)) {
-    managed_memory_msg = string::Sprintf(
-        "If the above ways do not solve the out of memory problem, you can try "
-        "to use CUDA managed memory. The command is `export "
-        "FLAGS_use_cuda_managed_memory=true`.");
-  }
   PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
       "\n\nOut of memory error on GPU %d. "
       "Cannot allocate %s memory on GPU %d, %s memory has been allocated and "
       "available memory is only %s.\n\n"
       "Please check whether there is any other process using GPU %d.\n"
       "1. If yes, please stop them, or start PaddlePaddle on another GPU.\n"
-      "2. If no, please decrease the batch size of your model. %s\n%s\n",
+      "2. If no, please decrease the batch size of your model. %s\n",
       place_.device,
       string::HumanReadableSize(size),
       place_.device,
       string::HumanReadableSize(allocated),
       string::HumanReadableSize(avail),
       place_.device,
-      err_msg,
-      managed_memory_msg));
+      err_msg));
 }
 }  // namespace allocation
...
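For reference, a minimal standalone sketch (not Paddle code) of the pattern this commit removes: conditionally appending a CUDA managed-memory hint, i.e. the `export FLAGS_use_cuda_managed_memory=true` suggestion, to the GPU out-of-memory message. The helper name below is a hypothetical stand-in for platform::IsGPUManagedMemoryOversubscriptionSupported, and plain string concatenation stands in for string::Sprintf / PADDLE_THROW_BAD_ALLOC; after this change, only err_msg is passed and the format string has one fewer %s.

#include <cstddef>
#include <cstdio>
#include <string>

// Hypothetical stand-in for platform::IsGPUManagedMemoryOversubscriptionSupported;
// a real check would query the CUDA driver / device attributes.
bool SupportsManagedMemoryOversubscription(int device) {
  (void)device;
  return true;  // assume supported, for illustration only
}

// Builds an OOM message the way the removed code did: base message first,
// then an optional hint suggesting FLAGS_use_cuda_managed_memory=true.
std::string BuildOomMessage(int device, std::size_t requested_bytes) {
  char buf[128];
  std::snprintf(buf, sizeof(buf),
                "Out of memory error on GPU %d. Cannot allocate %zu bytes.",
                device, requested_bytes);
  std::string msg(buf);
  if (SupportsManagedMemoryOversubscription(device)) {
    // This is the suggestion that commit #53263 drops from the error text.
    msg += " If the above ways do not solve the out of memory problem, you can "
           "try to use CUDA managed memory: "
           "`export FLAGS_use_cuda_managed_memory=true`.";
  }
  return msg;
}

int main() {
  std::printf("%s\n", BuildOomMessage(0, std::size_t{1} << 30).c_str());
  return 0;
}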