未验证 提交 ff5be2fb 编写于 作者: L Leo Chen 提交者: GitHub

Refine error message in memory folder (#25095)

* refine PADDLE_THROW, test=develop

* refine error msg, test=develop

* refine cuda error, test=develop

* follow comments, test=develop

* fix compile problem, test=develop

* fix bug, test=develop
上级 1f24c84a
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
......@@ -35,9 +36,13 @@ class AlignedAllocation : public Allocation {
// Constructs an allocator that aligns every allocation from the underlying
// allocator to `alignment` bytes.
// Note: the parameter is spelled "underlyning_allocator" upstream; kept as-is
// to avoid touching the declaration in the header.
AlignedAllocator::AlignedAllocator(
    const std::shared_ptr<Allocator>& underlyning_allocator, size_t alignment)
    : underlying_allocator_(underlyning_allocator), alignment_(alignment) {
  PADDLE_ENFORCE_GT(
      alignment_, 0,
      platform::errors::InvalidArgument(
          "Alignment should be larger than 0, but got %d", alignment_));
  // A power of two has exactly one bit set, so x & (x - 1) == 0 iff x == 2^N.
  if (alignment_ & (alignment_ - 1)) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Alignment should be power of 2 (2^N), but got %d", alignment_));
  }
}
......
......@@ -12,14 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include <gflags/gflags.h>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
......@@ -94,8 +97,8 @@ class AllocatorFacadePrivate {
}
default: {
PADDLE_THROW("Unsupported allocator strategy: %d",
static_cast<int>(strategy));
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported allocator strategy: %d", static_cast<int>(strategy)));
}
}
InitZeroSizeAllocators();
......@@ -115,8 +118,9 @@ class AllocatorFacadePrivate {
: allocators_)
: zero_size_allocators_);
auto iter = allocators.find(place);
PADDLE_ENFORCE(iter != allocators.end(),
"No such allocator for the place, %s", place);
PADDLE_ENFORCE_NE(iter, allocators.end(),
platform::errors::NotFound(
"No allocator found for the place, %s", place));
return iter->second;
}
......@@ -208,7 +212,10 @@ class AllocatorFacadePrivate {
}
void WrapCUDARetryAllocator(size_t retry_time) {
PADDLE_ENFORCE_GT(retry_time, 0, "Retry time must be larger than 0");
PADDLE_ENFORCE_GT(
retry_time, 0,
platform::errors::InvalidArgument(
"Retry time should be larger than 0, but got %d", retry_time));
for (auto& pair : allocators_) {
if (platform::is_gpu_place(pair.first)) {
pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
......
......@@ -36,7 +36,10 @@ static AllocatorStrategy GetStrategyFromFlag() {
return AllocatorStrategy::kThreadLocal;
}
PADDLE_THROW("Unsupported allocator strategy: %s", FLAGS_allocator_strategy);
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported allocator strategy: %s, condicates are naive_best_fit, "
"auto_growth or thread_local.",
FLAGS_allocator_strategy));
}
AllocatorStrategy GetAllocatorStrategy() {
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
#include <cmath>
#include <list>
#include <map>
......@@ -61,8 +62,13 @@ BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size,
auto to_split_it = bin_iterator->second;
free_chunks_[free_chunk_offset].erase(bin_iterator);
PADDLE_ENFORCE(to_split_it->is_free);
PADDLE_ENFORCE_GE(to_split_it->size_, request_size);
PADDLE_ENFORCE_EQ(to_split_it->is_free, true,
platform::errors::PreconditionNotMet(
"The memory chunk to split is not free"));
PADDLE_ENFORCE_GE(to_split_it->size_, request_size,
platform::errors::PreconditionNotMet(
"The size of memory chunk to split is "
"not larger than size of request memory"));
auto remaining_size = to_split_it->size_ - request_size;
details::Chunk to_use;
......@@ -99,7 +105,9 @@ void BestFitAllocator::EraseFreeNode(const ListIt& it) {
while (map_it->second != it && map_it != free_map.end()) {
++map_it;
}
PADDLE_ENFORCE(map_it != free_map.end());
PADDLE_ENFORCE_NE(
map_it, free_map.end(),
platform::errors::NotFound("The node to erase is not found in map"));
free_map.erase(map_it);
}
size_t BestFitAllocator::NumFreeChunks() const {
......@@ -111,10 +119,14 @@ size_t BestFitAllocator::NumFreeChunks() const {
}
void BestFitAllocator::FreeImpl(Allocation* allocation) {
auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
PADDLE_ENFORCE_NOT_NULL(bf_allocation,
"The input allocation is not BestFitAllocation.");
PADDLE_ENFORCE_NOT_NULL(
bf_allocation,
platform::errors::InvalidArgument(
"The input allocation is not type of BestFitAllocation."));
auto chunk_it = bf_allocation->ChunkIterator();
PADDLE_ENFORCE(!chunk_it->is_free);
PADDLE_ENFORCE_EQ(chunk_it->is_free, false,
platform::errors::PreconditionNotMet(
"The chunk of allocation to free is freed already"));
chunk_it->is_free = true;
if (chunk_it != chunks_.begin()) {
auto prev_it = chunk_it;
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/buffered_allocator.h"
#include <algorithm>
#include <limits>
#include <utility>
......@@ -25,7 +26,8 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr<Allocator> allocator)
: underlying_allocator_(std::move(allocator)) {
PADDLE_ENFORCE_NOT_NULL(
underlying_allocator_,
"Underlying allocator of BufferedAllocator must not be null");
platform::errors::InvalidArgument(
"Underlying allocator of BufferedAllocator is NULL"));
if (underlying_allocator_->IsAllocThreadSafe()) {
mtx_.reset(new std::mutex());
}
......
......@@ -13,8 +13,11 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/buffered_allocator.h"
#include <gtest/gtest.h>
#include <utility>
#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/locked_allocator.h"
......@@ -67,7 +70,9 @@ class StubAllocator : public Allocator {
protected:
void FreeImpl(Allocation *allocation) override {
auto *alloc = dynamic_cast<StubAllocation *>(allocation);
PADDLE_ENFORCE_NOT_NULL(alloc);
PADDLE_ENFORCE_NOT_NULL(
alloc, platform::errors::InvalidArgument(
"The input allocation is not type of StubAllocation."));
if (alloc->ptr()) delete[] static_cast<uint8_t *>(alloc->ptr());
++destruct_count_;
delete allocation;
......
......@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include <stdlib.h>
#include <string>
namespace paddle {
......@@ -37,8 +39,11 @@ Allocation *CPUAllocator::AllocateImpl(size_t size) {
#ifdef _WIN32
p = _aligned_malloc(size, kAlignment);
#else
PADDLE_ENFORCE_EQ(posix_memalign(&p, kAlignment, size), 0, "Alloc %ld error!",
size);
int error = posix_memalign(&p, kAlignment, size);
PADDLE_ENFORCE_EQ(
error, 0,
platform::errors::ResourceExhausted(
"Fail to alloc memory of %ld size, error code is %d.", size, error));
#endif
return new Allocation(p, size, platform::CPUPlace());
}
......
......@@ -15,6 +15,7 @@
#pragma once
#include <cuda_runtime.h>
#include <map>
#include <memory>
#include <utility>
......@@ -48,7 +49,9 @@ class CUDADeviceContextAllocation : public Allocation {
~CUDADeviceContextAllocation() {
PADDLE_ENFORCE_NOT_NULL(
dev_ctx_, "Didn't set device context for CUDADeviceContextAllocation");
dev_ctx_,
platform::errors::PreconditionNotMet(
"Device context is not set for CUDADeviceContextAllocation"));
auto *p_allocation = underlying_allocation_.release();
VLOG(4) << "Adding callback to delete CUDADeviceContextAllocation at "
<< p_allocation;
......@@ -94,7 +97,8 @@ class CUDADeviceContextAllocator : public Allocator {
Allocation *AllocateImpl(size_t size) override {
PADDLE_ENFORCE_NOT_NULL(
default_stream_,
"Didn't set default stream for CUDADeviceContextAllocator");
platform::errors::PreconditionNotMet(
"Default stream is not set for CUDADeviceContextAllocator"));
platform::CUDADeviceGuard guard(place_.device);
auto allocation =
new CUDADeviceContextAllocation(memory::Alloc(place_, size));
......@@ -130,8 +134,9 @@ class CUDADeviceContextAllocatorPool {
AllocationPtr Alloc(const platform::CUDADeviceContext &dev_ctx, size_t size) {
auto iter = allocators_.find(
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()));
PADDLE_ENFORCE_EQ(iter != allocators_.end(), true,
"CUDADeviceContextAllocatorPool initialization error");
PADDLE_ENFORCE_NE(
iter, allocators_.end(),
platform::errors::NotFound("No allocator found for CUDAPlace."));
auto &allocator = iter->second;
AllocationPtr allocation = allocator->Allocate(size);
static_cast<CUDADeviceContextAllocation *>(allocation.get())
......
......@@ -13,8 +13,10 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/locked_allocator.h"
#include <mutex> // NOLINT
#include <utility>
#include "paddle/fluid/platform/lock_guard_ptr.h"
namespace paddle {
......@@ -26,7 +28,10 @@ bool LockedAllocator::IsAllocThreadSafe() const { return true; }
LockedAllocator::LockedAllocator(
std::shared_ptr<Allocator> underlying_allocator)
: underlying_allocator_(std::move(underlying_allocator)) {
PADDLE_ENFORCE_NOT_NULL(underlying_allocator_);
PADDLE_ENFORCE_NOT_NULL(
underlying_allocator_,
platform::errors::InvalidArgument(
"Underlying allocator of LockedAllocator is NULL"));
if (!underlying_allocator_->IsAllocThreadSafe()) {
mtx_.reset(new std::mutex());
}
......
......@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/detail/buddy_allocator.h"
#include "paddle/fluid/memory/detail/system_allocator.h"
#include "paddle/fluid/platform/gpu_info.h"
......@@ -125,7 +126,11 @@ class GPUBuddyAllocatorList {
BuddyAllocator *Get(int gpu_id) {
auto pos = std::distance(
devices_.begin(), std::find(devices_.begin(), devices_.end(), gpu_id));
PADDLE_ENFORCE_LT(pos, devices_.size());
PADDLE_ENFORCE_LT(pos, devices_.size(),
platform::errors::OutOfRange(
"The index exceeds the size of devices, the size of "
"devices is %d, the index is %d",
devices_.size(), pos));
std::call_once(*init_flags_[pos], [this, pos] {
platform::SetDeviceId(devices_[pos]);
......@@ -166,7 +171,8 @@ size_t Used<platform::CUDAPlace>(const platform::CUDAPlace &place) {
#ifdef PADDLE_WITH_CUDA
return GetGPUBuddyAllocator(place.device)->Used();
#else
PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPlace' is not supported in CPU only device."));
#endif
}
......@@ -195,7 +201,8 @@ void *Alloc<platform::CUDAPlace>(const platform::CUDAPlace &place,
}
return ptr;
#else
PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPlace' is not supported in CPU only device."));
#endif
}
......@@ -205,7 +212,8 @@ void Free<platform::CUDAPlace>(const platform::CUDAPlace &place, void *p,
#ifdef PADDLE_WITH_CUDA
GetGPUBuddyAllocator(place.device)->Free(p);
#else
PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPlace' is not supported in CPU only device."));
#endif
}
......@@ -230,7 +238,8 @@ size_t Used<platform::CUDAPinnedPlace>(const platform::CUDAPinnedPlace &place) {
#ifdef PADDLE_WITH_CUDA
return GetCUDAPinnedBuddyAllocator()->Used();
#else
PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPinnedPlace' is not supported in CPU only device."));
#endif
}
......@@ -250,7 +259,8 @@ void *Alloc<platform::CUDAPinnedPlace>(const platform::CUDAPinnedPlace &place,
}
return ptr;
#else
PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPinnedPlace' is not supported in CPU only device."));
#endif
}
......@@ -260,7 +270,8 @@ void Free<platform::CUDAPinnedPlace>(const platform::CUDAPinnedPlace &place,
#ifdef PADDLE_WITH_CUDA
GetCUDAPinnedBuddyAllocator()->Free(p);
#else
PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPinnedPlace' is not supported in CPU only device."));
#endif
}
......@@ -298,7 +309,8 @@ size_t Usage::operator()(const platform::CUDAPlace &gpu) const {
#ifdef PADDLE_WITH_CUDA
return Used(gpu);
#else
PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPlace' is not supported in CPU only device."));
#endif
}
......@@ -306,7 +318,8 @@ size_t Usage::operator()(const platform::CUDAPinnedPlace &cuda_pinned) const {
#ifdef PADDLE_WITH_CUDA
return Used(cuda_pinned);
#else
PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device.");
PADDLE_THROW(platform::errors::PermissionDenied(
"'CUDAPinnedPlace' is not supported in CPU only device."));
#endif
}
} // namespace legacy
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include <cuda.h>
#include <cuda_runtime.h>
......@@ -21,12 +22,12 @@ namespace memory {
namespace allocation {
// Alloc/Free delegate directly to cudaHostAlloc/cudaFreeHost and keep no
// mutable allocator state, so concurrent use is safe.
bool CPUPinnedAllocator::IsAllocThreadSafe() const { return true; }
// Releases a pinned host allocation obtained from AllocateImpl.
// PADDLE_ENFORCE_CUDA_SUCCESS surfaces the CUDA error code/message if
// cudaFreeHost fails (e.g. pointer not allocated via cudaHostAlloc).
void CPUPinnedAllocator::FreeImpl(Allocation *allocation) {
  PADDLE_ENFORCE_CUDA_SUCCESS(cudaFreeHost(allocation->ptr()));
  delete allocation;
}
// Allocates `size` bytes of page-locked (pinned) host memory.
// cudaHostAllocPortable makes the buffer pinned with respect to all CUDA
// contexts, not only the one current at allocation time.
Allocation *CPUPinnedAllocator::AllocateImpl(size_t size) {
  void *ptr;
  PADDLE_ENFORCE_CUDA_SUCCESS(cudaHostAlloc(&ptr, size, cudaHostAllocPortable));
  return new Allocation(ptr, size, platform::CUDAPinnedPlace());
}
} // namespace allocation
......
......@@ -20,6 +20,7 @@
#include <memory>
#include <mutex> // NOLINT
#include <utility>
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/enforce.h"
......@@ -33,9 +34,12 @@ class RetryAllocator : public Allocator {
: underlying_allocator_(std::move(allocator)), retry_time_(retry_ms) {
PADDLE_ENFORCE_NOT_NULL(
underlying_allocator_,
"UnderlyingAllocator of RetryAllocator must not be null");
PADDLE_ENFORCE(underlying_allocator_->IsAllocThreadSafe(),
"UnderlyingAllocator of RetryAllocator must be thread-safe");
platform::errors::InvalidArgument(
"Underlying allocator of RetryAllocator is NULL"));
PADDLE_ENFORCE_EQ(
underlying_allocator_->IsAllocThreadSafe(), true,
platform::errors::PreconditionNotMet(
"Underlying allocator of RetryAllocator is not thread-safe"));
}
bool IsAllocThreadSafe() const override { return true; }
......
......@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
......@@ -38,7 +39,11 @@ MemoryBlock* MemoryBlock::GetRightBuddy(MetadataCache* cache) {
void MemoryBlock::Split(MetadataCache* cache, size_t size) {
auto desc = cache->LoadDesc(this);
// make sure the split fits
PADDLE_ENFORCE_GE(desc->total_size, size);
PADDLE_ENFORCE_GE(desc->total_size, size,
platform::errors::InvalidArgument(
"The size of memory block (%d) to split is "
"not larger than size of request memory (%d)",
desc->total_size, size));
// bail out if there is no room for another partition
if (desc->total_size - size <= sizeof(MemoryBlock::Desc)) {
......@@ -78,8 +83,12 @@ void MemoryBlock::Merge(MetadataCache* cache, MemoryBlock* right_buddy) {
// only free blocks can be merged
auto desc = cache->LoadDesc(this);
auto rb_desc = cache->LoadDesc(right_buddy);
PADDLE_ENFORCE_EQ(desc->type, FREE_CHUNK);
PADDLE_ENFORCE_EQ(rb_desc->type, FREE_CHUNK);
PADDLE_ENFORCE_EQ(desc->type, FREE_CHUNK,
platform::errors::PreconditionNotMet(
"The destination chunk to merge is not free"));
PADDLE_ENFORCE_EQ(rb_desc->type, FREE_CHUNK,
platform::errors::PreconditionNotMet(
"The source chunk to merge is not free"));
// link this->buddy's buddy
desc->right_buddy = rb_desc->right_buddy;
......@@ -104,8 +113,12 @@ void MemoryBlock::Merge(MetadataCache* cache, MemoryBlock* right_buddy) {
// Marks this block as a free chunk in the metadata cache.
// The two checks below detect double free (already FREE_CHUNK) and metadata
// corruption (INVALID_CHUNK) before the state transition.
void MemoryBlock::MarkAsFree(MetadataCache* cache) {
  auto desc = cache->LoadDesc(this);
  PADDLE_ENFORCE_NE(desc->type, FREE_CHUNK,
                    platform::errors::PreconditionNotMet(
                        "The chunk to mark as free is free already"));
  PADDLE_ENFORCE_NE(desc->type, INVALID_CHUNK,
                    platform::errors::PreconditionNotMet(
                        "The chunk to mark as free is invalid"));
  desc->type = FREE_CHUNK;
  // Refresh the guard words so later CheckGuards() passes for the new state.
  desc->UpdateGuards();
}
......
......@@ -25,14 +25,20 @@ MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {}
// Returns the metadata descriptor for `block`, validating its guard words.
// GPU blocks keep their Desc in the host-side cache_ map (device memory is
// not directly addressable); CPU blocks store the Desc in-place at the head
// of the block itself.
MemoryBlock::Desc* MetadataCache::LoadDesc(MemoryBlock* block) {
  if (uses_gpu_) {
    auto iter = cache_.find(block);
    PADDLE_ENFORCE_NE(
        iter, cache_.end(),
        platform::errors::NotFound("The memory block is not found in cache"));
    auto* desc = &(iter->second);
    PADDLE_ENFORCE_EQ(
        desc->CheckGuards(), true,
        platform::errors::InvalidArgument("Invalid CPU memory access"));
    return desc;
  } else {
    auto* desc = reinterpret_cast<MemoryBlock::Desc*>(block);
    VLOG(10) << "Load MemoryBlock::Desc type=" << desc->type;
    // Guard words bracket the Desc; a mismatch means out-of-bounds writes
    // have corrupted the block header.
    PADDLE_ENFORCE_EQ(
        desc->CheckGuards(), true,
        platform::errors::InvalidArgument("Invalid CPU memory access"));
    return reinterpret_cast<MemoryBlock::Desc*>(block);
  }
}
......
......@@ -22,6 +22,7 @@ limitations under the License. */
#include <sys/mman.h> // for mlock and munlock
#endif
#include <stdlib.h> // for malloc and free
#include <algorithm> // for std::max
#include <string>
#include <utility>
......@@ -55,10 +56,14 @@ void* AlignedMalloc(size_t size) {
#ifdef _WIN32
p = _aligned_malloc(size, alignment);
#else
PADDLE_ENFORCE_EQ(posix_memalign(&p, alignment, size), 0, "Alloc %ld error!",
size);
int error = posix_memalign(&p, alignment, size);
PADDLE_ENFORCE_EQ(
error, 0,
platform::errors::ResourceExhausted(
"Fail to alloc memory of %ld size, error code is %d.", size, error));
#endif
PADDLE_ENFORCE_NOT_NULL(p, "Fail to allocate CPU memory: size = %d .", size);
PADDLE_ENFORCE_NOT_NULL(p, platform::errors::ResourceExhausted(
"Fail to alloc memory of %ld size.", size));
return p;
}
......@@ -152,8 +157,13 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
}
void GPUAllocator::Free(void* p, size_t size, size_t index) {
PADDLE_ENFORCE_EQ(index, 0);
PADDLE_ENFORCE_GE(gpu_alloc_size_, size);
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The index should be 0, index is %d", index));
PADDLE_ENFORCE_GE(gpu_alloc_size_, size,
platform::errors::InvalidArgument(
"The size of memory (%d) to free exceeds the size of "
"allocated gpu memory (%d)",
size, gpu_alloc_size_));
gpu_alloc_size_ -= size;
platform::RecordedCudaFree(p, size, gpu_id_);
......@@ -197,9 +207,14 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
cudaError_t err;
PADDLE_ENFORCE_EQ(index, 1);
PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size);
PADDLE_ENFORCE_EQ(index, 1, platform::errors::InvalidArgument(
"The index should be 1, but got %d", index));
PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size,
platform::errors::InvalidArgument(
"The size of memory (%d) to free exceeds the size of "
"allocated cuda pinned memory (%d)",
size, cuda_pinnd_alloc_size_));
cuda_pinnd_alloc_size_ -= size;
err = cudaFreeHost(p);
......@@ -209,7 +224,11 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
// process is terminating, in which case we don't care if
// cudaFreeHost succeeds.
if (err != cudaErrorCudartUnloading) {
PADDLE_ENFORCE(err, "cudaFreeHost failed in GPUPinnedAllocator::Free.");
PADDLE_ENFORCE_EQ(
err, 0,
platform::errors::Fatal(
"cudaFreeHost failed in GPUPinnedAllocator, error code is %d",
err));
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册