From ff5be2fb7761f762015d9ffe8c28838d156c667c Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Wed, 24 Jun 2020 13:37:10 +0800 Subject: [PATCH] Refine error message in memory folder (#25095) * refine PADDLE_THROW, test=develop * refine error msg, test=develop * refine cuda error, test=develop * follow comments, test=develop * fix compile problem, test=develop * fix bug, test=develop --- .../memory/allocation/aligned_allocator.cc | 9 ++++- .../memory/allocation/allocator_facade.cc | 21 ++++++---- .../memory/allocation/allocator_strategy.cc | 5 ++- .../memory/allocation/best_fit_allocator.cc | 24 +++++++++--- .../memory/allocation/buffered_allocator.cc | 4 +- .../allocation/buffered_allocator_test.cc | 7 +++- .../fluid/memory/allocation/cpu_allocator.cc | 9 ++++- .../cuda_device_context_allocator.h | 13 +++++-- .../memory/allocation/locked_allocator.cc | 7 +++- .../allocation/naive_best_fit_allocator.cc | 33 +++++++++++----- .../memory/allocation/pinned_allocator.cc | 5 ++- .../fluid/memory/allocation/retry_allocator.h | 10 +++-- paddle/fluid/memory/detail/memory_block.cc | 23 ++++++++--- paddle/fluid/memory/detail/meta_cache.cc | 12 ++++-- .../fluid/memory/detail/system_allocator.cc | 39 ++++++++++++++----- 15 files changed, 163 insertions(+), 58 deletions(-) diff --git a/paddle/fluid/memory/allocation/aligned_allocator.cc b/paddle/fluid/memory/allocation/aligned_allocator.cc index c9a031dffc..1d89918bfe 100644 --- a/paddle/fluid/memory/allocation/aligned_allocator.cc +++ b/paddle/fluid/memory/allocation/aligned_allocator.cc @@ -13,6 +13,7 @@ // limitations under the License. 
#include "paddle/fluid/memory/allocation/aligned_allocator.h" + #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -35,9 +36,13 @@ class AlignedAllocation : public Allocation { AlignedAllocator::AlignedAllocator( const std::shared_ptr& underlyning_allocator, size_t alignment) : underlying_allocator_(underlyning_allocator), alignment_(alignment) { - PADDLE_ENFORCE(alignment_ > 0, "alignment must be positive integer"); + PADDLE_ENFORCE_GT( + alignment_, 0, + platform::errors::InvalidArgument( + "Alignment should be larger than 0, but got %d", alignment_)); if (alignment_ & (alignment_ - 1)) { - PADDLE_THROW("alignment must be 2^N, but got %d", alignment_); + PADDLE_THROW(platform::errors::InvalidArgument( + "Alignment should be power of 2 (2^N), but got %d", alignment_)); } } diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc index c851f1b10c..2ab0d69ef8 100644 --- a/paddle/fluid/memory/allocation/allocator_facade.cc +++ b/paddle/fluid/memory/allocation/allocator_facade.cc @@ -12,14 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/memory/allocation/allocator.h" +#include "paddle/fluid/memory/allocation/allocator_facade.h" + #include + #include #include #include #include #include -#include "paddle/fluid/memory/allocation/allocator_facade.h" + +#include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/memory/allocation/allocator_strategy.h" #include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h" #include "paddle/fluid/memory/allocation/cpu_allocator.h" @@ -94,8 +97,8 @@ class AllocatorFacadePrivate { } default: { - PADDLE_THROW("Unsupported allocator strategy: %d", - static_cast(strategy)); + PADDLE_THROW(platform::errors::InvalidArgument( + "Unsupported allocator strategy: %d", static_cast(strategy))); } } InitZeroSizeAllocators(); @@ -115,8 +118,9 @@ class AllocatorFacadePrivate { : allocators_) : zero_size_allocators_); auto iter = allocators.find(place); - PADDLE_ENFORCE(iter != allocators.end(), - "No such allocator for the place, %s", place); + PADDLE_ENFORCE_NE(iter, allocators.end(), + platform::errors::NotFound( + "No allocator found for the place, %s", place)); return iter->second; } @@ -208,7 +212,10 @@ class AllocatorFacadePrivate { } void WrapCUDARetryAllocator(size_t retry_time) { - PADDLE_ENFORCE_GT(retry_time, 0, "Retry time must be larger than 0"); + PADDLE_ENFORCE_GT( + retry_time, 0, + platform::errors::InvalidArgument( + "Retry time should be larger than 0, but got %d", retry_time)); for (auto& pair : allocators_) { if (platform::is_gpu_place(pair.first)) { pair.second = std::make_shared(pair.second, retry_time); diff --git a/paddle/fluid/memory/allocation/allocator_strategy.cc b/paddle/fluid/memory/allocation/allocator_strategy.cc index 74757439fd..e1c7b243be 100644 --- a/paddle/fluid/memory/allocation/allocator_strategy.cc +++ b/paddle/fluid/memory/allocation/allocator_strategy.cc @@ -36,7 +36,10 @@ static AllocatorStrategy GetStrategyFromFlag() { return AllocatorStrategy::kThreadLocal; } - 
PADDLE_THROW("Unsupported allocator strategy: %s", FLAGS_allocator_strategy); + PADDLE_THROW(platform::errors::InvalidArgument( + "Unsupported allocator strategy: %s, candidates are naive_best_fit, " + "auto_growth or thread_local.", + FLAGS_allocator_strategy)); } AllocatorStrategy GetAllocatorStrategy() { diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.cc b/paddle/fluid/memory/allocation/best_fit_allocator.cc index 126464f049..957dac4d03 100644 --- a/paddle/fluid/memory/allocation/best_fit_allocator.cc +++ b/paddle/fluid/memory/allocation/best_fit_allocator.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "paddle/fluid/memory/allocation/best_fit_allocator.h" + #include #include #include @@ -61,8 +62,13 @@ BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size, auto to_split_it = bin_iterator->second; free_chunks_[free_chunk_offset].erase(bin_iterator); - PADDLE_ENFORCE(to_split_it->is_free); - PADDLE_ENFORCE_GE(to_split_it->size_, request_size); + PADDLE_ENFORCE_EQ(to_split_it->is_free, true, + platform::errors::PreconditionNotMet( + "The memory chunk to split is not free")); + PADDLE_ENFORCE_GE(to_split_it->size_, request_size, + platform::errors::PreconditionNotMet( + "The size of memory chunk to split is " + "not larger than size of request memory")); auto remaining_size = to_split_it->size_ - request_size; details::Chunk to_use; @@ -99,7 +105,9 @@ void BestFitAllocator::EraseFreeNode(const ListIt& it) { while (map_it->second != it && map_it != free_map.end()) { ++map_it; } - PADDLE_ENFORCE(map_it != free_map.end()); + PADDLE_ENFORCE_NE( + map_it, free_map.end(), + platform::errors::NotFound("The node to erase is not found in map")); free_map.erase(map_it); } size_t BestFitAllocator::NumFreeChunks() const { @@ -111,10 +119,14 @@ size_t BestFitAllocator::NumFreeChunks() const { } void BestFitAllocator::FreeImpl(Allocation* allocation) { auto* bf_allocation = dynamic_cast(allocation); -
PADDLE_ENFORCE_NOT_NULL(bf_allocation, - "The input allocation is not BestFitAllocation."); + PADDLE_ENFORCE_NOT_NULL( + bf_allocation, + platform::errors::InvalidArgument( + "The input allocation is not type of BestFitAllocation.")); auto chunk_it = bf_allocation->ChunkIterator(); - PADDLE_ENFORCE(!chunk_it->is_free); + PADDLE_ENFORCE_EQ(chunk_it->is_free, false, + platform::errors::PreconditionNotMet( + "The chunk of allocation to free is freed already")); chunk_it->is_free = true; if (chunk_it != chunks_.begin()) { auto prev_it = chunk_it; diff --git a/paddle/fluid/memory/allocation/buffered_allocator.cc b/paddle/fluid/memory/allocation/buffered_allocator.cc index d80616b7a8..7eed81a712 100644 --- a/paddle/fluid/memory/allocation/buffered_allocator.cc +++ b/paddle/fluid/memory/allocation/buffered_allocator.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "paddle/fluid/memory/allocation/buffered_allocator.h" + #include #include #include @@ -25,7 +26,8 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr allocator) : underlying_allocator_(std::move(allocator)) { PADDLE_ENFORCE_NOT_NULL( underlying_allocator_, - "Underlying allocator of BufferedAllocator must not be null"); + platform::errors::InvalidArgument( + "Underlying allocator of BufferedAllocator is NULL")); if (underlying_allocator_->IsAllocThreadSafe()) { mtx_.reset(new std::mutex()); } diff --git a/paddle/fluid/memory/allocation/buffered_allocator_test.cc b/paddle/fluid/memory/allocation/buffered_allocator_test.cc index e4825233d5..0bfa10a161 100644 --- a/paddle/fluid/memory/allocation/buffered_allocator_test.cc +++ b/paddle/fluid/memory/allocation/buffered_allocator_test.cc @@ -13,8 +13,11 @@ // limitations under the License. 
#include "paddle/fluid/memory/allocation/buffered_allocator.h" + #include + #include + #include "paddle/fluid/memory/allocation/best_fit_allocator.h" #include "paddle/fluid/memory/allocation/cpu_allocator.h" #include "paddle/fluid/memory/allocation/locked_allocator.h" @@ -67,7 +70,9 @@ class StubAllocator : public Allocator { protected: void FreeImpl(Allocation *allocation) override { auto *alloc = dynamic_cast(allocation); - PADDLE_ENFORCE_NOT_NULL(alloc); + PADDLE_ENFORCE_NOT_NULL( + alloc, platform::errors::InvalidArgument( + "The input allocation is not type of StubAllocation.")); if (alloc->ptr()) delete[] static_cast(alloc->ptr()); ++destruct_count_; delete allocation; diff --git a/paddle/fluid/memory/allocation/cpu_allocator.cc b/paddle/fluid/memory/allocation/cpu_allocator.cc index 580cf1af56..b096fbc112 100644 --- a/paddle/fluid/memory/allocation/cpu_allocator.cc +++ b/paddle/fluid/memory/allocation/cpu_allocator.cc @@ -13,7 +13,9 @@ // limitations under the License. #include "paddle/fluid/memory/allocation/cpu_allocator.h" + #include + #include namespace paddle { @@ -37,8 +39,11 @@ Allocation *CPUAllocator::AllocateImpl(size_t size) { #ifdef _WIN32 p = _aligned_malloc(size, kAlignment); #else - PADDLE_ENFORCE_EQ(posix_memalign(&p, kAlignment, size), 0, "Alloc %ld error!", - size); + int error = posix_memalign(&p, kAlignment, size); + PADDLE_ENFORCE_EQ( + error, 0, + platform::errors::ResourceExhausted( + "Fail to alloc memory of %ld size, error code is %d.", size, error)); #endif return new Allocation(p, size, platform::CPUPlace()); } diff --git a/paddle/fluid/memory/allocation/cuda_device_context_allocator.h b/paddle/fluid/memory/allocation/cuda_device_context_allocator.h index d8a4197313..a8e458a999 100644 --- a/paddle/fluid/memory/allocation/cuda_device_context_allocator.h +++ b/paddle/fluid/memory/allocation/cuda_device_context_allocator.h @@ -15,6 +15,7 @@ #pragma once #include + #include #include #include @@ -48,7 +49,9 @@ class 
CUDADeviceContextAllocation : public Allocation { ~CUDADeviceContextAllocation() { PADDLE_ENFORCE_NOT_NULL( - dev_ctx_, "Didn't set device context for CUDADeviceContextAllocation"); + dev_ctx_, + platform::errors::PreconditionNotMet( + "Device context is not set for CUDADeviceContextAllocation")); auto *p_allocation = underlying_allocation_.release(); VLOG(4) << "Adding callback to delete CUDADeviceContextAllocation at " << p_allocation; @@ -94,7 +97,8 @@ class CUDADeviceContextAllocator : public Allocator { Allocation *AllocateImpl(size_t size) override { PADDLE_ENFORCE_NOT_NULL( default_stream_, - "Didn't set default stream for CUDADeviceContextAllocator"); + platform::errors::PreconditionNotMet( + "Default stream is not set for CUDADeviceContextAllocator")); platform::CUDADeviceGuard guard(place_.device); auto allocation = new CUDADeviceContextAllocation(memory::Alloc(place_, size)); @@ -130,8 +134,9 @@ class CUDADeviceContextAllocatorPool { AllocationPtr Alloc(const platform::CUDADeviceContext &dev_ctx, size_t size) { auto iter = allocators_.find( BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace())); - PADDLE_ENFORCE_EQ(iter != allocators_.end(), true, - "CUDADeviceContextAllocatorPool initialization error"); + PADDLE_ENFORCE_NE( + iter, allocators_.end(), + platform::errors::NotFound("No allocator found for CUDAPlace.")); auto &allocator = iter->second; AllocationPtr allocation = allocator->Allocate(size); static_cast(allocation.get()) diff --git a/paddle/fluid/memory/allocation/locked_allocator.cc b/paddle/fluid/memory/allocation/locked_allocator.cc index a912807645..4e9adbf8ff 100644 --- a/paddle/fluid/memory/allocation/locked_allocator.cc +++ b/paddle/fluid/memory/allocation/locked_allocator.cc @@ -13,8 +13,10 @@ // limitations under the License. 
#include "paddle/fluid/memory/allocation/locked_allocator.h" + #include // NOLINT #include + #include "paddle/fluid/platform/lock_guard_ptr.h" namespace paddle { @@ -26,7 +28,10 @@ bool LockedAllocator::IsAllocThreadSafe() const { return true; } LockedAllocator::LockedAllocator( std::shared_ptr underlying_allocator) : underlying_allocator_(std::move(underlying_allocator)) { - PADDLE_ENFORCE_NOT_NULL(underlying_allocator_); + PADDLE_ENFORCE_NOT_NULL( + underlying_allocator_, + platform::errors::InvalidArgument( + "Underlying allocator of LockedAllocator is NULL")); if (!underlying_allocator_->IsAllocThreadSafe()) { mtx_.reset(new std::mutex()); } diff --git a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc index a0ee56c54e..907a266e7b 100644 --- a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc +++ b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc @@ -12,13 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h" + #include #include #include #include #include "glog/logging.h" -#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h" #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/gpu_info.h" @@ -125,7 +126,11 @@ class GPUBuddyAllocatorList { BuddyAllocator *Get(int gpu_id) { auto pos = std::distance( devices_.begin(), std::find(devices_.begin(), devices_.end(), gpu_id)); - PADDLE_ENFORCE_LT(pos, devices_.size()); + PADDLE_ENFORCE_LT(pos, devices_.size(), + platform::errors::OutOfRange( + "The index exceeds the size of devices, the size of " + "devices is %d, the index is %d", + devices_.size(), pos)); std::call_once(*init_flags_[pos], [this, pos] { platform::SetDeviceId(devices_[pos]); @@ -166,7 +171,8 @@ size_t Used(const platform::CUDAPlace &place) { #ifdef PADDLE_WITH_CUDA return GetGPUBuddyAllocator(place.device)->Used(); #else - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPlace' is not supported in CPU only device.")); #endif } @@ -195,7 +201,8 @@ void *Alloc(const platform::CUDAPlace &place, } return ptr; #else - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPlace' is not supported in CPU only device.")); #endif } @@ -205,7 +212,8 @@ void Free(const platform::CUDAPlace &place, void *p, #ifdef PADDLE_WITH_CUDA GetGPUBuddyAllocator(place.device)->Free(p); #else - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPlace' is not supported in CPU only device.")); #endif } @@ -230,7 +238,8 @@ size_t Used(const platform::CUDAPinnedPlace &place) { #ifdef PADDLE_WITH_CUDA return GetCUDAPinnedBuddyAllocator()->Used(); #else - PADDLE_THROW("'CUDAPinnedPlace' is not 
supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPinnedPlace' is not supported in CPU only device.")); #endif } @@ -250,7 +259,8 @@ void *Alloc(const platform::CUDAPinnedPlace &place, } return ptr; #else - PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPinnedPlace' is not supported in CPU only device.")); #endif } @@ -260,7 +270,8 @@ void Free(const platform::CUDAPinnedPlace &place, #ifdef PADDLE_WITH_CUDA GetCUDAPinnedBuddyAllocator()->Free(p); #else - PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPinnedPlace' is not supported in CPU only device.")); #endif } @@ -298,7 +309,8 @@ size_t Usage::operator()(const platform::CUDAPlace &gpu) const { #ifdef PADDLE_WITH_CUDA return Used(gpu); #else - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPlace' is not supported in CPU only device.")); #endif } @@ -306,7 +318,8 @@ size_t Usage::operator()(const platform::CUDAPinnedPlace &cuda_pinned) const { #ifdef PADDLE_WITH_CUDA return Used(cuda_pinned); #else - PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); + PADDLE_THROW(platform::errors::PermissionDenied( + "'CUDAPinnedPlace' is not supported in CPU only device.")); #endif } } // namespace legacy diff --git a/paddle/fluid/memory/allocation/pinned_allocator.cc b/paddle/fluid/memory/allocation/pinned_allocator.cc index 35391167fe..3ea2ecf353 100644 --- a/paddle/fluid/memory/allocation/pinned_allocator.cc +++ b/paddle/fluid/memory/allocation/pinned_allocator.cc @@ -13,6 +13,7 @@ // limitations under the License. 
#include "paddle/fluid/memory/allocation/pinned_allocator.h" + #include #include @@ -21,12 +22,12 @@ namespace memory { namespace allocation { bool CPUPinnedAllocator::IsAllocThreadSafe() const { return true; } void CPUPinnedAllocator::FreeImpl(Allocation *allocation) { - PADDLE_ENFORCE(cudaFreeHost(allocation->ptr())); + PADDLE_ENFORCE_CUDA_SUCCESS(cudaFreeHost(allocation->ptr())); delete allocation; } Allocation *CPUPinnedAllocator::AllocateImpl(size_t size) { void *ptr; - PADDLE_ENFORCE(cudaHostAlloc(&ptr, size, cudaHostAllocPortable)); + PADDLE_ENFORCE_CUDA_SUCCESS(cudaHostAlloc(&ptr, size, cudaHostAllocPortable)); return new Allocation(ptr, size, platform::CUDAPinnedPlace()); } } // namespace allocation diff --git a/paddle/fluid/memory/allocation/retry_allocator.h b/paddle/fluid/memory/allocation/retry_allocator.h index 7c218e25c4..4a787ff2d7 100644 --- a/paddle/fluid/memory/allocation/retry_allocator.h +++ b/paddle/fluid/memory/allocation/retry_allocator.h @@ -20,6 +20,7 @@ #include #include // NOLINT #include + #include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/platform/enforce.h" @@ -33,9 +34,12 @@ class RetryAllocator : public Allocator { : underlying_allocator_(std::move(allocator)), retry_time_(retry_ms) { PADDLE_ENFORCE_NOT_NULL( underlying_allocator_, - "UnderlyingAllocator of RetryAllocator must not be null"); - PADDLE_ENFORCE(underlying_allocator_->IsAllocThreadSafe(), - "UnderlyingAllocator of RetryAllocator must be thread-safe"); + platform::errors::InvalidArgument( + "Underlying allocator of RetryAllocator is NULL")); + PADDLE_ENFORCE_EQ( + underlying_allocator_->IsAllocThreadSafe(), true, + platform::errors::PreconditionNotMet( + "Underlying allocator of RetryAllocator is not thread-safe")); } bool IsAllocThreadSafe() const override { return true; } diff --git a/paddle/fluid/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc index 2476326039..11bccdd411 100644 --- 
a/paddle/fluid/memory/detail/memory_block.cc +++ b/paddle/fluid/memory/detail/memory_block.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/memory/detail/memory_block.h" + #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -38,7 +39,11 @@ MemoryBlock* MemoryBlock::GetRightBuddy(MetadataCache* cache) { void MemoryBlock::Split(MetadataCache* cache, size_t size) { auto desc = cache->LoadDesc(this); // make sure the split fits - PADDLE_ENFORCE_GE(desc->total_size, size); + PADDLE_ENFORCE_GE(desc->total_size, size, + platform::errors::InvalidArgument( + "The size of memory block (%d) to split is " + "not larger than size of request memory (%d)", + desc->total_size, size)); // bail out if there is no room for another partition if (desc->total_size - size <= sizeof(MemoryBlock::Desc)) { @@ -78,8 +83,12 @@ void MemoryBlock::Merge(MetadataCache* cache, MemoryBlock* right_buddy) { // only free blocks can be merged auto desc = cache->LoadDesc(this); auto rb_desc = cache->LoadDesc(right_buddy); - PADDLE_ENFORCE_EQ(desc->type, FREE_CHUNK); - PADDLE_ENFORCE_EQ(rb_desc->type, FREE_CHUNK); + PADDLE_ENFORCE_EQ(desc->type, FREE_CHUNK, + platform::errors::PreconditionNotMet( + "The destination chunk to merge is not free")); + PADDLE_ENFORCE_EQ(rb_desc->type, FREE_CHUNK, + platform::errors::PreconditionNotMet( + "The source chunk to merge is not free")); // link this->buddy's buddy desc->right_buddy = rb_desc->right_buddy; @@ -104,8 +113,12 @@ void MemoryBlock::Merge(MetadataCache* cache, MemoryBlock* right_buddy) { void MemoryBlock::MarkAsFree(MetadataCache* cache) { // check for double free or corruption auto desc = cache->LoadDesc(this); - PADDLE_ENFORCE_NE(desc->type, FREE_CHUNK); - PADDLE_ENFORCE_NE(desc->type, INVALID_CHUNK); + PADDLE_ENFORCE_NE(desc->type, FREE_CHUNK, + platform::errors::PreconditionNotMet( + "The chunk to mark as free is free already")); + 
PADDLE_ENFORCE_NE(desc->type, INVALID_CHUNK, + platform::errors::PreconditionNotMet( + "The chunk to mark as free is invalid")); desc->type = FREE_CHUNK; desc->UpdateGuards(); } diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc index 26667e3abf..f6d48d8685 100644 --- a/paddle/fluid/memory/detail/meta_cache.cc +++ b/paddle/fluid/memory/detail/meta_cache.cc @@ -25,14 +25,20 @@ MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {} MemoryBlock::Desc* MetadataCache::LoadDesc(MemoryBlock* block) { if (uses_gpu_) { auto iter = cache_.find(block); - PADDLE_ENFORCE_NE(iter, cache_.end()); + PADDLE_ENFORCE_NE( + iter, cache_.end(), + platform::errors::NotFound("The memory block is not found in cache")); auto* desc = &(iter->second); - PADDLE_ENFORCE_EQ(desc->CheckGuards(), true, "Invalid CPU memory access"); + PADDLE_ENFORCE_EQ( + desc->CheckGuards(), true, + platform::errors::InvalidArgument("Invalid CPU memory access")); return desc; } else { auto* desc = reinterpret_cast(block); VLOG(10) << "Load MemoryBlock::Desc type=" << desc->type; - PADDLE_ENFORCE_EQ(desc->CheckGuards(), true, "Invalid CPU memory access"); + PADDLE_ENFORCE_EQ( + desc->CheckGuards(), true, + platform::errors::InvalidArgument("Invalid CPU memory access")); return reinterpret_cast(block); } } diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index a3f96ea587..c5b9d88433 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -21,7 +21,8 @@ limitations under the License. 
*/ #else #include // for mlock and munlock #endif -#include // for malloc and free +#include // for malloc and free + #include // for std::max #include #include @@ -55,10 +56,14 @@ void* AlignedMalloc(size_t size) { #ifdef _WIN32 p = _aligned_malloc(size, alignment); #else - PADDLE_ENFORCE_EQ(posix_memalign(&p, alignment, size), 0, "Alloc %ld error!", - size); + int error = posix_memalign(&p, alignment, size); + PADDLE_ENFORCE_EQ( + error, 0, + platform::errors::ResourceExhausted( + "Fail to alloc memory of %ld size, error code is %d.", size, error)); #endif - PADDLE_ENFORCE_NOT_NULL(p, "Fail to allocate CPU memory: size = %d .", size); + PADDLE_ENFORCE_NOT_NULL(p, platform::errors::ResourceExhausted( + "Fail to alloc memory of %ld size.", size)); return p; } @@ -152,8 +157,13 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) { } void GPUAllocator::Free(void* p, size_t size, size_t index) { - PADDLE_ENFORCE_EQ(index, 0); - PADDLE_ENFORCE_GE(gpu_alloc_size_, size); + PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( + "The index should be 0, index is %d", index)); + PADDLE_ENFORCE_GE(gpu_alloc_size_, size, + platform::errors::InvalidArgument( + "The size of memory (%d) to free exceeds the size of " + "allocated gpu memory (%d)", + size, gpu_alloc_size_)); gpu_alloc_size_ -= size; platform::RecordedCudaFree(p, size, gpu_id_); @@ -197,9 +207,14 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) { void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) { cudaError_t err; - PADDLE_ENFORCE_EQ(index, 1); - - PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size); + PADDLE_ENFORCE_EQ(index, 1, platform::errors::InvalidArgument( + "The index should be 1, but got %d", index)); + + PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size, + platform::errors::InvalidArgument( + "The size of memory (%d) to free exceeds the size of " + "allocated cuda pinned memory (%d)", + size, cuda_pinnd_alloc_size_)); cuda_pinnd_alloc_size_ -= size; err = 
cudaFreeHost(p); @@ -209,7 +224,11 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) { // process is terminating, in which case we don't care if // cudaFreeHost succeeds. if (err != cudaErrorCudartUnloading) { - PADDLE_ENFORCE(err, "cudaFreeHost failed in GPUPinnedAllocator::Free."); + PADDLE_ENFORCE_EQ( + err, 0, + platform::errors::Fatal( + "cudaFreeHost failed in GPUPinnedAllocator, error code is %d", + err)); } } -- GitLab