From d424115f9ee651599c98635a5e11780a9940eb3b Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Mon, 19 Nov 2018 10:59:44 +0800
Subject: [PATCH] Clean code

test=develop
---
 paddle/fluid/framework/tensor_util.cc         |  1 -
 .../memory/allocation/allocator_facade.cc     | 61 +++++++++----------
 .../memory/allocation/best_fit_allocator.cc   |  2 +-
 .../memory/allocation/best_fit_allocator.h    |  4 ----
 .../allocation/best_fit_allocator_test.cu     |  1 -
 .../memory/allocation/conditional_allocator.h |  2 --
 6 files changed, 29 insertions(+), 42 deletions(-)

diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index d4cc318a1..8d8f07a1f 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -15,7 +15,6 @@
 #include
 #include
 #include
-#include "../memory/allocation/allocator.h"
 #include "paddle/fluid/framework/data_type.h"
 
 namespace paddle {
diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index 11c31df24..e207a853c 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -64,11 +64,11 @@ class CPUManagedAllocator : public Allocator {
 };
 
 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class ChunkedManagedAllocator : public Allocator {
+class ChunkedAllocator : public Allocator {
  public:
-  explicit ChunkedManagedAllocator(std::unique_ptr<Allocator> system_allocator,
-                                   size_t max_chunk_size, size_t capacity = 1,
-                                   int64_t retry_time = -1)
+  explicit ChunkedAllocator(std::unique_ptr<Allocator> system_allocator,
+                            size_t max_chunk_size, size_t capacity = 1,
+                            int64_t retry_time = -1)
       : max_chunk_size_(max_chunk_size), retry_time_(retry_time) {
     raw_allocator_ = std::move(system_allocator);
@@ -78,12 +78,12 @@
     if (capacity == 1) {
       VLOG(10) << "Create BestFitAllocator with chunk_size "
                << max_chunk_size_;
-      default_allocator_ = BestFitAllocatorCreator();
+      default_allocator_ = CreateAllocatorWithChunk();
     } else {
       VLOG(10) << "Create AutoIncrementAllocator with chunk_size "
                << max_chunk_size_ << " and capacity " << capacity;
       default_allocator_ = std::make_shared<AutoIncrementAllocator>(
-          [this] { return std::move(BestFitAllocatorCreator()); }, capacity);
+          [this] { return std::move(CreateAllocatorWithChunk()); }, capacity);
     }
   }
@@ -100,30 +100,26 @@
     default_allocator_.reset(cond_allocator);
   }
 
-  ~ChunkedManagedAllocator() {
+  ~ChunkedAllocator() override {
     // Specify destruct order.
     default_allocator_.reset();
     chunks_.clear();
     raw_allocator_.reset();
   }
 
-  std::shared_ptr<Allocator> BestFitAllocatorCreator() {
+  std::shared_ptr<Allocator> CreateAllocatorWithChunk() {
     chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_));
     auto* allocation = chunks_.back().get();
-    std::unique_ptr<Allocator> unmanaged_allocator(new LockedAllocator(
+    std::unique_ptr<Allocator> allocator(new LockedAllocator(
         std::unique_ptr<Allocator>(new BestFitAllocator(allocation))));
 
-    if (retry_time_ <= 0) {
-      VLOG(10) << "Create NaiveManagedAllocator without retry";
-      return std::make_shared<AlignedAllocator<64u>>(
-          std::move(unmanaged_allocator));
-    } else {
-      VLOG(10) << "Create RetryAllocator with retry_time " << retry_time_
-               << "ms";
-      auto tmp = std::make_shared<RetryAllocator>(
-          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_));
-      return std::make_shared<AlignedAllocator<64u>>(tmp);
+    if (retry_time_ > 0) {
+      auto* retry_allocator =
+          new RetryAllocator(std::move(allocator), retry_time_);
+      allocator.reset(retry_allocator);
     }
+
+    return std::make_shared<AlignedAllocator<64u>>(std::move(allocator));
   }
 
   bool IsAllocThreadSafe() const override { return true; }
 
@@ -143,13 +139,13 @@
 
 #ifdef PADDLE_WITH_CUDA
-class CUDAManagedAllocator : public ChunkedManagedAllocator {
+class CUDAChunkedAllocator : public ChunkedAllocator {
  public:
-  explicit CUDAManagedAllocator(int dev_id)
-      : ChunkedManagedAllocator(
-            std::unique_ptr<Allocator>(
-                new CUDAAllocator(platform::CUDAPlace(dev_id))),
-            GetMaxChunkSize(dev_id), GetCapcity(dev_id), GetRetryTime()) {}
+  explicit CUDAChunkedAllocator(int dev_id)
+      : ChunkedAllocator(std::unique_ptr<Allocator>(
+                             new CUDAAllocator(platform::CUDAPlace(dev_id))),
+                         GetMaxChunkSize(dev_id), GetCapcity(dev_id),
+                         GetRetryTime()) {}
 
  private:
   static size_t GetMaxChunkSize(int dev_id) {
@@ -168,13 +164,12 @@
   static int64_t GetRetryTime() { return FLAGS_gpu_allocator_retry_time; }
 };
 
-class CUDAPinnedManagedAllocator : public ChunkedManagedAllocator {
+class CUDAPinnedChunkedAllocator : public ChunkedAllocator {
  public:
-  CUDAPinnedManagedAllocator()
-      : ChunkedManagedAllocator(
-            std::unique_ptr<Allocator>(new CPUPinnedAllocator()),
-            platform::CUDAPinnedMaxChunkSize(), GetCapacity(), -1) {
-  }  // never retry
+  CUDAPinnedChunkedAllocator()
+      : ChunkedAllocator(std::unique_ptr<Allocator>(new CPUPinnedAllocator()),
+                         platform::CUDAPinnedMaxChunkSize(), GetCapacity(),
+                         -1) {}  // never retry
 
  private:
   static size_t GetCapacity() {
@@ -226,7 +221,7 @@
     int device_count = platform::GetCUDADeviceCount();
     for (int dev_id = 0; dev_id < device_count; ++dev_id) {
       allocators_[platform::CUDAPlace(dev_id)] =
-          std::make_shared<CUDAManagedAllocator>(dev_id);
+          std::make_shared<CUDAChunkedAllocator>(dev_id);
     }
 #endif
   }
@@ -234,7 +229,7 @@
   void InitCUDAPinnedAllocator() {
 #ifdef PADDLE_WITH_CUDA
     allocators_[platform::CUDAPinnedPlace()] =
-        std::make_shared<CUDAPinnedManagedAllocator>();
+        std::make_shared<CUDAPinnedChunkedAllocator>();
 #endif
   }
 
diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.cc b/paddle/fluid/memory/allocation/best_fit_allocator.cc
index fa9ad51d4..6f3e512fb 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/best_fit_allocator.cc
@@ -13,7 +13,7 @@
 // limitations under the License.
#include "paddle/fluid/memory/allocation/best_fit_allocator.h" -#include +#include #include #include #include diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.h b/paddle/fluid/memory/allocation/best_fit_allocator.h index 141fb55d6..4f10f2b53 100644 --- a/paddle/fluid/memory/allocation/best_fit_allocator.h +++ b/paddle/fluid/memory/allocation/best_fit_allocator.h @@ -106,10 +106,6 @@ class BestFitAllocator : public Allocator { const platform::Place& Place() const { return allocation_->place(); } - // std::unique_ptr Allocate(size_t size, - // Attr attr = kDefault) override; - // void FreeUniquePtr(std::unique_ptr allocation) override; - size_t NumFreeChunks() const; private: diff --git a/paddle/fluid/memory/allocation/best_fit_allocator_test.cu b/paddle/fluid/memory/allocation/best_fit_allocator_test.cu index eb200ffdc..50aecda97 100644 --- a/paddle/fluid/memory/allocation/best_fit_allocator_test.cu +++ b/paddle/fluid/memory/allocation/best_fit_allocator_test.cu @@ -80,7 +80,6 @@ TEST(BestFitAllocator, concurrent_cuda) { th.join(); } } - // allocator.FreeUniquePtr(std::move(cuda_allocation)); } } // namespace allocation diff --git a/paddle/fluid/memory/allocation/conditional_allocator.h b/paddle/fluid/memory/allocation/conditional_allocator.h index 7140e1b30..94cba4432 100644 --- a/paddle/fluid/memory/allocation/conditional_allocator.h +++ b/paddle/fluid/memory/allocation/conditional_allocator.h @@ -45,8 +45,6 @@ class ConditionalAllocator : public Allocator { ConditionalAllocator& AddAllocator(std::function func, std::shared_ptr allocator); - // AllocationPtr Allocate(size_t size, Attr attr) override; - bool IsAllocThreadSafe() const override; protected: -- GitLab