Commit d424115f authored by Yu Yang

Clean code

test=develop
Parent b12c77da
@@ -15,7 +15,6 @@
 #include <algorithm>
 #include <limits>
 #include <vector>
-#include "../memory/allocation/allocator.h"
 #include "paddle/fluid/framework/data_type.h"

 namespace paddle {
......
@@ -64,11 +64,11 @@ class CPUManagedAllocator : public Allocator {
 };

 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class ChunkedManagedAllocator : public Allocator {
+class ChunkedAllocator : public Allocator {
  public:
-  explicit ChunkedManagedAllocator(std::unique_ptr<Allocator> system_allocator,
-                                   size_t max_chunk_size, size_t capacity = 1,
-                                   int64_t retry_time = -1)
+  explicit ChunkedAllocator(std::unique_ptr<Allocator> system_allocator,
+                            size_t max_chunk_size, size_t capacity = 1,
+                            int64_t retry_time = -1)
       : max_chunk_size_(max_chunk_size), retry_time_(retry_time) {
     raw_allocator_ = std::move(system_allocator);
@@ -78,12 +78,12 @@ class ChunkedManagedAllocator : public Allocator {
     if (capacity == 1) {
       VLOG(10) << "Create BestFitAllocator with chunk_size "
                << max_chunk_size_;
-      default_allocator_ = BestFitAllocatorCreator();
+      default_allocator_ = CreateAllocatorWithChunk();
     } else {
       VLOG(10) << "Create AutoIncrementAllocator with chunk_size "
                << max_chunk_size_ << " and capacity " << capacity;
       default_allocator_ = std::make_shared<AutoIncrementAllocator>(
-          [this] { return std::move(BestFitAllocatorCreator()); }, capacity);
+          [this] { return std::move(CreateAllocatorWithChunk()); }, capacity);
     }
   }
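The branch above chooses between a single chunk-backed allocator (capacity == 1) and an AutoIncrementAllocator that grows on demand by calling a factory lambda. A minimal standalone sketch of that factory-lambda pattern, using made-up names rather than Paddle's real classes, could look like this:

#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Illustrative stand-in for an allocator that manages one chunk of memory.
struct ChunkAllocatorSketch {
  explicit ChunkAllocatorSketch(size_t chunk_size) : size(chunk_size) {}
  size_t size;
};

// Grows a pool of chunk allocators lazily by invoking a factory callback,
// roughly the role the AutoIncrementAllocator plays in the hunk above.
class AutoGrowingPoolSketch {
 public:
  using Factory = std::function<std::shared_ptr<ChunkAllocatorSketch>()>;
  AutoGrowingPoolSketch(Factory factory, size_t capacity)
      : factory_(std::move(factory)), capacity_(capacity) {}

  // Hand out the most recently created chunk, creating a new one
  // until the pool reaches its capacity.
  std::shared_ptr<ChunkAllocatorSketch> Get() {
    if (pool_.size() < capacity_) pool_.push_back(factory_());
    return pool_.back();
  }

 private:
  Factory factory_;
  size_t capacity_;
  std::vector<std::shared_ptr<ChunkAllocatorSketch>> pool_;
};

int main() {
  const size_t chunk_size = 1 << 20;  // hypothetical 1 MiB chunks
  AutoGrowingPoolSketch pool(
      [chunk_size] { return std::make_shared<ChunkAllocatorSketch>(chunk_size); },
      /*capacity=*/4);
  std::cout << "chunk size: " << pool.Get()->size << "\n";
  return 0;
}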
@@ -100,30 +100,26 @@ class ChunkedManagedAllocator : public Allocator {
     default_allocator_.reset(cond_allocator);
   }

-  ~ChunkedManagedAllocator() {
+  ~ChunkedAllocator() override {
     // Specify destruct order.
     default_allocator_.reset();
     chunks_.clear();
     raw_allocator_.reset();
   }

-  std::shared_ptr<Allocator> BestFitAllocatorCreator() {
+  std::shared_ptr<Allocator> CreateAllocatorWithChunk() {
     chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_));
     auto* allocation = chunks_.back().get();
-    std::unique_ptr<Allocator> unmanaged_allocator(new LockedAllocator(
-        std::unique_ptr<Allocator>(new BestFitAllocator(allocation))));
-    if (retry_time_ <= 0) {
-      VLOG(10) << "Create NaiveManagedAllocator without retry";
-      return std::make_shared<AlignedAllocator<64u>>(
-          std::move(unmanaged_allocator));
-    } else {
-      VLOG(10) << "Create RetryAllocator with retry_time " << retry_time_
-               << "ms";
-      auto tmp = std::make_shared<RetryAllocator>(
-          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_));
-      return std::make_shared<AlignedAllocator<64u>>(tmp);
-    }
+    std::unique_ptr<Allocator> allocator(new LockedAllocator(
+        std::unique_ptr<Allocator>(new BestFitAllocator(allocation))));
+
+    if (retry_time_ > 0) {
+      auto* retry_allocator =
+          new RetryAllocator(std::move(allocator), retry_time_);
+      allocator.reset(retry_allocator);
+    }
+
+    return std::make_shared<AlignedAllocator<64u>>(std::move(allocator));
   }

   bool IsAllocThreadSafe() const override { return true; }
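The rewritten CreateAllocatorWithChunk builds a chain of wrappers around one chunk: a BestFitAllocator inside a LockedAllocator, optionally a RetryAllocator when retry_time_ > 0, and always an AlignedAllocator<64u> on the outside. A simplified, self-contained sketch of that conditional decorator chain follows; the *Sketch types are illustrative stand-ins, not Paddle's API:

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

// Minimal allocator interface for the sketch.
struct AllocatorSketch {
  virtual ~AllocatorSketch() = default;
  virtual std::string Describe() const = 0;
};

struct BestFitSketch : AllocatorSketch {
  std::string Describe() const override { return "best-fit"; }
};

// Generic decorator that forwards to the allocator it wraps.
struct DecoratorSketch : AllocatorSketch {
  DecoratorSketch(std::unique_ptr<AllocatorSketch> inner, std::string tag)
      : inner_(std::move(inner)), tag_(std::move(tag)) {}
  std::string Describe() const override {
    return tag_ + "(" + inner_->Describe() + ")";
  }
  std::unique_ptr<AllocatorSketch> inner_;
  std::string tag_;
};

// Mirrors the control flow above: always lock the best-fit allocator,
// add a retry layer only when retry_time > 0, then align the result.
std::shared_ptr<AllocatorSketch> BuildChain(int64_t retry_time) {
  std::unique_ptr<AllocatorSketch> allocator(new DecoratorSketch(
      std::unique_ptr<AllocatorSketch>(new BestFitSketch()), "locked"));
  if (retry_time > 0) {
    auto* retried = new DecoratorSketch(std::move(allocator), "retry");
    allocator.reset(retried);  // same two-step handoff as in the diff
  }
  return std::make_shared<DecoratorSketch>(std::move(allocator), "aligned<64>");
}

int main() {
  std::cout << BuildChain(-1)->Describe() << "\n";   // aligned<64>(locked(best-fit))
  std::cout << BuildChain(500)->Describe() << "\n";  // aligned<64>(retry(locked(best-fit)))
  return 0;
}

Keeping the retry wrap as two statements, as the hunk does, makes the ownership handoff explicit: the old unique_ptr is moved into the new wrapper before reset() installs that wrapper as the new head of the chain.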
@@ -143,13 +139,13 @@ class CUDAManagedAllocator : public ChunkedManagedAllocator {
 #ifdef PADDLE_WITH_CUDA
-class CUDAManagedAllocator : public ChunkedManagedAllocator {
+class CUDAChunkedAllocator : public ChunkedAllocator {
  public:
-  explicit CUDAManagedAllocator(int dev_id)
-      : ChunkedManagedAllocator(
-            std::unique_ptr<Allocator>(
-                new CUDAAllocator(platform::CUDAPlace(dev_id))),
-            GetMaxChunkSize(dev_id), GetCapcity(dev_id), GetRetryTime()) {}
+  explicit CUDAChunkedAllocator(int dev_id)
+      : ChunkedAllocator(std::unique_ptr<Allocator>(
+                             new CUDAAllocator(platform::CUDAPlace(dev_id))),
+                         GetMaxChunkSize(dev_id), GetCapcity(dev_id),
+                         GetRetryTime()) {}

  private:
   static size_t GetMaxChunkSize(int dev_id) {
@@ -168,13 +164,12 @@ class CUDAManagedAllocator : public ChunkedManagedAllocator {
   static int64_t GetRetryTime() { return FLAGS_gpu_allocator_retry_time; }
 };

-class CUDAPinnedManagedAllocator : public ChunkedManagedAllocator {
+class CUDAPinnedChunkedAllocator : public ChunkedAllocator {
  public:
-  CUDAPinnedManagedAllocator()
-      : ChunkedManagedAllocator(
-            std::unique_ptr<Allocator>(new CPUPinnedAllocator()),
-            platform::CUDAPinnedMaxChunkSize(), GetCapacity(), -1) {
-  }  // never retry
+  CUDAPinnedChunkedAllocator()
+      : ChunkedAllocator(std::unique_ptr<Allocator>(new CPUPinnedAllocator()),
+                         platform::CUDAPinnedMaxChunkSize(), GetCapacity(),
+                         -1) {}  // never retry

  private:
   static size_t GetCapacity() {
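Both GPU-facing classes are now thin subclasses that only choose a system allocator and the chunk-size/capacity/retry parameters, delegating everything else to ChunkedAllocator. A hedged standalone sketch of that delegation pattern follows; all names and parameter values below are invented for illustration:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

// Stand-in for a low-level "system" allocator (device memory, pinned host, ...).
struct SystemAllocSketch {
  explicit SystemAllocSketch(std::string n) : name(std::move(n)) {}
  std::string name;
};

// Common chunked base: owns the system allocator and the pooling policy.
class ChunkedBaseSketch {
 public:
  ChunkedBaseSketch(std::unique_ptr<SystemAllocSketch> system_allocator,
                    size_t max_chunk_size, size_t capacity, int64_t retry_time)
      : system_(std::move(system_allocator)),
        max_chunk_size_(max_chunk_size),
        capacity_(capacity),
        retry_time_(retry_time) {}

  void Print() const {
    std::cout << system_->name << ": chunk=" << max_chunk_size_
              << " capacity=" << capacity_ << " retry=" << retry_time_ << "\n";
  }

 private:
  std::unique_ptr<SystemAllocSketch> system_;
  size_t max_chunk_size_;
  size_t capacity_;
  int64_t retry_time_;
};

// Device-specific subclasses only pick the system allocator and parameters.
class GpuChunkedSketch : public ChunkedBaseSketch {
 public:
  explicit GpuChunkedSketch(int dev_id)
      : ChunkedBaseSketch(
            std::unique_ptr<SystemAllocSketch>(
                new SystemAllocSketch("gpu:" + std::to_string(dev_id))),
            /*max_chunk_size=*/1 << 26, /*capacity=*/8, /*retry_time=*/500) {}
};

class PinnedChunkedSketch : public ChunkedBaseSketch {
 public:
  PinnedChunkedSketch()
      : ChunkedBaseSketch(
            std::unique_ptr<SystemAllocSketch>(new SystemAllocSketch("pinned")),
            /*max_chunk_size=*/1 << 22, /*capacity=*/1, /*retry_time=*/-1) {}
};

int main() {
  GpuChunkedSketch(0).Print();
  PinnedChunkedSketch().Print();
  return 0;
}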
@@ -226,7 +221,7 @@ class AllocatorFacadePrivate {
     int device_count = platform::GetCUDADeviceCount();
     for (int dev_id = 0; dev_id < device_count; ++dev_id) {
       allocators_[platform::CUDAPlace(dev_id)] =
-          std::make_shared<CUDAManagedAllocator>(dev_id);
+          std::make_shared<CUDAChunkedAllocator>(dev_id);
     }
 #endif
   }
@@ -234,7 +229,7 @@ class AllocatorFacadePrivate {
   void InitCUDAPinnedAllocator() {
 #ifdef PADDLE_WITH_CUDA
     allocators_[platform::CUDAPinnedPlace()] =
-        std::make_shared<CUDAPinnedManagedAllocator>();
+        std::make_shared<CUDAPinnedChunkedAllocator>();
 #endif
   }
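AllocatorFacadePrivate keeps one allocator per place in a map and hands back the matching instance at allocation time. A rough standalone sketch of that place-to-allocator registry, keyed by a simple enum instead of platform::Place (all names below are illustrative):

#include <iostream>
#include <map>
#include <memory>
#include <string>

enum class PlaceSketch { kCPU, kGPU0, kCUDAPinned };

struct AllocatorSketch {
  virtual ~AllocatorSketch() = default;
  virtual std::string Name() const = 0;
};

struct CpuAllocatorSketch : AllocatorSketch {
  std::string Name() const override { return "cpu"; }
};

struct GpuChunkedAllocatorSketch : AllocatorSketch {
  std::string Name() const override { return "gpu-chunked"; }
};

// One allocator instance per place, created once and looked up per request.
class FacadeSketch {
 public:
  FacadeSketch() {
    allocators_[PlaceSketch::kCPU] = std::make_shared<CpuAllocatorSketch>();
    allocators_[PlaceSketch::kGPU0] = std::make_shared<GpuChunkedAllocatorSketch>();
  }

  std::shared_ptr<AllocatorSketch> GetAllocator(PlaceSketch place) const {
    auto it = allocators_.find(place);
    return it == allocators_.end() ? nullptr : it->second;
  }

 private:
  std::map<PlaceSketch, std::shared_ptr<AllocatorSketch>> allocators_;
};

int main() {
  FacadeSketch facade;
  std::cout << facade.GetAllocator(PlaceSketch::kGPU0)->Name() << "\n";  // gpu-chunked
  return 0;
}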
......
@@ -13,7 +13,7 @@
 // limitations under the License.

 #include "paddle/fluid/memory/allocation/best_fit_allocator.h"
-#include <bits/stdc++.h>
+#include <cmath>
 #include <list>
 #include <map>
 #include <string>
......
@@ -106,10 +106,6 @@ class BestFitAllocator : public Allocator {
   const platform::Place& Place() const { return allocation_->place(); }

-  // std::unique_ptr<Allocation> Allocate(size_t size,
-  //                                      Attr attr = kDefault) override;
-  // void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
-
   size_t NumFreeChunks() const;

  private:
......
@@ -80,7 +80,6 @@ TEST(BestFitAllocator, concurrent_cuda) {
     th.join();
   }
 }
-//   allocator.FreeUniquePtr(std::move(cuda_allocation));
 }

 }  // namespace allocation
......
@@ -45,8 +45,6 @@ class ConditionalAllocator : public Allocator {
   ConditionalAllocator& AddAllocator(std::function<bool(size_t, Attr)> func,
                                      std::shared_ptr<Allocator> allocator);
-
-  // AllocationPtr Allocate(size_t size, Attr attr) override;

   bool IsAllocThreadSafe() const override;

  protected:
......
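The last hunk touches ConditionalAllocator, which routes each request to the first registered allocator whose predicate accepts the (size, Attr) pair. Only its AddAllocator signature is visible here, so the sketch below is an assumption-based illustration of that dispatch with invented names, not the class's real implementation:

#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Stand-ins for Paddle's Allocator and Attr types.
enum class AttrSketch { kDefault, kCommunication };

struct AllocatorSketch {
  virtual ~AllocatorSketch() = default;
  virtual const char* Name() const = 0;
};

struct SmallAllocSketch : AllocatorSketch {
  const char* Name() const override { return "small"; }
};

struct LargeAllocSketch : AllocatorSketch {
  const char* Name() const override { return "large"; }
};

// Routes each request to the first allocator whose predicate matches.
class ConditionalSketch {
 public:
  ConditionalSketch& AddAllocator(std::function<bool(size_t, AttrSketch)> pred,
                                  std::shared_ptr<AllocatorSketch> allocator) {
    routes_.emplace_back(std::move(pred), std::move(allocator));
    return *this;  // allow chained AddAllocator calls
  }

  std::shared_ptr<AllocatorSketch> Pick(size_t size, AttrSketch attr) const {
    for (const auto& route : routes_) {
      if (route.first(size, attr)) return route.second;
    }
    return nullptr;
  }

 private:
  std::vector<std::pair<std::function<bool(size_t, AttrSketch)>,
                        std::shared_ptr<AllocatorSketch>>>
      routes_;
};

int main() {
  ConditionalSketch cond;
  cond.AddAllocator([](size_t size, AttrSketch) { return size < 4096; },
                    std::make_shared<SmallAllocSketch>())
      .AddAllocator([](size_t, AttrSketch) { return true; },
                    std::make_shared<LargeAllocSketch>());
  std::cout << cond.Pick(1024, AttrSketch::kDefault)->Name() << "\n";     // small
  std::cout << cond.Pick(1 << 20, AttrSketch::kDefault)->Name() << "\n";  // large
  return 0;
}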