From 2bef0ca34631fc9a86f9e97c19600a1b95897091 Mon Sep 17 00:00:00 2001
From: sneaxiy
Date: Thu, 1 Nov 2018 06:05:15 +0000
Subject: [PATCH] add buffered_allocator

remove Free() method in UnmanagedAllocator
---
 paddle/fluid/memory/allocation/CMakeLists.txt |   4 +-
 paddle/fluid/memory/allocation/allocator.h    |  22 +--
 .../memory/allocation/best_fit_allocator.cc   |   4 +-
 .../memory/allocation/best_fit_allocator.h    |   2 +-
 .../memory/allocation/buffered_allocator.cc   | 176 ++++++++++++++++++
 .../memory/allocation/buffered_allocator.h    |  70 +++++++
 .../fluid/memory/allocation/cpu_allocator.cc  |   4 +-
 .../fluid/memory/allocation/cpu_allocator.h   |   2 +-
 .../fluid/memory/allocation/cuda_allocator.cc |   4 +-
 .../fluid/memory/allocation/cuda_allocator.h  |   2 +-
 .../memory/allocation/locked_allocator.cc     |   6 +-
 .../memory/allocation/locked_allocator.h      |   2 +-
 .../naive_managed_allocator_test.cc           |   4 +-
 .../memory/allocation/pinned_allocator.cc     |   4 +-
 .../memory/allocation/pinned_allocator.h      |   2 +-
 .../memory/allocation/retry_allocator.cc      |   2 +-
 16 files changed, 270 insertions(+), 40 deletions(-)
 create mode 100644 paddle/fluid/memory/allocation/buffered_allocator.cc
 create mode 100644 paddle/fluid/memory/allocation/buffered_allocator.h

diff --git a/paddle/fluid/memory/allocation/CMakeLists.txt b/paddle/fluid/memory/allocation/CMakeLists.txt
index b2be837832..2f69b5c0c8 100644
--- a/paddle/fluid/memory/allocation/CMakeLists.txt
+++ b/paddle/fluid/memory/allocation/CMakeLists.txt
@@ -2,6 +2,7 @@ cc_library(allocator SRCS allocator.cc DEPS place)
 cc_library(cpu_allocator SRCS cpu_allocator.cc DEPS allocator)
 cc_library(best_fit_allocator SRCS best_fit_allocator.cc DEPS allocator)
 cc_library(locked_allocator SRCS locked_allocator.cc DEPS allocator)
+cc_library(buffered_allocator SRCS buffered_allocator.cc DEPS allocator)

 if (WITH_GPU)
   nv_library(cuda_allocator SRCS cuda_allocator.cc DEPS allocator cuda_device_guard)
@@ -51,7 +52,8 @@ cc_library(allocator_facade SRCS allocator_facade.cc DEPS
     auto_increment_allocator
     zero_size_allocator
     conditional_allocator
-    retry_allocator)
+    retry_allocator
+    buffered_allocator)

 nv_test(allocation_and_eigen_test SRCS allocation_and_eigen_test.cu DEPS allocator_facade)

diff --git a/paddle/fluid/memory/allocation/allocator.h b/paddle/fluid/memory/allocation/allocator.h
index e117a2d153..9c838362d9 100644
--- a/paddle/fluid/memory/allocation/allocator.h
+++ b/paddle/fluid/memory/allocation/allocator.h
@@ -12,22 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include
-
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
 #pragma once
 #include <memory>
 #include <string>
@@ -141,11 +125,7 @@ class Allocator {
 // a manually managed allocator.
 class UnmanagedAllocator : public Allocator {
  public:
-  virtual void Free(Allocation* allocation) = 0;
-
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-    Free(allocation.get());
-  }
+  virtual void FreeUniquePtr(std::unique_ptr<Allocation> allocation) = 0;
 };

 // The allocation will be managed by smart pointers. i.e., users do not need

diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.cc b/paddle/fluid/memory/allocation/best_fit_allocator.cc
index 8cc943c861..b903fa437b 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/best_fit_allocator.cc
@@ -104,8 +104,8 @@ BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size,
   return to_use_it;
 }

-void BestFitAllocator::Free(Allocation* allocation) {
-  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
+void BestFitAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation.get());
   auto chunk_it = bf_allocation->ChunkIterator();
   PADDLE_ENFORCE(!chunk_it->is_free);
   chunk_it->is_free = true;

diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.h b/paddle/fluid/memory/allocation/best_fit_allocator.h
index da62bc4bb6..405306bba7 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator.h
+++ b/paddle/fluid/memory/allocation/best_fit_allocator.h
@@ -109,7 +109,7 @@ class BestFitAllocator : public UnmanagedAllocator {
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;

-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;

   size_t NumFreeChunks() const;

diff --git a/paddle/fluid/memory/allocation/buffered_allocator.cc b/paddle/fluid/memory/allocation/buffered_allocator.cc
new file mode 100644
index 0000000000..1eb1d3c7e8
--- /dev/null
+++ b/paddle/fluid/memory/allocation/buffered_allocator.cc
@@ -0,0 +1,176 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/memory/allocation/buffered_allocator.h"
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+namespace paddle {
+namespace memory {
+namespace allocation {
+
+BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator) {
+  std::vector<size_t> division_plan(8 * sizeof(size_t));
+  for (size_t i = 0; i < 8 * sizeof(size_t); ++i) {
+    division_plan[i] = (static_cast<size_t>(1) << i);
+  }
+  InitAndEnforceCheck(std::move(allocator), division_plan);
+}
+
+BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator,
+                                     const std::vector<size_t>& division_plan) {
+  InitAndEnforceCheck(std::move(allocator), division_plan);
+}
+
+BufferedAllocator::~BufferedAllocator() {
+  for (auto& v : allocations_) {
+    for (auto& pair : v) {
+      underlying_allocator_->FreeUniquePtr(std::move(pair.second));
+    }
+  }
+}
+
+void BufferedAllocator::InitAndEnforceCheck(
+    std::unique_ptr<Allocator>&& allocator,
+    const std::vector<size_t>& division_plan) {
+  underlying_allocator_.reset(
+      dynamic_cast<UnmanagedAllocator*>(allocator.release()));
+  PADDLE_ENFORCE_NOT_NULL(
+      underlying_allocator_,
+      "Underlying allocator of BufferedAllocator must be unmanaged");
+  if (underlying_allocator_->IsAllocThreadSafe()) {
+    mtx_.reset(new std::mutex());
+  }
+  constexpr size_t kMax = std::numeric_limits<size_t>::max();
+  if (division_plan.empty()) {
+    division_plan_.assign({0, kMax});
+  } else {
+    auto from = division_plan.front() == 0 ? division_plan.begin() + 1
+                                           : division_plan.begin();
+    auto to = division_plan.back() == kMax ? division_plan.end() - 1
+                                           : division_plan.end();
+    division_plan_.reserve(to - from + 2);
+    division_plan_.push_back(0);
+    division_plan_.insert(division_plan_.end(), from, to);
+    division_plan_.push_back(kMax);
+    for (size_t i = 1; i < division_plan_.size(); ++i) {
+      PADDLE_ENFORCE_LT(division_plan_[i - 1], division_plan_[i],
+                        "Division plan must be strictly sorted");
+    }
+  }
+  allocations_.resize(division_plan_.size() - 1);
+}
+
+void BufferedAllocator::InsertAllocationImpl(
+    std::unique_ptr<Allocation>&& allocation) {
+  auto size = allocation->size();
+  auto idx = GetListIndex(size);
+  allocations_[idx].insert(std::pair<size_t, std::unique_ptr<Allocation>>(
+      size, std::move(allocation)));
+}
+
+void BufferedAllocator::InsertAllocation(
+    std::unique_ptr<Allocation>&& allocation) {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    InsertAllocationImpl(std::move(allocation));
+  } else {
+    InsertAllocationImpl(std::move(allocation));
+  }
+}
+
+bool BufferedAllocator::Match(const std::unique_ptr<Allocation>& allocation,
+                              size_t size) {
+  return (allocation->size() >> 1) <= size;
+}
+
+size_t BufferedAllocator::GetListIndex(size_t size) {
+  auto it =
+      std::upper_bound(division_plan_.begin(), division_plan_.end(), size);
+  return static_cast<size_t>(it - division_plan_.begin()) - 1;
+}
+
+std::unique_ptr<Allocation> BufferedAllocator::RemoveAllocationImpl(
+    size_t size) {
+  auto idx = GetListIndex(size);
+  auto& allocation_map = allocations_[idx];
+  auto it = allocation_map.lower_bound(size);
+  // Only remove an allocation whose size is at most twice the requested size
+  if (it != allocation_map.end() && Match(it->second, size)) {
+    auto ret = std::move(it->second);
+    allocation_map.erase(it);
+    return ret;
+  } else {
+    return nullptr;
+  }
+}
+
+std::unique_ptr<Allocation> BufferedAllocator::RemoveAllocation(size_t size) {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    return RemoveAllocationImpl(size);
+  } else {
+    return RemoveAllocationImpl(size);
+  }
+}
+
+std::unique_ptr<Allocation> BufferedAllocator::Allocate(size_t size,
+                                                        Allocator::Attr attr) {
+  auto ret = RemoveAllocation(size);
+  if (!ret) {
+    try {
+      return underlying_allocator_->Allocate(size, attr);
+    } catch (BadAlloc&) {
+      // if allocation failed, try to free some memory from the buffers
+      FreeAllocations(size);
+      return underlying_allocator_->Allocate(size, attr);
+    }
+  }
+  return ret;
+}
+
+void BufferedAllocator::FreeAllocationsImpl(size_t size) {
+  if (UNLIKELY(size == 0)) return;
+  size_t cur = 0;
+  for (auto& alloc_map : allocations_) {
+    // free the largest allocations in each bucket first
+    while (!alloc_map.empty()) {
+      auto it = --(alloc_map.end());
+      cur += it->second->size();
+      underlying_allocator_->FreeUniquePtr(std::move(it->second));
+      alloc_map.erase(it);
+      if (cur >= size) return;
+    }
+  }
+}
+
+void BufferedAllocator::FreeAllocations(size_t size) {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    FreeAllocationsImpl(size);
+  } else {
+    FreeAllocationsImpl(size);
+  }
+}
+
+void BufferedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  InsertAllocation(std::move(allocation));
+}
+
+bool BufferedAllocator::IsAllocThreadSafe() const { return mtx_ != nullptr; }
+
+}  // namespace allocation
+}  // namespace memory
+}  // namespace paddle

diff --git a/paddle/fluid/memory/allocation/buffered_allocator.h b/paddle/fluid/memory/allocation/buffered_allocator.h
new file mode 100644
index 0000000000..630b3ad800
--- /dev/null
+++ b/paddle/fluid/memory/allocation/buffered_allocator.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+#include "paddle/fluid/memory/allocation/allocator.h"
+
+namespace paddle {
+namespace memory {
+namespace allocation {
+
+// NOTE(zjl): BufferedAllocator maintains a memory pool to accelerate
+// memory allocation and reuse memory.
+// BufferedAllocator provides the same thread-safety level as
+// underlying_allocator_.
+class BufferedAllocator : public UnmanagedAllocator {
+ public:
+  explicit BufferedAllocator(std::unique_ptr<Allocator>&& allocator);
+
+  BufferedAllocator(std::unique_ptr<Allocator>&& allocator,
+                    const std::vector<size_t>& division_plan);
+
+  ~BufferedAllocator();
+
+  std::unique_ptr<Allocation> Allocate(size_t size, Allocator::Attr) override;
+
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
+
+  bool IsAllocThreadSafe() const override;
+
+ private:
+  void InitAndEnforceCheck(std::unique_ptr<Allocator>&& allocator,
+                           const std::vector<size_t>& division_plan);
+
+  void InsertAllocation(std::unique_ptr<Allocation>&& allocation);
+  void InsertAllocationImpl(std::unique_ptr<Allocation>&& allocation);
+
+  static bool Match(const std::unique_ptr<Allocation>& allocation,
+                    size_t size);
+  std::unique_ptr<Allocation> RemoveAllocation(size_t size);
+  std::unique_ptr<Allocation> RemoveAllocationImpl(size_t size);
+
+  void FreeAllocations(size_t size);
+  void FreeAllocationsImpl(size_t size);
+
+  size_t GetListIndex(size_t size);
+
+  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
+  std::vector<std::multimap<size_t, std::unique_ptr<Allocation>>> allocations_;
+  std::vector<size_t> division_plan_;
+  std::unique_ptr<std::mutex> mtx_;
+};
+
+}  // namespace allocation
+}  // namespace memory
+}  // namespace paddle

diff --git a/paddle/fluid/memory/allocation/cpu_allocator.cc b/paddle/fluid/memory/allocation/cpu_allocator.cc
index 3133627bf7..3714c0da74 100644
--- a/paddle/fluid/memory/allocation/cpu_allocator.cc
+++ b/paddle/fluid/memory/allocation/cpu_allocator.cc
@@ -29,8 +29,8 @@ std::unique_ptr<Allocation> CPUAllocator::Allocate(size_t size, Attr attr) {
   }
   return std::unique_ptr<Allocation>(new CPUAllocation(ptr, size));
 }
-void CPUAllocator::Free(Allocation* allocation) {
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation));
+void CPUAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation.get()));
   free(allocation->ptr());
 }

diff --git a/paddle/fluid/memory/allocation/cpu_allocator.h b/paddle/fluid/memory/allocation/cpu_allocator.h
index b2df77f122..0852a58e57 100644
--- a/paddle/fluid/memory/allocation/cpu_allocator.h
+++ b/paddle/fluid/memory/allocation/cpu_allocator.h
@@ -36,7 +36,7 @@ class CPUAllocator : public UnmanagedAllocator {
   constexpr static size_t kAlignment = 64u;
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 };
 }  // namespace allocation

diff --git a/paddle/fluid/memory/allocation/cuda_allocator.cc b/paddle/fluid/memory/allocation/cuda_allocator.cc
index 7b477c53ea..20a62ea067 100644
--- a/paddle/fluid/memory/allocation/cuda_allocator.cc
+++ b/paddle/fluid/memory/allocation/cuda_allocator.cc
@@ -35,9 +35,9 @@ std::unique_ptr<Allocation> CUDAAllocator::Allocate(size_t size, Attr attr) {
       new CUDAAllocation(ptr, size, platform::Place(place_)));
 }

-void CUDAAllocator::Free(Allocation* allocation) {
+void CUDAAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
   platform::CUDADeviceGuard guard(place_.device);
-  auto* cuda_allocation = dynamic_cast<CUDAAllocation*>(allocation);
+  auto* cuda_allocation = dynamic_cast<CUDAAllocation*>(allocation.get());
   PADDLE_ENFORCE_NOT_NULL(cuda_allocation);
   PADDLE_ENFORCE_EQ(boost::get<platform::CUDAPlace>(cuda_allocation->place()),
                     place_);

diff --git a/paddle/fluid/memory/allocation/cuda_allocator.h b/paddle/fluid/memory/allocation/cuda_allocator.h
index dea01e6089..33556413df 100644
--- a/paddle/fluid/memory/allocation/cuda_allocator.h
+++ b/paddle/fluid/memory/allocation/cuda_allocator.h
@@ -34,7 +34,7 @@ class CUDAAllocator : public UnmanagedAllocator {
       : place_(boost::get<platform::CUDAPlace>(place)) {}
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;

  private:

diff --git a/paddle/fluid/memory/allocation/locked_allocator.cc b/paddle/fluid/memory/allocation/locked_allocator.cc
index dea87229f9..0b9f1f7531 100644
--- a/paddle/fluid/memory/allocation/locked_allocator.cc
+++ b/paddle/fluid/memory/allocation/locked_allocator.cc
@@ -27,12 +27,12 @@ std::unique_ptr<Allocation> LockedAllocator::Allocate(size_t size, Attr attr) {
     return underlying_allocator_->Allocate(size, attr);
   }
 }
-void LockedAllocator::Free(Allocation *allocation) {
+void LockedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
   if (underlying_allocator_->IsAllocThreadSafe()) {
-    return underlying_allocator_->Free(allocation);
+    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
   } else {
     std::lock_guard<std::mutex> guard(mtx_);
-    return underlying_allocator_->Free(allocation);
+    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
   }
 }
 bool LockedAllocator::IsAllocThreadSafe() const { return true; }

diff --git a/paddle/fluid/memory/allocation/locked_allocator.h b/paddle/fluid/memory/allocation/locked_allocator.h
index d6b877ba4f..952622f534 100644
--- a/paddle/fluid/memory/allocation/locked_allocator.h
+++ b/paddle/fluid/memory/allocation/locked_allocator.h
@@ -27,7 +27,7 @@ class LockedAllocator : public UnmanagedAllocator {
   explicit LockedAllocator(std::unique_ptr<Allocator>&& underlying_allocator);
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;

  private:

diff --git a/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc b/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc
index 027fdec26d..bb7440d394 100644
--- a/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc
+++ b/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc
@@ -31,7 +31,9 @@ class StubAllocator : public UnmanagedAllocator {
     return std::unique_ptr<Allocation>(
         new Allocation(nullptr, size, platform::CPUPlace()));
   }
-  void Free(Allocation* allocation) override { counter_.fetch_sub(1); }
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override {
+    counter_.fetch_sub(1);
+  }
   bool IsAllocThreadSafe() const override { return true; }

   std::atomic<int> counter_{0};

diff --git a/paddle/fluid/memory/allocation/pinned_allocator.cc b/paddle/fluid/memory/allocation/pinned_allocator.cc
index 650dab1b27..581dd64aaf 100644
--- a/paddle/fluid/memory/allocation/pinned_allocator.cc
+++ b/paddle/fluid/memory/allocation/pinned_allocator.cc
@@ -32,8 +32,8 @@ std::unique_ptr<Allocation> CPUPinnedAllocator::Allocate(size_t size,
       new CPUPinnedAllocation(ptr, size));
 }

-void CPUPinnedAllocator::Free(Allocation* allocation) {
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUPinnedAllocation*>(allocation));
+void CPUPinnedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUPinnedAllocation*>(allocation.get()));
   PADDLE_ENFORCE(cudaFreeHost(allocation->ptr()));
 }

diff --git a/paddle/fluid/memory/allocation/pinned_allocator.h b/paddle/fluid/memory/allocation/pinned_allocator.h
index d001a91d89..b0d7e9091e 100644
--- a/paddle/fluid/memory/allocation/pinned_allocator.h
+++ b/paddle/fluid/memory/allocation/pinned_allocator.h
@@ -29,7 +29,7 @@ class CPUPinnedAllocation : public Allocation {
 class CPUPinnedAllocator : public UnmanagedAllocator {
  public:
   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 };

diff --git a/paddle/fluid/memory/allocation/retry_allocator.cc b/paddle/fluid/memory/allocation/retry_allocator.cc
index 9a4ff2f51d..9dc568ef2a 100644
--- a/paddle/fluid/memory/allocation/retry_allocator.cc
+++ b/paddle/fluid/memory/allocation/retry_allocator.cc
@@ -75,7 +75,7 @@ Allocation* RetryAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
 }
 void RetryAllocator::FreeUnderlyingAllocation(
     std::unique_ptr<Allocation>&& allocation) {
-  underlying_allocator_->Free(allocation.get());
+  underlying_allocator_->FreeUniquePtr(std::move(allocation));
   {
     // notify all waiting allocators; they can try to allocate memory after free.
     std::lock_guard<std::mutex> lock(mutex_);
--
GitLab
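
Reviewer note (not part of the patch): a minimal usage sketch of the new interface, assuming only the files touched above. The division plan {4096, 1048576} and the test-style main() are illustrative, not taken from the patch.

    // Wrap an unmanaged CPU allocator in a BufferedAllocator and exercise
    // the new FreeUniquePtr() path. Buckets: [0, 4K), [4K, 1M), [1M, max).
    #include <memory>
    #include <utility>
    #include "paddle/fluid/memory/allocation/buffered_allocator.h"
    #include "paddle/fluid/memory/allocation/cpu_allocator.h"

    using paddle::memory::allocation::Allocator;
    using paddle::memory::allocation::BufferedAllocator;
    using paddle::memory::allocation::CPUAllocator;

    int main() {
      std::unique_ptr<Allocator> cpu(new CPUAllocator());
      BufferedAllocator buffered(std::move(cpu), {4096, 1048576});

      // First request goes through to the underlying CPU allocator.
      auto a = buffered.Allocate(100, Allocator::kDefault);
      // FreeUniquePtr() returns the chunk to the pool instead of freeing it.
      buffered.FreeUniquePtr(std::move(a));
      // A same-sized request should be served from the pool: it maps to the
      // same bucket and passes Match() (cached size <= 2 * requested size).
      auto b = buffered.Allocate(100, Allocator::kDefault);
      buffered.FreeUniquePtr(std::move(b));
      return 0;  // ~BufferedAllocator() hands cached chunks back to the CPU allocator
    }

Two things the sketch illustrates: the division plan only lists interior bucket boundaries, since InitAndEnforceCheck() appends the 0 and max sentinels itself; and passing the unique_ptr by value makes the ownership transfer explicit at every call site, which is the point of replacing Free(Allocation*) with FreeUniquePtr().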