diff --git a/paddle/fluid/memory/allocation/CMakeLists.txt b/paddle/fluid/memory/allocation/CMakeLists.txt
index b2be837832336b2e5e08291feae67333854ae676..bb4253e0ed2fed17dd8fb0fb874ddd9196a5013b 100644
--- a/paddle/fluid/memory/allocation/CMakeLists.txt
+++ b/paddle/fluid/memory/allocation/CMakeLists.txt
@@ -2,6 +2,8 @@ cc_library(allocator SRCS allocator.cc DEPS place)
 cc_library(cpu_allocator SRCS cpu_allocator.cc DEPS allocator)
 cc_library(best_fit_allocator SRCS best_fit_allocator.cc DEPS allocator)
 cc_library(locked_allocator SRCS locked_allocator.cc DEPS allocator)
+cc_library(buffered_allocator SRCS buffered_allocator.cc DEPS allocator)
+cc_test(buffered_allocator_test SRCS buffered_allocator_test.cc DEPS best_fit_allocator locked_allocator buffered_allocator cpu_allocator)
 
 if (WITH_GPU)
   nv_library(cuda_allocator SRCS cuda_allocator.cc DEPS allocator cuda_device_guard)
@@ -51,7 +53,8 @@ cc_library(allocator_facade SRCS allocator_facade.cc DEPS
     auto_increment_allocator
     zero_size_allocator
     conditional_allocator
-    retry_allocator)
+    retry_allocator
+    buffered_allocator)
 
 nv_test(allocation_and_eigen_test SRCS allocation_and_eigen_test.cu DEPS allocator_facade)
diff --git a/paddle/fluid/memory/allocation/allocator.h b/paddle/fluid/memory/allocation/allocator.h
index e117a2d1537a899e3d0fe990e2aece38c1cfbd63..9c838362d975b5b7c99f5196da7f757286126242 100644
--- a/paddle/fluid/memory/allocation/allocator.h
+++ b/paddle/fluid/memory/allocation/allocator.h
@@ -12,22 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <memory>
-
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
 #pragma once
 #include <memory>
 #include <string>
@@ -141,11 +125,7 @@ class Allocator {
 // a manually managed allocator.
 class UnmanagedAllocator : public Allocator {
  public:
-  virtual void Free(Allocation* allocation) = 0;
-
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-    Free(allocation.get());
-  }
+  virtual void FreeUniquePtr(std::unique_ptr<Allocation> allocation) = 0;
 };
 
 // The allocation will be managed by smart pointers. i.e., users do not need
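The hunk above collapses the old two-step contract (a pure virtual Free(Allocation*) plus a concrete FreeUniquePtr wrapper) into a single pure virtual FreeUniquePtr that takes ownership of the handle. A minimal sketch of a conforming subclass under the new contract (MyAllocator and its malloc-backed storage are hypothetical, not part of this patch):

    class MyAllocator : public UnmanagedAllocator {
     public:
      std::unique_ptr<Allocation> Allocate(size_t size,
                                           Attr attr = kDefault) override {
        // malloc-backed CPU allocation, for illustration only.
        return std::unique_ptr<Allocation>(
            new Allocation(malloc(size), size, platform::CPUPlace()));
      }
      void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override {
        // The allocator now consumes the handle: the Allocation object is
        // destroyed when this function returns, together with its payload.
        free(allocation->ptr());
      }
      bool IsAllocThreadSafe() const override { return false; }
    };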
diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.cc b/paddle/fluid/memory/allocation/best_fit_allocator.cc
index 8cc943c861acff4abb148bde1397343f03e896b1..b903fa437bb5d4aae1a0d152263d2ec70c8034b3 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/best_fit_allocator.cc
@@ -104,8 +104,8 @@ BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size,
   return to_use_it;
 }
 
-void BestFitAllocator::Free(Allocation* allocation) {
-  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
+void BestFitAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation.get());
   auto chunk_it = bf_allocation->ChunkIterator();
   PADDLE_ENFORCE(!chunk_it->is_free);
   chunk_it->is_free = true;
diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.h b/paddle/fluid/memory/allocation/best_fit_allocator.h
index da62bc4bb61e09f8b49cdc5e67f7932aa21a860e..405306bba7b17abe7305740d1b700185076f8ee8 100644
--- a/paddle/fluid/memory/allocation/best_fit_allocator.h
+++ b/paddle/fluid/memory/allocation/best_fit_allocator.h
@@ -109,7 +109,7 @@ class BestFitAllocator : public UnmanagedAllocator {
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
 
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
 
   size_t NumFreeChunks() const;
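buffered_allocator.cc below is the core of the patch: freed allocations are cached in size-binned pools and handed back on later requests instead of going through the underlying allocator again. A rough usage sketch built from the classes this patch touches (the sizes are illustrative):

    // Wrap a thread-unsafe BestFitAllocator; per the header comment below,
    // BufferedAllocator then mirrors that and is thread-unsafe as well.
    std::unique_ptr<CPUAllocator> cpu_allocator(new CPUAllocator());
    auto chunk = cpu_allocator->Allocate(1 << 20);
    std::unique_ptr<Allocator> best_fit(new BestFitAllocator(chunk.get()));
    BufferedAllocator buffered(std::move(best_fit));

    auto a = buffered.Allocate(1024);      // miss: forwarded to BestFitAllocator
    buffered.FreeUniquePtr(std::move(a));  // cached in the pool, not freed below
    auto b = buffered.Allocate(1000);      // hit: reuses the cached 1024-byte block
    // The BufferedAllocator must be flushed or destroyed before `chunk` is
    // returned to cpu_allocator.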
diff --git a/paddle/fluid/memory/allocation/buffered_allocator.cc b/paddle/fluid/memory/allocation/buffered_allocator.cc
new file mode 100644
index 0000000000000000000000000000000000000000..89ce628c5d51bda7e819a3a8e9ebdb3822a26f53
--- /dev/null
+++ b/paddle/fluid/memory/allocation/buffered_allocator.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/memory/allocation/buffered_allocator.h"
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+namespace paddle {
+namespace memory {
+namespace allocation {
+
+BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator) {
+  std::vector<size_t> division_plan(8 * sizeof(size_t));
+  for (size_t i = 0; i < 8 * sizeof(size_t); ++i) {
+    division_plan[i] = (static_cast<size_t>(1) << i);
+  }
+  InitAndEnforceCheck(std::move(allocator), division_plan);
+}
+
+BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator,
+                                     const std::vector<size_t>& division_plan) {
+  InitAndEnforceCheck(std::move(allocator), division_plan);
+}
+
+BufferedAllocator::~BufferedAllocator() { FlushImpl(); }
+
+void BufferedAllocator::FlushImpl() {
+  for (auto& v : allocations_) {
+    for (auto& pair : v) {
+      underlying_allocator_->FreeUniquePtr(std::move(pair.second));
+    }
+    v.clear();
+  }
+}
+
+void BufferedAllocator::Flush() {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    FlushImpl();
+  } else {
+    FlushImpl();
+  }
+}
+
+void BufferedAllocator::InitAndEnforceCheck(
+    std::unique_ptr<Allocator>&& allocator,
+    const std::vector<size_t>& division_plan) {
+  underlying_allocator_.reset(
+      dynamic_cast<UnmanagedAllocator*>(allocator.release()));
+  PADDLE_ENFORCE_NOT_NULL(
+      underlying_allocator_,
+      "Underlying allocator of BufferedAllocator must be unmanaged");
+  if (underlying_allocator_->IsAllocThreadSafe()) {
+    mtx_.reset(new std::mutex());
+  }
+  constexpr size_t kMax = std::numeric_limits<size_t>::max();
+  if (division_plan.empty()) {
+    division_plan_.assign({0, kMax});
+  } else {
+    auto from = division_plan.front() == 0 ? division_plan.begin() + 1
+                                           : division_plan.begin();
+    auto to = division_plan.back() == kMax ? division_plan.end() - 1
+                                           : division_plan.end();
+    division_plan_.reserve(to - from + 2);
+    division_plan_.push_back(0);
+    division_plan_.insert(division_plan_.end(), from, to);
+    division_plan_.push_back(kMax);
+    for (size_t i = 1; i < division_plan_.size(); ++i) {
+      PADDLE_ENFORCE_LT(division_plan_[i - 1], division_plan_[i],
+                        "Division plan must be strictly sorted");
+    }
+  }
+  allocations_.resize(division_plan_.size() - 1);
+}
+
+void BufferedAllocator::InsertAllocationImpl(
+    std::unique_ptr<Allocation>&& allocation) {
+  auto size = allocation->size();
+  auto idx = GetListIndex(size);
+  allocations_[idx].emplace(size, std::move(allocation));
+}
+
+void BufferedAllocator::InsertAllocation(
+    std::unique_ptr<Allocation>&& allocation) {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    InsertAllocationImpl(std::move(allocation));
+  } else {
+    InsertAllocationImpl(std::move(allocation));
+  }
+}
+
+bool BufferedAllocator::Match(size_t actual_size, size_t requested_size) {
+  return (actual_size >> 1) < requested_size;
+}
+
+size_t BufferedAllocator::GetListIndex(size_t size) {
+  auto it =
+      std::upper_bound(division_plan_.begin(), division_plan_.end(), size);
+  return static_cast<size_t>(it - division_plan_.begin()) - 1;
+}
+
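+// Reuse policy for RemoveAllocationImpl below: search the bin that `size`
+// maps to first; lower_bound() picks the smallest cached block of at least
+// `size` bytes, and Match() rejects it if it is at least twice the request.
+// If that bin holds no block of sufficient size, walk the larger bins, but
+// only while the bin lower bound itself still satisfies Match(), so an
+// oversized block is never handed back.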
+std::unique_ptr<Allocation> BufferedAllocator::RemoveAllocationImpl(
+    size_t size) {
+  auto idx = GetListIndex(size);
+  auto& allocation_map = allocations_[idx];
+  auto it = allocation_map.lower_bound(size);
+  // Only reuse an allocation whose size is not more than twice the
+  // requested size.
+  if (it != allocation_map.end()) {
+    if (Match(it->second->size(), size)) {
+      auto ret = std::move(it->second);
+      allocation_map.erase(it);
+      return ret;
+    } else {
+      return nullptr;
+    }
+  } else {
+    while (++idx < allocations_.size() && Match(division_plan_[idx], size)) {
+      auto& allocation_map = allocations_[idx];
+      if (!allocation_map.empty()) {
+        auto it = allocation_map.begin();
+        if (Match(it->second->size(), size)) {
+          auto ret = std::move(it->second);
+          allocation_map.erase(it);
+          return ret;
+        } else {
+          return nullptr;
+        }
+      }
+    }
+    return nullptr;
+  }
+}
+
+std::unique_ptr<Allocation> BufferedAllocator::RemoveAllocation(size_t size) {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    return RemoveAllocationImpl(size);
+  } else {
+    return RemoveAllocationImpl(size);
+  }
+}
+
+std::unique_ptr<Allocation> BufferedAllocator::Allocate(size_t size,
+                                                        Allocator::Attr attr) {
+  auto ret = RemoveAllocation(size);
+  if (!ret) {
+    try {
+      return underlying_allocator_->Allocate(size, attr);
+    } catch (BadAlloc&) {
+      // If allocation failed, try to free some memory from the buffers.
+      FreeAllocations(size);
+      return underlying_allocator_->Allocate(size, attr);
+    }
+  }
+  return ret;
+}
+
+void BufferedAllocator::FreeAllocationsImpl(size_t size) {
+  if (UNLIKELY(size == 0)) return;
+  size_t cur = 0;
+  for (auto& alloc_map : allocations_) {
+    // Free the largest cached allocations in each bin first.
+    while (!alloc_map.empty()) {
+      auto it = --(alloc_map.end());
+      cur += it->second->size();
+      underlying_allocator_->FreeUniquePtr(std::move(it->second));
+      alloc_map.erase(it);
+      if (cur >= size) return;
+    }
+  }
+}
+
+void BufferedAllocator::FreeAllocations(size_t size) {
+  if (mtx_) {
+    std::lock_guard<std::mutex> lock(*mtx_);
+    FreeAllocationsImpl(size);
+  } else {
+    FreeAllocationsImpl(size);
+  }
+}
+
+void BufferedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  InsertAllocation(std::move(allocation));
+}
+
+bool BufferedAllocator::IsAllocThreadSafe() const { return mtx_ != nullptr; }
+
+const std::vector<size_t>& BufferedAllocator::GetDivisionPlan() const {
+  return division_plan_;
+}
+
+}  // namespace allocation
+}  // namespace memory
+}  // namespace paddle
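With the default division plan, the stored plan becomes {0, 1, 2, 4, ..., 2^63, SIZE_MAX} on a 64-bit build, so consecutive entries delimit power-of-two bins and GetListIndex(size) returns the bin whose lower bound is the highest power of two not exceeding size. The values below follow directly from GetListIndex() and Match() above:

    //   GetListIndex(1)    == 1   -> bin [1, 2)
    //   GetListIndex(1000) == 10  -> bin [512, 1024)
    //   GetListIndex(1024) == 11  -> bin [1024, 2048)
    // Match(actual, requested) == ((actual >> 1) < requested):
    //   Match(1024, 1000) -> true  (1024 <  2 * 1000, acceptable to reuse)
    //   Match(4096, 1000) -> false (4096 >= 2 * 1000, too wasteful to reuse)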
diff --git a/paddle/fluid/memory/allocation/buffered_allocator.h b/paddle/fluid/memory/allocation/buffered_allocator.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fe6e5a19a84995a9d143f4c3803ff54b77a1f92
--- /dev/null
+++ b/paddle/fluid/memory/allocation/buffered_allocator.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+#include "paddle/fluid/memory/allocation/allocator.h"
+
+namespace paddle {
+namespace memory {
+namespace allocation {
+
+// NOTE(zjl): BufferedAllocator maintains a memory pool to accelerate
+// memory allocation and reuse memory.
+// BufferedAllocator provides the same thread-safety level as
+// underlying_allocator_
+class BufferedAllocator : public UnmanagedAllocator {
+ public:
+  explicit BufferedAllocator(std::unique_ptr<Allocator>&& allocator);
+
+  BufferedAllocator(std::unique_ptr<Allocator>&& allocator,
+                    const std::vector<size_t>& division_plan);
+
+  ~BufferedAllocator();
+
+  std::unique_ptr<Allocation> Allocate(
+      size_t size, Allocator::Attr attr = Allocator::Attr::kDefault) override;
+
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
+
+  bool IsAllocThreadSafe() const override;
+
+  const std::vector<size_t>& GetDivisionPlan() const;
+
+  void Flush();
+
+ private:
+  void InitAndEnforceCheck(std::unique_ptr<Allocator>&& allocator,
+                           const std::vector<size_t>& division_plan);
+
+  void InsertAllocation(std::unique_ptr<Allocation>&& allocation);
+  void InsertAllocationImpl(std::unique_ptr<Allocation>&& allocation);
+
+  static bool Match(size_t actual_size, size_t requested_size);
+  std::unique_ptr<Allocation> RemoveAllocation(size_t size);
+  std::unique_ptr<Allocation> RemoveAllocationImpl(size_t size);
+
+  void FreeAllocations(size_t size);
+  void FreeAllocationsImpl(size_t size);
+
+  void FlushImpl();
+
+  size_t GetListIndex(size_t size);
+
+  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
+  std::vector<std::map<size_t, std::unique_ptr<Allocation>>> allocations_;
+  std::vector<size_t> division_plan_;
+  std::unique_ptr<std::mutex> mtx_;
+};
+
+}  // namespace allocation
+}  // namespace memory
+}  // namespace paddle
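Note how mtx_ doubles as the thread-safety flag: InitAndEnforceCheck() creates it only when the wrapped allocator reports IsAllocThreadSafe(), and IsAllocThreadSafe() on the wrapper just tests mtx_ != nullptr. The two configurations, sketched in the style of the test helper below (`chunk` stands for a pre-allocated Allocation, as in the tests):

    // Underlying allocator is thread-unsafe -> no mutex, wrapper is unsafe too.
    std::unique_ptr<Allocator> unsafe(new BestFitAllocator(chunk.get()));
    BufferedAllocator a(std::move(unsafe));   // a.IsAllocThreadSafe() == false

    // LockedAllocator makes the underlying allocator thread-safe, so the
    // wrapper creates mtx_ and locks around its own pool as well.
    std::unique_ptr<Allocator> safe(new LockedAllocator(
        std::unique_ptr<Allocator>(new BestFitAllocator(chunk.get()))));
    BufferedAllocator b(std::move(safe));     // b.IsAllocThreadSafe() == true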
diff --git a/paddle/fluid/memory/allocation/buffered_allocator_test.cc b/paddle/fluid/memory/allocation/buffered_allocator_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a9fb4f3926c7b9fab50abb9c6b9f25ddd44b093f
--- /dev/null
+++ b/paddle/fluid/memory/allocation/buffered_allocator_test.cc
@@ -0,0 +1,148 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/memory/allocation/buffered_allocator.h"
+#include <gtest/gtest.h>
+#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
+#include "paddle/fluid/memory/allocation/cpu_allocator.h"
+#include "paddle/fluid/memory/allocation/locked_allocator.h"
+
+namespace paddle {
+namespace memory {
+namespace allocation {
+
+inline std::unique_ptr<BufferedAllocator> GetBufferedAllocator(
+    Allocation *allocation, bool thread_safe) {
+  std::unique_ptr<Allocator> allocator(new BestFitAllocator(allocation));
+  if (thread_safe) {
+    allocator.reset(new LockedAllocator(std::move(allocator)));
+  }
+
+  return std::unique_ptr<BufferedAllocator>(
+      new BufferedAllocator(std::move(allocator)));
+}
+
+TEST(buffered_allocator, thread_safety) {
+  std::unique_ptr<CPUAllocator> allocator(new CPUAllocator());
+  auto chunk = allocator->Allocate(1 << 20);
+  {
+    auto buf_allocator = GetBufferedAllocator(chunk.get(), true);
+    ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), true);
+  }
+
+  {
+    auto buf_allocator = GetBufferedAllocator(chunk.get(), false);
+    ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), false);
+  }
+
+  allocator->FreeUniquePtr(std::move(chunk));
+}
+
+class StubAllocation : public Allocation {
+ public:
+  using Allocation::Allocation;
+};
+
+class StubAllocator : public UnmanagedAllocator {
+ public:
+  std::unique_ptr<Allocation> Allocate(size_t size,
+                                       Allocator::Attr attr) override {
+    ++construct_count_;
+    if (size == 0) {
+      return std::unique_ptr<Allocation>(
+          new StubAllocation(nullptr, 0, platform::CPUPlace()));
+    } else {
+      return std::unique_ptr<Allocation>(
+          new StubAllocation(new uint8_t[size], size, platform::CPUPlace()));
+    }
+  }
+
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+    StubAllocation *alloc = dynamic_cast<StubAllocation *>(allocation.get());
+    PADDLE_ENFORCE_NOT_NULL(alloc);
+    if (alloc->ptr()) delete[] static_cast<uint8_t *>(alloc->ptr());
+    ++destruct_count_;
+  }
+
+  void ResetCounter() {
+    construct_count_ = 0;
+    destruct_count_ = 0;
+  }
+
+  size_t GetAllocCount() const { return construct_count_; }
+
+  size_t GetFreeCount() const { return destruct_count_; }
+
+ private:
+  size_t construct_count_ = 0;
+  size_t destruct_count_ = 0;
+};
+
+constexpr size_t kZero = 0;
+constexpr size_t kOne = 1;
+constexpr size_t kTwo = 2;
+
+TEST(buffered_allocator, lazy_free) {
+  std::unique_ptr<StubAllocator> stub_allocator(new StubAllocator());
+  auto *underlying_allocator = stub_allocator.get();
+  std::unique_ptr<BufferedAllocator> allocator(
+      new BufferedAllocator(std::move(stub_allocator)));
+
+  {
+    underlying_allocator->ResetCounter();
+    auto x = allocator->Allocate(1025);
+    ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne);
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
+    allocator->FreeUniquePtr(std::move(x));
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
+  }
+
+  {
+    underlying_allocator->ResetCounter();
+    auto x = allocator->Allocate(900);
+    ASSERT_EQ(underlying_allocator->GetAllocCount(), kZero);
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
+    auto y = allocator->Allocate(2048);
+    ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne);
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
+    allocator->FreeUniquePtr(std::move(x));
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
+    allocator->FreeUniquePtr(std::move(y));
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
+  }
+
+  {
+    underlying_allocator->ResetCounter();
+    allocator->Flush();
+    ASSERT_EQ(underlying_allocator->GetAllocCount(), kZero);
+    ASSERT_EQ(underlying_allocator->GetFreeCount(), kTwo);
+  }
+}
+
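+// The scenario below: a 2048-byte chunk is carved into 1600- and 400-byte
+// blocks, both of which are returned to the pool. The final 1600-byte
+// request is then served straight from the pool, since the cached
+// 1600-byte block satisfies Match(1600, 1600). If the pool could not serve
+// it, the BadAlloc path in Allocate() would flush cached blocks back to
+// the underlying BestFitAllocator and retry.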
+TEST(buffered_allocator, garbage_collection) {
+  std::unique_ptr<CPUAllocator> cpu_allocator(new CPUAllocator());
+  auto chunk = cpu_allocator->Allocate(2048);
+  auto allocator = GetBufferedAllocator(chunk.get(), false);
+  auto x1 = allocator->Allocate(1600);
+  auto x2 = allocator->Allocate(400);
+  allocator->FreeUniquePtr(std::move(x1));
+  allocator->FreeUniquePtr(std::move(x2));
+  auto x3 = allocator->Allocate(1600);
+  ASSERT_NE(x3, nullptr);
+  ASSERT_NE(x3->ptr(), nullptr);
+}
+
+}  // namespace allocation
+}  // namespace memory
+}  // namespace paddle
diff --git a/paddle/fluid/memory/allocation/cpu_allocator.cc b/paddle/fluid/memory/allocation/cpu_allocator.cc
index 3133627bf7202f4fa9d9fd5ec2e62376133779a9..3714c0da746474db1a89f7cbb93a50ebdbb9ee02 100644
--- a/paddle/fluid/memory/allocation/cpu_allocator.cc
+++ b/paddle/fluid/memory/allocation/cpu_allocator.cc
@@ -29,8 +29,8 @@ std::unique_ptr<Allocation> CPUAllocator::Allocate(size_t size, Attr attr) {
   }
   return std::unique_ptr<Allocation>(new CPUAllocation(ptr, size));
 }
-void CPUAllocator::Free(Allocation* allocation) {
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation));
+void CPUAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation.get()));
   free(allocation->ptr());
 }
 
diff --git a/paddle/fluid/memory/allocation/cpu_allocator.h b/paddle/fluid/memory/allocation/cpu_allocator.h
index b2df77f1227c658e6ba83075fbc0f46340305334..0852a58e577732a73a5bc6d924204533b6558c68 100644
--- a/paddle/fluid/memory/allocation/cpu_allocator.h
+++ b/paddle/fluid/memory/allocation/cpu_allocator.h
@@ -36,7 +36,7 @@ class CPUAllocator : public UnmanagedAllocator {
   constexpr static size_t kAlignment = 64u;
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 };
 }  // namespace allocation
diff --git a/paddle/fluid/memory/allocation/cuda_allocator.cc b/paddle/fluid/memory/allocation/cuda_allocator.cc
index 7b477c53ea2ead54073658a015ca7dc5a41309eb..20a62ea067c0b85be1f5b41a1f2adfddcc38f7d6 100644
--- a/paddle/fluid/memory/allocation/cuda_allocator.cc
+++ b/paddle/fluid/memory/allocation/cuda_allocator.cc
@@ -35,9 +35,9 @@ std::unique_ptr<Allocation> CUDAAllocator::Allocate(size_t size, Attr attr) {
       new CUDAAllocation(ptr, size, platform::Place(place_)));
 }
 
-void CUDAAllocator::Free(Allocation* allocation) {
+void CUDAAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
   platform::CUDADeviceGuard guard(place_.device);
-  auto* cuda_allocation = dynamic_cast<CUDAAllocation*>(allocation);
+  auto* cuda_allocation = dynamic_cast<CUDAAllocation*>(allocation.get());
   PADDLE_ENFORCE_NOT_NULL(cuda_allocation);
   PADDLE_ENFORCE_EQ(boost::get<platform::CUDAPlace>(cuda_allocation->place()),
                     place_);
diff --git a/paddle/fluid/memory/allocation/cuda_allocator.h b/paddle/fluid/memory/allocation/cuda_allocator.h
index dea01e60890741877a387e5588fae8703dd202ac..33556413df9acc3858e1afead92e3a2b375a106c 100644
--- a/paddle/fluid/memory/allocation/cuda_allocator.h
+++ b/paddle/fluid/memory/allocation/cuda_allocator.h
@@ -34,7 +34,7 @@ class CUDAAllocator : public UnmanagedAllocator {
       : place_(boost::get<platform::CUDAPlace>(place)) {}
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 
  private:
diff --git a/paddle/fluid/memory/allocation/locked_allocator.cc b/paddle/fluid/memory/allocation/locked_allocator.cc
index dea87229f9143efb1d0efbe121bf923c6df0810a..0b9f1f753146e4c4e97a2a0402e8d0b1524324cd 100644
--- a/paddle/fluid/memory/allocation/locked_allocator.cc
+++ b/paddle/fluid/memory/allocation/locked_allocator.cc
@@ -27,12 +27,12 @@ std::unique_ptr<Allocation> LockedAllocator::Allocate(size_t size, Attr attr) {
     return underlying_allocator_->Allocate(size, attr);
   }
 }
-void LockedAllocator::Free(Allocation *allocation) {
+void LockedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
   if (underlying_allocator_->IsAllocThreadSafe()) {
-    return underlying_allocator_->Free(allocation);
+    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
   } else {
     std::lock_guard<std::mutex> guard(mtx_);
-    return underlying_allocator_->Free(allocation);
+    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
   }
 }
 bool LockedAllocator::IsAllocThreadSafe() const { return true; }
diff --git a/paddle/fluid/memory/allocation/locked_allocator.h b/paddle/fluid/memory/allocation/locked_allocator.h
index d6b877ba4f7da3e191624b94eb832b7aa8c0069f..952622f534477dc01afc51788f58ba142481f7e9 100644
--- a/paddle/fluid/memory/allocation/locked_allocator.h
+++ b/paddle/fluid/memory/allocation/locked_allocator.h
@@ -27,7 +27,7 @@ class LockedAllocator : public UnmanagedAllocator {
   explicit LockedAllocator(std::unique_ptr<Allocator>&& underlying_allocator);
   std::unique_ptr<Allocation> Allocate(size_t size,
                                        Attr attr = kDefault) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 
  private:
diff --git a/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc b/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc
index 027fdec26def0208ff961688799b472a2be0f671..bb7440d394621f92c8a1bac97c1b5444caacd042 100644
--- a/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc
+++ b/paddle/fluid/memory/allocation/naive_managed_allocator_test.cc
@@ -31,7 +31,9 @@ class StubAllocator : public UnmanagedAllocator {
     return std::unique_ptr<Allocation>(
         new Allocation(nullptr, size, platform::CPUPlace()));
   }
-  void Free(Allocation* allocation) override { counter_.fetch_sub(1); }
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override {
+    counter_.fetch_sub(1);
+  }
   bool IsAllocThreadSafe() const override { return true; }
 
   std::atomic<int> counter_{0};
diff --git a/paddle/fluid/memory/allocation/pinned_allocator.cc b/paddle/fluid/memory/allocation/pinned_allocator.cc
index 650dab1b27c8095b3bd1c6d33cff20a2d0d7c5de..581dd64aaf2bd95452f342b3f7a4beba3c9aa032 100644
--- a/paddle/fluid/memory/allocation/pinned_allocator.cc
+++ b/paddle/fluid/memory/allocation/pinned_allocator.cc
@@ -32,8 +32,8 @@ std::unique_ptr<Allocation> CPUPinnedAllocator::Allocate(size_t size,
       new CPUPinnedAllocation(ptr, size));
 }
 
-void CPUPinnedAllocator::Free(Allocation* allocation) {
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUPinnedAllocation*>(allocation));
+void CPUPinnedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUPinnedAllocation*>(allocation.get()));
   PADDLE_ENFORCE(cudaFreeHost(allocation->ptr()));
 }
diff --git a/paddle/fluid/memory/allocation/pinned_allocator.h b/paddle/fluid/memory/allocation/pinned_allocator.h
index d001a91d893e759ae838c93f6e104f5ed4b3a00b..b0d7e9091eff0d185499b2d9fe40216743845105 100644
--- a/paddle/fluid/memory/allocation/pinned_allocator.h
+++ b/paddle/fluid/memory/allocation/pinned_allocator.h
@@ -29,7 +29,7 @@ class CPUPinnedAllocation : public Allocation {
 class CPUPinnedAllocator : public UnmanagedAllocator {
  public:
   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
-  void Free(Allocation* allocation) override;
+  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 };
 
diff --git a/paddle/fluid/memory/allocation/retry_allocator.cc b/paddle/fluid/memory/allocation/retry_allocator.cc
index 9a4ff2f51d08713b425f2a21c3287b71a1857327..9dc568ef2ab6d4ce79e2c923311a53cf0cede278 100644
--- a/paddle/fluid/memory/allocation/retry_allocator.cc
+++ b/paddle/fluid/memory/allocation/retry_allocator.cc
@@ -75,7 +75,7 @@ Allocation* RetryAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
 }
 void RetryAllocator::FreeUnderlyingAllocation(
     std::unique_ptr<Allocation>&& allocation) {
-  underlying_allocator_->Free(allocation.get());
+  underlying_allocator_->FreeUniquePtr(std::move(allocation));
   {
     // Notify all waiting allocators; they can retry their allocation after
    // this free.
     std::lock_guard<std::mutex> lock(mutex_);
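The RetryAllocator change is more than a rename: with Free(allocation.get()), the payload was freed but the Allocation handle stayed alive until the moved-in unique_ptr went out of scope at the end of FreeUnderlyingAllocation(); with FreeUniquePtr(std::move(allocation)), handle and payload are released together. Annotated contrast of just the two lines from the hunk, under the assumption that nothing else in the function touches `allocation` afterwards:

    // Old: underlying_allocator_->Free(allocation.get());
    //   frees the payload only; the Allocation object itself is destroyed
    //   after the waiting allocators have already been notified.
    // New: underlying_allocator_->FreeUniquePtr(std::move(allocation));
    //   transfers ownership, so the handle dies inside the callee and the
    //   notification happens strictly after the full release.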