From 402a9f1f24b0b2044940fc73102d652c2164ec5c Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Sat, 7 Apr 2018 17:34:33 -0700 Subject: [PATCH] Rewrite the interface of memroy/detail --- paddle/fluid/memory/CMakeLists.txt | 6 +- paddle/fluid/memory/detail/CMakeLists.txt | 10 +- paddle/fluid/memory/detail/buddy_allocator.h | 14 +- paddle/fluid/memory/detail/memory_block.cc | 95 +++++++------ paddle/fluid/memory/detail/memory_block.h | 125 ++++++++++++------ paddle/fluid/memory/detail/meta_cache.cc | 7 +- paddle/fluid/memory/detail/meta_cache.h | 64 --------- paddle/fluid/memory/detail/meta_data.cc | 26 ++-- paddle/fluid/memory/detail/meta_data.h | 54 -------- .../fluid/memory/detail/system_allocator.cc | 22 +-- paddle/fluid/memory/detail/system_allocator.h | 8 +- .../memory/detail/system_allocator_test.cc | 18 +-- 12 files changed, 180 insertions(+), 269 deletions(-) delete mode 100644 paddle/fluid/memory/detail/meta_cache.h delete mode 100644 paddle/fluid/memory/detail/meta_data.h diff --git a/paddle/fluid/memory/CMakeLists.txt b/paddle/fluid/memory/CMakeLists.txt index 8b3043af7a..3b73e79eb3 100644 --- a/paddle/fluid/memory/CMakeLists.txt +++ b/paddle/fluid/memory/CMakeLists.txt @@ -7,11 +7,7 @@ cc_library(paddle_memory DEPS memory memcpy - meta_data - meta_cache - memory_block - buddy_allocator - system_allocator) + buddy_allocator) cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory) diff --git a/paddle/fluid/memory/detail/CMakeLists.txt b/paddle/fluid/memory/detail/CMakeLists.txt index b9c3fc31c1..1636b120ae 100644 --- a/paddle/fluid/memory/detail/CMakeLists.txt +++ b/paddle/fluid/memory/detail/CMakeLists.txt @@ -1,3 +1,5 @@ +cc_library(memory_block SRCS memory_block.cc meta_data.cc meta_cache.cc) + if(${WITH_GPU}) nv_library(system_allocator SRCS system_allocator.cc DEPS gflags cpu_info gpu_info) else(${WITH_GPU}) @@ -6,10 +8,4 @@ endif(${WITH_GPU}) cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator) -cc_library(meta_data SRCS meta_data.cc) - -cc_library(meta_cache SRCS meta_cache.cc) - -cc_library(memory_block SRCS memory_block.cc) - -cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS glog) +cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS memory_block system_allocator glog) diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h index a4ee70c258..2f39d774d6 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.h +++ b/paddle/fluid/memory/detail/buddy_allocator.h @@ -14,18 +14,18 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/memory/detail/meta_cache.h" -#include "paddle/fluid/memory/detail/meta_data.h" +#include // NOLINT +#include +#include +#include +#include + +#include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" -#include -#include -#include -#include - namespace paddle { namespace memory { namespace detail { diff --git a/paddle/fluid/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc index 07123f2669..f744450f48 100644 --- a/paddle/fluid/memory/detail/memory_block.cc +++ b/paddle/fluid/memory/detail/memory_block.cc @@ -13,17 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_cache.h" -#include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/platform/assert.h" namespace paddle { namespace memory { namespace detail { -void MemoryBlock::init(MetadataCache& cache, Type t, size_t index, size_t size, +void MemoryBlock::init(MetadataCache* cache, Type t, size_t index, size_t size, void* left_buddy, void* right_buddy) { - cache.store(this, Metadata(t, index, size - sizeof(Metadata), size, + cache->save(this, Metadata(t, index, size - sizeof(Metadata), size, static_cast(left_buddy), static_cast(right_buddy))); } @@ -32,115 +30,112 @@ MemoryBlock::Type MemoryBlock::type(MetadataCache& cache) const { return cache.load(this).type; } -size_t MemoryBlock::size(MetadataCache& cache) const { +size_t MemoryBlock::size(const MetadataCache& cache) const { return cache.load(this).size; } -size_t MemoryBlock::total_size(MetadataCache& cache) const { +size_t MemoryBlock::index(const MetadataCache& cache) const { + return cache.load(this).index; +} + +size_t MemoryBlock::total_size(const MetadataCache& cache) const { return cache.load(this).total_size; } -MemoryBlock* MemoryBlock::left_buddy(MetadataCache& cache) const { +bool MemoryBlock::has_left_buddy(const MetadataCache& cache) const { + return left_buddy(cache) != nullptr; +} + +bool MemoryBlock::has_right_buddy(const MetadataCache& cache) const { + return right_buddy(cache) != nullptr; +} + +MemoryBlock* MemoryBlock::left_buddy(const MetadataCache& cache) const { return cache.load(this).left_buddy; } -MemoryBlock* MemoryBlock::right_buddy(MetadataCache& cache) const { +MemoryBlock* MemoryBlock::right_buddy(const MetadataCache& cache) const { return cache.load(this).right_buddy; } -void MemoryBlock::split(MetadataCache& cache, size_t size) { +void MemoryBlock::split(MetadataCache* cache, size_t size) { // make sure the split fits - PADDLE_ASSERT(total_size(cache) >= size); + PADDLE_ASSERT(total_size(*cache) >= size); // bail out if there is no room for another partition - if (total_size(cache) - size <= sizeof(Metadata)) { + if (total_size(*cache) - size <= sizeof(Metadata)) { return; } // find the position of the split void* right_partition = reinterpret_cast(this) + size; - size_t remaining_size = total_size(cache) - size; + size_t remaining_size = total_size(*cache) - size; // Add the new block as a buddy - auto metadata = cache.load(this); + auto metadata = cache->load(this); // Write the metadata for the new block auto new_block_right_buddy = metadata.right_buddy; - cache.store( + cache->save( static_cast(right_partition), - Metadata(FREE_CHUNK, index(cache), remaining_size - sizeof(Metadata), + Metadata(FREE_CHUNK, index(*cache), remaining_size - sizeof(Metadata), remaining_size, this, new_block_right_buddy)); metadata.right_buddy = static_cast(right_partition); metadata.size = size - sizeof(Metadata); metadata.total_size = size; - cache.store(this, metadata); + cache->save(this, metadata); // Write metadata for the new block's right buddy if (new_block_right_buddy != nullptr) { - auto buddy_metadata = cache.load(new_block_right_buddy); + auto buddy_metadata = cache->load(new_block_right_buddy); buddy_metadata.left_buddy = static_cast(right_partition); - cache.store(new_block_right_buddy, buddy_metadata); + cache->save(new_block_right_buddy, buddy_metadata); } } -void MemoryBlock::merge(MetadataCache& cache, MemoryBlock* right_buddy) { +void MemoryBlock::merge(MetadataCache* cache, MemoryBlock* right_buddy) { 
// only free blocks can be merged - PADDLE_ASSERT(type(cache) == FREE_CHUNK); - PADDLE_ASSERT(right_buddy->type(cache) == FREE_CHUNK); + PADDLE_ASSERT(type(*cache) == FREE_CHUNK); + PADDLE_ASSERT(right_buddy->type(*cache) == FREE_CHUNK); - auto metadata = cache.load(this); + auto metadata = cache->load(this); // link this->buddy's buddy - metadata.right_buddy = right_buddy->right_buddy(cache); + metadata.right_buddy = right_buddy->right_buddy(*cache); // link buddy's buddy -> this if (metadata.right_buddy != nullptr) { - auto buddy_metadata = cache.load(metadata.right_buddy); + auto buddy_metadata = cache->load(metadata.right_buddy); buddy_metadata.left_buddy = this; - cache.store(metadata.right_buddy, buddy_metadata); + cache->save(metadata.right_buddy, buddy_metadata); } - metadata.size += right_buddy->total_size(cache); - metadata.total_size += right_buddy->total_size(cache); + metadata.size += right_buddy->total_size(*cache); + metadata.total_size += right_buddy->total_size(*cache); - cache.store(this, metadata); - cache.store(right_buddy, Metadata(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr)); + cache->save(this, metadata); + cache->save(right_buddy, Metadata(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr)); } -void MemoryBlock::mark_as_free(MetadataCache& cache) { +void MemoryBlock::mark_as_free(MetadataCache* cache) { // check for double free or corruption - PADDLE_ASSERT(type(cache) != FREE_CHUNK); - PADDLE_ASSERT(type(cache) != INVALID_CHUNK); - + PADDLE_ASSERT(type(*cache) != FREE_CHUNK); + PADDLE_ASSERT(type(*cache) != INVALID_CHUNK); set_type(cache, FREE_CHUNK); } -void MemoryBlock::set_type(MetadataCache& cache, Type t) { - auto metadata = cache.load(this); - +void MemoryBlock::set_type(MetadataCache* cache, Type t) { + auto metadata = cache->load(this); metadata.type = t; - - cache.store(this, metadata); -} - -bool MemoryBlock::has_left_buddy(MetadataCache& cache) const { - return left_buddy(cache) != nullptr; -} - -bool MemoryBlock::has_right_buddy(MetadataCache& cache) const { - return right_buddy(cache) != nullptr; -} - -size_t MemoryBlock::index(MetadataCache& cache) const { - return cache.load(this).index; + cache->save(this, metadata); } void* MemoryBlock::data() const { diff --git a/paddle/fluid/memory/detail/memory_block.h b/paddle/fluid/memory/detail/memory_block.h index 72b40b7317..5e83a2f899 100644 --- a/paddle/fluid/memory/detail/memory_block.h +++ b/paddle/fluid/memory/detail/memory_block.h @@ -11,7 +11,6 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include @@ -20,10 +19,11 @@ namespace paddle { namespace memory { namespace detail { -// Forward Declarations +// Forward declaration. class MetadataCache; -/*! \brief A class used to interpret the contents of a memory block */ +// MemoryBlock represents Each allocated memory block, which contains +// Metadata and the payload. class MemoryBlock { public: enum Type { @@ -33,57 +33,96 @@ class MemoryBlock { INVALID_CHUNK // memory is invalid }; - public: - void init(MetadataCache& cache, Type t, size_t index, size_t size, + // init saves the Metadata of the memory block in a MetadataCache. + // If it is a CPU memory block, the MetadataCache writes the + // Metadata to the beginning of the block; or, if it is a GPU memory + // block, the MetadataCache writes the Meatadata to a std::map in + // the CPU. 
+ void init(MetadataCache* cache, Type t, size_t index, size_t size, void* left_buddy, void* right_buddy); - public: - /*! \brief The type of the allocation */ - Type type(MetadataCache& cache) const; - - /*! \brief The size of the data region */ - size_t size(MetadataCache& cache) const; - - /*! \brief An index to track the allocator */ - size_t index(MetadataCache& cache) const; - - /*! \brief The total size of the block */ - size_t total_size(MetadataCache& cache) const; - - /*! \brief Check the left buddy of the block */ - bool has_left_buddy(MetadataCache& cache) const; - - /*! \brief Check the right buddy of the block */ - bool has_right_buddy(MetadataCache& cache) const; + // All these accessors returns fields in the Metadata of the memory + // block. They all need a MetadataCache instance as their first + // parameter because they read the Metadata from the cache. + Type type(const MetadataCache& cache) const; + size_t size(const MetadataCache& cache) const; + size_t index(const MetadataCache& cache) const; + size_t total_size(const MetadataCache& cache) const; + bool has_left_buddy(const MetadataCache& cache) const; + bool has_right_buddy(const MetadataCache& cache) const; + MemoryBlock* left_buddy(const MetadataCache& cache) const; + MemoryBlock* right_buddy(const MetadataCache& cache) const; - /*! \brief Get the left buddy */ - MemoryBlock* left_buddy(MetadataCache& cache) const; + // Split the allocation into left/right blocks. + void split(MetadataCache* cache, size_t size); - /*! \brief Get the right buddy */ - MemoryBlock* right_buddy(MetadataCache& cache) const; + // Merge left and right blocks together. + void merge(MetadataCache* cache, MemoryBlock* right_buddy); - public: - /*! \brief Split the allocation into left/right blocks */ - void split(MetadataCache& cache, size_t size); - - /*! \brief Merge left and right blocks together */ - void merge(MetadataCache& cache, MemoryBlock* right_buddy); + // Mark the allocation as free. + void mark_as_free(MetadataCache* cache); - /*! \brief Mark the allocation as free */ - void mark_as_free(MetadataCache& cache); + // Change the type of the allocation. + void set_type(MetadataCache* cache, Type t); - /*! \brief Change the type of the allocation */ - void set_type(MetadataCache& cache, Type t); - - public: - /*! \brief Get a pointer to the memory block's data */ void* data() const; - - /*! \brief Get a pointer to the memory block's metadata */ MemoryBlock* metadata() const; + private: + // Metadata describes a MemoryBlock. + struct Metadata { + Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l, + MemoryBlock* r); + Metadata(); + + // Updates guard_begin and guard_end by hashes of the Metadata object. + void update_guards(); + + // Checks that guard_begin and guard_end are hashes of the Metadata object. + bool check_guards() const; + + // TODO(gangliao): compress this + size_t guard_begin = 0; + MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK; + size_t index = 0; + size_t size = 0; + size_t total_size = 0; + MemoryBlock* left_buddy = nullptr; + MemoryBlock* right_buddy = nullptr; + size_t guard_end = 0; + }; +}; + +// A cache for accessing memory block meta-data that may be expensive +// to access directly. This class exists to unify the metadata format +// between GPU and CPU allocations. It should be removed when the CPU +// can access all GPU allocations directly via UVM. 
+class MetadataCache { public: - static size_t overhead(); + explicit MetadataCache(bool uses_gpu); + + // Disable copying and assignment. + MetadataCache(const MetadataCache&) = delete; + MetadataCache& operator=(const MetadataCache&) = delete; + + // Returns the Metadata for a memory block. When MetadataCache is + // used to manage CPU memory, the Metadata resides at the beginning + // of the memory block; when used to manage GPU memory, the + // Meatadata resides in CPU memory indexed by cache_. + Metadata load(const MemoryBlock* memory_block) const; + + // Saves the Metadata of a memory block into the cache. For CPU + // memory block, writes the Metadata to the beginning of the memory + // block; whereas for GPU memory, writes it to cache_. + void save(MemoryBlock* memory_block, const Metadata& meta_data); + + // For GPU memory block, erases its Metadata from cache_. + void invalidate(MemoryBlock* memory_block); + + private: + typedef std::unordered_map MetadataMap; + MetadataMap cache_; + bool uses_gpu_; }; } // namespace detail diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc index 43249e842a..6bcb3a659e 100644 --- a/paddle/fluid/memory/detail/meta_cache.cc +++ b/paddle/fluid/memory/detail/meta_cache.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/memory/detail/meta_cache.h" #include "glog/logging.h" #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/platform/assert.h" @@ -23,7 +22,7 @@ namespace detail { MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {} -Metadata MetadataCache::load(const MemoryBlock* block) { +Metadata MetadataCache::load(const MemoryBlock* block) const { if (uses_gpu_) { auto existing_metadata = cache_.find(block); PADDLE_ASSERT(existing_metadata->second.check_guards()); @@ -36,8 +35,8 @@ Metadata MetadataCache::load(const MemoryBlock* block) { } } -void MetadataCache::store(MemoryBlock* block, - const Metadata& original_metadata) { +void MetadataCache::save(MemoryBlock* block, + const Metadata& original_metadata) { auto metadata = original_metadata; metadata.update_guards(); diff --git a/paddle/fluid/memory/detail/meta_cache.h b/paddle/fluid/memory/detail/meta_cache.h deleted file mode 100644 index 3283d756a6..0000000000 --- a/paddle/fluid/memory/detail/meta_cache.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_data.h" - -#include - -namespace paddle { -namespace memory { -namespace detail { - -/** - * \brief A cache for accessing memory block meta-data that may be expensive - * to access directly. - * - * \note This class exists to unify the metadata format between GPU and CPU - * allocations. 
It should be removed when the CPU can access all GPU - * allocations directly via UVM. - */ -class MetadataCache { - public: - explicit MetadataCache(bool uses_gpu); - - public: - /*! \brief Load the associated metadata for the specified memory block. */ - Metadata load(const MemoryBlock* memory_block); - - /*! \brief Store the associated metadata for the specified memory block. */ - void store(MemoryBlock* memory_block, const Metadata& meta_data); - - /*! \brief Indicate that the specified metadata will no longer be used. */ - void invalidate(MemoryBlock* memory_block); - - public: - MetadataCache(const MetadataCache&) = delete; - MetadataCache& operator=(const MetadataCache&) = delete; - - private: - bool uses_gpu_; - - private: - typedef std::unordered_map MetadataMap; - - private: - MetadataMap cache_; -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/fluid/memory/detail/meta_data.cc b/paddle/fluid/memory/detail/meta_data.cc index ad862af170..02f5235d14 100644 --- a/paddle/fluid/memory/detail/meta_data.cc +++ b/paddle/fluid/memory/detail/meta_data.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/memory/detail/meta_data.h" - #include +#include "paddle/fluid/memory/detail/memory_block.h" + namespace paddle { namespace memory { namespace detail { @@ -37,25 +37,29 @@ Metadata::Metadata() left_buddy(nullptr), right_buddy(nullptr) {} +namespace { + template -inline void hash_combine(std::size_t& seed, const T& v) { +inline void hash_combine(std::size_t* seed, const T& v) { std::hash hasher; - seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + (*seed) ^= hasher(v) + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2); } -inline size_t hash(const Metadata* metadata, size_t initial_seed) { +inline size_t hash(const Metadata& metadata, size_t initial_seed) { size_t seed = initial_seed; - hash_combine(seed, (size_t)metadata->type); - hash_combine(seed, metadata->index); - hash_combine(seed, metadata->size); - hash_combine(seed, metadata->total_size); - hash_combine(seed, metadata->left_buddy); - hash_combine(seed, metadata->right_buddy); + hash_combine(&seed, static_cast(metadata.type)); + hash_combine(&seed, metadata.index); + hash_combine(&seed, metadata.size); + hash_combine(&seed, metadata.total_size); + hash_combine(&seed, metadata.left_buddy); + hash_combine(&seed, metadata.right_buddy); return seed; } +} // namespace + void Metadata::update_guards() { guard_begin = hash(this, 1); guard_end = hash(this, 2); diff --git a/paddle/fluid/memory/detail/meta_data.h b/paddle/fluid/memory/detail/meta_data.h deleted file mode 100644 index 14895ee872..0000000000 --- a/paddle/fluid/memory/detail/meta_data.h +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include "paddle/fluid/memory/detail/memory_block.h" - -#include - -namespace paddle { -namespace memory { -namespace detail { - -class Metadata { - public: - Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l, - MemoryBlock* r); - Metadata(); - - public: - /*! \brief Update the guards when metadata is changed */ - void update_guards(); - - /*! \brief Check consistency to previous modification */ - bool check_guards() const; - - public: - // TODO(gangliao): compress this - // clang-format off - size_t guard_begin = 0; - MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK; - size_t index = 0; - size_t size = 0; - size_t total_size = 0; - MemoryBlock* left_buddy = nullptr; - MemoryBlock* right_buddy = nullptr; - size_t guard_end = 0; - // clang-format on -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index a45f8c33ee..d539052916 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -13,16 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/memory/detail/system_allocator.h" -#include "paddle/fluid/platform/assert.h" -#include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include // for malloc and free #include // for mlock and munlock #include // for std::max #include "gflags/gflags.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" // If use_pinned_memory is true, CPUAllocator calls mlock, which // returns pinned and locked memory as staging areas for data exchange @@ -35,13 +35,13 @@ namespace paddle { namespace memory { namespace detail { -void* CPUAllocator::Alloc(size_t& index, size_t size) { +void* CPUAllocator::Alloc(size_t* index, size_t size) { // According to http://www.cplusplus.com/reference/cstdlib/malloc/, // malloc might not return nullptr if size is zero, but the returned // pointer shall not be dereferenced -- so we make it nullptr. if (size <= 0) return nullptr; - index = 0; // unlock memory + *index = 0; // unlock memory void* p; @@ -56,7 +56,7 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) { if (p != nullptr) { if (FLAGS_use_pinned_memory) { - index = 1; + *index = 1; mlock(p, size); // lock memory } } @@ -75,7 +75,7 @@ bool CPUAllocator::UseGpu() const { return false; } #ifdef PADDLE_WITH_CUDA -void* GPUAllocator::Alloc(size_t& index, size_t size) { +void* GPUAllocator::Alloc(size_t* index, size_t size) { // CUDA documentation doesn't explain if cudaMalloc returns nullptr // if size is 0. We just make sure it does. if (size <= 0) return nullptr; @@ -93,7 +93,7 @@ void* GPUAllocator::Alloc(size_t& index, size_t size) { } if (result == cudaSuccess) { - index = 0; + *index = 0; gpu_alloc_size_ += size; return p; } else { @@ -133,7 +133,7 @@ bool GPUAllocator::UseGpu() const { return true; } // PINNED memory allows direct DMA transfers by the GPU to and from system // memory. It’s locked to a physical address. 
-void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) { +void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) { if (size <= 0) return nullptr; // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size @@ -154,7 +154,7 @@ void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) { cudaError_t result = cudaMallocHost(&p, size); if (result == cudaSuccess) { - index = 1; // PINNED memory + *index = 1; // PINNED memory cuda_pinnd_alloc_size_ += size; return p; } else { diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h index e3c50ef648..a0386a2dad 100644 --- a/paddle/fluid/memory/detail/system_allocator.h +++ b/paddle/fluid/memory/detail/system_allocator.h @@ -29,14 +29,14 @@ namespace detail { class SystemAllocator { public: virtual ~SystemAllocator() {} - virtual void* Alloc(size_t& index, size_t size) = 0; + virtual void* Alloc(size_t* index, size_t size) = 0; virtual void Free(void* p, size_t size, size_t index) = 0; virtual bool UseGpu() const = 0; }; class CPUAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; }; @@ -46,7 +46,7 @@ class GPUAllocator : public SystemAllocator { public: explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {} - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; @@ -58,7 +58,7 @@ class GPUAllocator : public SystemAllocator { class CUDAPinnedAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; diff --git a/paddle/fluid/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc index 3e1926f632..95467a6f60 100644 --- a/paddle/fluid/memory/detail/system_allocator_test.cc +++ b/paddle/fluid/memory/detail/system_allocator_test.cc @@ -22,11 +22,11 @@ limitations under the License. 
*/
 
 DECLARE_bool(use_pinned_memory);
 
-void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
+void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) {
   bool freed = false;
   {
     size_t index;
-    void* p = a.Alloc(index, size);
+    void* p = a->Alloc(index, size);
     if (size > 0) {
       EXPECT_NE(p, nullptr);
     } else {
@@ -36,7 +36,7 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
     int* i = static_cast<int*>(p);
     std::shared_ptr<int> ptr(i, [&](void* p) {
       freed = true;
-      a.Free(p, size, index);
+      a->Free(p, size, index);
     });
   }
   EXPECT_TRUE(freed);
@@ -45,21 +45,21 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
 TEST(CPUAllocator, NoLockMem) {
   FLAGS_use_pinned_memory = false;
   paddle::memory::detail::CPUAllocator a;
-  TestAllocator(a, 2048);
-  TestAllocator(a, 0);
+  TestAllocator(&a, 2048);
+  TestAllocator(&a, 0);
 }
 
 TEST(CPUAllocator, LockMem) {
   FLAGS_use_pinned_memory = true;
   paddle::memory::detail::CPUAllocator a;
-  TestAllocator(a, 2048);
-  TestAllocator(a, 0);
+  TestAllocator(&a, 2048);
+  TestAllocator(&a, 0);
 }
 
 #ifdef PADDLE_WITH_CUDA
 TEST(GPUAllocator, Alloc) {
   paddle::memory::detail::GPUAllocator a(0);
-  TestAllocator(a, 2048);
-  TestAllocator(a, 0);
+  TestAllocator(&a, 2048);
+  TestAllocator(&a, 0);
 }
 #endif
-- 
GitLab
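
Illustrative note (not part of the patch itself): the rewrite above replaces mutable reference parameters such as MetadataCache& and size_t& with pointer parameters, renames MetadataCache::store() to save(), and moves the Metadata struct and the MetadataCache class into memory_block.h. The standalone sketch below shows the caching idea described in the new comments, with Metadata written to the head of a CPU block but kept in a host-side map for GPU blocks. All names, the struct layout, and the main() driver are assumptions local to this sketch; guard hashing, buddy bookkeeping, and the real paddle::memory::detail types are deliberately omitted.

// metadata_cache_sketch.cc -- standalone illustration, not Paddle source.
#include <cstddef>
#include <cstring>
#include <iostream>
#include <unordered_map>

namespace sketch {

class MemoryBlock;  // Opaque handle; a block is just raw bytes elsewhere.

// Simplified stand-in for the Metadata struct folded into memory_block.h.
struct Metadata {
  size_t index;
  size_t size;
};

// Simplified MetadataCache: for CPU blocks the Metadata lives at the head of
// the block itself; for GPU blocks it lives in a host-side map, because the
// host cannot cheaply dereference device pointers.
class MetadataCache {
 public:
  explicit MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {}
  MetadataCache(const MetadataCache&) = delete;
  MetadataCache& operator=(const MetadataCache&) = delete;

  Metadata load(const MemoryBlock* block) const {
    if (uses_gpu_) return cache_.at(block);
    Metadata meta;
    std::memcpy(&meta, block, sizeof(meta));  // read from the block head
    return meta;
  }

  // Call sites in the patched code mutate the cache through a pointer
  // (cache->save(...)) rather than a non-const reference.
  void save(MemoryBlock* block, const Metadata& meta) {
    if (uses_gpu_) {
      cache_[block] = meta;
    } else {
      std::memcpy(block, &meta, sizeof(meta));  // write to the block head
    }
  }

  void invalidate(MemoryBlock* block) {
    if (uses_gpu_) cache_.erase(block);
  }

 private:
  std::unordered_map<const MemoryBlock*, Metadata> cache_;
  bool uses_gpu_;
};

}  // namespace sketch

int main() {
  // CPU mode: the metadata is written into the block's own storage.
  alignas(sketch::Metadata) unsigned char buffer[256];
  auto* block = reinterpret_cast<sketch::MemoryBlock*>(buffer);

  sketch::MetadataCache cache(/*uses_gpu=*/false);
  sketch::Metadata meta;
  meta.index = 0;
  meta.size = sizeof(buffer) - sizeof(sketch::Metadata);
  cache.save(block, meta);

  std::cout << "payload bytes: " << cache.load(block).size << std::endl;
  return 0;
}

As the deleted meta_cache.h comment notes, the host-side map exists only because the CPU cannot read GPU allocations directly; the cache is intended to go away once unified (UVM) addressing makes that possible.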