Commit 74691789 authored by liaogang

ENH: add memory unit test

Parent ada1c20b
add_subdirectory(detail)
-cc_library(memory
-SRCS
-memory.cc)
+cc_library(memory SRCS memory.cc)
cc_library(paddle_memory
DEPS
memory meta_data
meta_cache memory_block
buddy_allocator system_allocator)
cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory)
if(${WITH_GPU})
-nv_library(system_allocator SRCS system_allocator.cc DEPS gflags gpu_info)
+nv_library(system_allocator SRCS system_allocator.cc DEPS gflags cpu_info gpu_info)
else(${WITH_GPU})
-cc_library(system_allocator SRCS system_allocator.cc DEPS gflags)
+cc_library(system_allocator SRCS system_allocator.cc DEPS gflags cpu_info)
endif(${WITH_GPU})
cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator)
......
@@ -24,10 +24,20 @@ BuddyAllocator::BuddyAllocator(SystemAllocator* system_allocator,
: min_chunk_size_(min_chunk_size),
max_chunk_size_(max_chunk_size),
cache_(system_allocator->UseGpu()),
-system_allocator_(std::move(system_allocator)) {
-PADDLE_ASSERT(min_chunk_size > 0);
-PADDLE_ASSERT(max_chunk_size > 0);
-PADDLE_ASSERT(system_allocator != nullptr);
+system_allocator_(std::move(system_allocator)) {}
BuddyAllocator::~BuddyAllocator() {
DLOG(INFO) << "BuddyAllocator Disconstructor makes sure that all of these "
"have actually been freed";
while (!pool_.empty()) {
auto block = static_cast<MemoryBlock*>(std::get<2>(*pool_.begin()));
DLOG(INFO) << "Free from block (" << block << ", " << max_chunk_size_
<< ")";
system_allocator_->Free(block, max_chunk_size_, block->index(cache_));
cache_.invalidate(block);
pool_.erase(pool_.begin());
}
}
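The destructor's std::get<2>(*pool_.begin()) only makes sense given the layout of a pool entry. The PoolSet typedef is not shown in this diff, so the following is a minimal sketch of the assumed layout, not the authoritative definition:

#include <cstddef>
#include <set>
#include <tuple>

// Sketch only: assumed shape of a pool entry, matching the std::get<2>(...) access above.
// Each free chunk is keyed by (system allocator index, total chunk size, raw address).
using IndexSizeAddress = std::tuple<std::size_t, std::size_t, void*>;
using PoolSet = std::set<IndexSizeAddress>;

// In the destructor above, std::get<2>(entry) is the MemoryBlock* handed to
// system_allocator_->Free(), and block->index(cache_) recovers the allocator index.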
inline size_t align(size_t size, size_t alignment) {
@@ -62,7 +72,7 @@ void* BuddyAllocator::Alloc(size_t unaligned_size) {
return nullptr;
}
} else {
DLOG(INFO) << " Allocation from existing memory block " << std::get<2>(*it)
DLOG(INFO) << "Allocation from existing memory block " << std::get<2>(*it)
<< " at address "
<< reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->data();
}
@@ -142,6 +152,8 @@ void BuddyAllocator::Free(void* p) {
// TODO(gangliao): Clean up if there is too much free memory
}
size_t BuddyAllocator::Used() { return total_used_; }
void* BuddyAllocator::SystemAlloc(size_t size) {
size_t index = 0;
void* p = system_allocator_->Alloc(index, size);
@@ -172,7 +184,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() {
if (p == nullptr) return pool_.end();
DLOG(INFO) << " Creating and inserting new block " << p
DLOG(INFO) << "Creating and inserting new block " << p
<< " from system allocator";
static_cast<MemoryBlock*>(p)->init(cache_, MemoryBlock::FREE_CHUNK, index,
@@ -211,20 +223,19 @@ void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
auto block = static_cast<MemoryBlock*>(std::get<2>(*it));
pool_.erase(it);
DLOG(INFO) << " Split block (" << block << ", " << block->total_size(cache_)
DLOG(INFO) << "Split block (" << block << ", " << block->total_size(cache_)
<< ") into";
block->split(cache_, size);
DLOG(INFO) << " Left block (" << block << ", " << block->total_size(cache_)
DLOG(INFO) << "Left block (" << block << ", " << block->total_size(cache_)
<< ")";
block->set_type(cache_, MemoryBlock::ARENA_CHUNK);
// the rest of the memory, if it exists
if (block->has_right_buddy(cache_)) {
if (block->right_buddy(cache_)->type(cache_) == MemoryBlock::FREE_CHUNK) {
DLOG(INFO) << " Insert right block (" << block->right_buddy(cache_)
<< ", " << block->right_buddy(cache_)->total_size(cache_)
<< ")";
DLOG(INFO) << "Insert right block (" << block->right_buddy(cache_) << ", "
<< block->right_buddy(cache_)->total_size(cache_) << ")";
pool_.insert({block->right_buddy(cache_)->index(cache_),
block->right_buddy(cache_)->total_size(cache_),
......
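To make the split flow above concrete, here is a hypothetical walk-through of the arithmetic only; the sizes and variable names below are illustrative and do not come from this diff:

#include <cstddef>
#include <cstdio>

int main() {
  // A free chunk pulled out of pool_ (hypothetical 64 KiB total size).
  const std::size_t chunk_size = 64 << 10;
  // The aligned allocation request carved out of it (hypothetical 4 KiB).
  const std::size_t request = 4 << 10;

  // block->split(cache_, request) leaves a left part that becomes the
  // ARENA_CHUNK returned to the caller ...
  const std::size_t left_size = request;
  // ... and a right buddy that stays a FREE_CHUNK and is reinserted into pool_.
  const std::size_t right_size = chunk_size - request;

  std::printf("left = %zu bytes, right buddy = %zu bytes\n", left_size, right_size);
  return 0;
}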
@@ -93,33 +93,6 @@ class BuddyAllocator {
std::mutex mutex_;
};
BuddyAllocator* GetCPUBuddyAllocator() {
static BuddyAllocator* a = nullptr;
if (a == nullptr) {
a = new BuddyAllocator(new CPUAllocator, platform::CpuMinChunkSize(),
platform::CpuMaxChunkSize());
}
return a;
}
#ifndef PADDLE_ONLY_CPU // The following code is for CUDA.
BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
static BuddyAllocator** as = NULL;
if (as == NULL) {
int gpu_num = platform::GpuDeviceCount();
as = new BuddyAllocator*[gpu_num];
for (int gpu = 0; gpu < gpu_num; gpu++) {
as[gpu] =
new BuddyAllocator(new GPUAllocator, platform::GpuMinChunkSize(),
platform::GpuMaxChunkSize());
}
}
return as[gpu_id];
}
#endif // PADDLE_ONLY_CPU
} // namespace detail
} // namespace memory
} // namespace paddle
@@ -22,37 +22,67 @@ limitations under the License. */
namespace paddle {
namespace memory {
detail::BuddyAllocator* GetCPUBuddyAllocator() {
static detail::BuddyAllocator* a = nullptr;
if (a == nullptr) {
a = new detail::BuddyAllocator(new detail::CPUAllocator,
platform::CpuMinChunkSize(),
platform::CpuMaxChunkSize());
}
return a;
}
#ifndef PADDLE_ONLY_CPU // The following code is for CUDA.
detail::BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
static detail::BuddyAllocator** as = NULL;
if (as == NULL) {
int gpu_num = platform::GpuDeviceCount();
as = new detail::BuddyAllocator*[gpu_num];
for (int gpu = 0; gpu < gpu_num; gpu++) {
platform::SetDeviceId(gpu);
as[gpu] = new detail::BuddyAllocator(new detail::GPUAllocator,
platform::GpuMinChunkSize(),
platform::GpuMaxChunkSize());
}
}
return as[gpu_id];
}
#endif // PADDLE_ONLY_CPU
void* Alloc(platform::Place pl, size_t size) {
#ifndef PADDLE_ONLY_CPU
if (paddle::platform::is_gpu_place(pl)) {
size_t gpu_id = boost::get<platform::GPUPlace>(pl).device;
-return detail::GetGPUBuddyAllocator(gpu_id)->Alloc(size);
+return GetGPUBuddyAllocator(gpu_id)->Alloc(size);
}
#endif // PADDLE_ONLY_CPU
PADDLE_ASSERT(paddle::platform::is_cpu_place(pl));
-return detail::GetCPUBuddyAllocator()->Alloc(size);
+return GetCPUBuddyAllocator()->Alloc(size);
}
void Free(paddle::platform::Place pl, void* p) {
#ifndef PADDLE_ONLY_CPU
if (paddle::platform::is_gpu_place(pl)) {
size_t gpu_id = boost::get<platform::GPUPlace>(pl).device;
-detail::GetGPUBuddyAllocator(gpu_id)->Free(p);
+GetGPUBuddyAllocator(gpu_id)->Free(p);
return;
}
#endif // PADDLE_ONLY_CPU
PADDLE_ASSERT(paddle::platform::is_cpu_place(pl));
-detail::GetCPUBuddyAllocator()->Free(p);
+GetCPUBuddyAllocator()->Free(p);
}
size_t Used(paddle::platform::Place pl) {
#ifndef PADDLE_ONLY_CPU
if (paddle::platform::is_gpu_place(pl)) {
size_t gpu_id = boost::get<platform::GPUPlace>(pl).device;
-return detail::GetGPUBuddyAllocator(gpu_id)->Used();
+return GetGPUBuddyAllocator(gpu_id)->Used();
}
#endif // PADDLE_ONLY_CPU
PADDLE_ASSERT(paddle::platform::is_cpu_place(pl));
-return detail::GetCPUBuddyAllocator()->Used();
+return GetCPUBuddyAllocator()->Used();
}
} // namespace memory
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/memory/memory.h"
#include "paddle/platform/place.h"
#include "gtest/gtest.h"
TEST(BuddyAllocator, CPUAllocation) {
void *p = nullptr;
EXPECT_EQ(p, nullptr);
paddle::platform::CPUPlace cpu;
p = paddle::memory::Alloc(cpu, 4096);
EXPECT_NE(p, nullptr);
paddle::memory::Free(cpu, p);
}
#ifndef PADDLE_ONLY_CPU
TEST(BuddyAllocator, GPUAllocation) {
void *p = nullptr;
EXPECT_EQ(p, nullptr);
paddle::platform::GPUPlace gpu(0);
p = paddle::memory::Alloc(gpu, 4096);
EXPECT_NE(p, nullptr);
paddle::memory::Free(gpu, p);
}
#endif // PADDLE_ONLY_CPU
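Since memory.h also exposes Used(), a natural extension of these tests would cover the accounting around a single allocation. This diff does not show how Used() is computed, so the sketch below only assumes it grows by at least the requested bytes after Alloc and never grows on Free; it is not part of this commit:

// Sketch only: hypothetical extra test, relying on the assumptions stated above.
TEST(BuddyAllocator, CPUUsedAccounting) {
  paddle::platform::CPUPlace cpu;
  size_t before_alloc = paddle::memory::Used(cpu);

  void *p = paddle::memory::Alloc(cpu, 4096);
  EXPECT_NE(p, nullptr);

  size_t after_alloc = paddle::memory::Used(cpu);
  EXPECT_GE(after_alloc, before_alloc + 4096);  // at least the requested bytes are accounted for

  paddle::memory::Free(cpu, p);
  EXPECT_LE(paddle::memory::Used(cpu), after_alloc);  // freeing should not increase the counter
}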
@@ -56,7 +56,7 @@ size_t GpuMaxAllocSize() {
GpuMemoryUsage(available, total);
// Reserve the rest for page tables, etc.
-return total * FLAGS_fraction_of_gpu_memory_to_use;
+return static_cast<size_t>(total * FLAGS_fraction_of_gpu_memory_to_use);
}
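The added static_cast makes the truncation of the double product explicit instead of relying on the implicit conversion at the return statement. A hypothetical numeric illustration (the flag's default value is not shown in this diff, so 0.9 below is an assumption):

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t total = 8ULL << 30;  // suppose the device reports 8 GiB in total
  const double fraction = 0.9;           // hypothetical FLAGS_fraction_of_gpu_memory_to_use value
  const std::size_t max_alloc = static_cast<std::size_t>(total * fraction);  // truncated toward zero
  std::printf("budget: %zu bytes (~%.2f GiB)\n", max_alloc,
              max_alloc / static_cast<double>(1ULL << 30));
  return 0;
}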
size_t GpuMinChunkSize() {
......