From b22dd12854150c31b9cb9e3e550bdee4b5df5977 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Wed, 28 Jun 2017 01:32:06 +0800
Subject: [PATCH] ENH: Add buddy allocator draft

---
 paddle/memory/detail/CMakeLists.txt    |  4 +-
 paddle/memory/detail/buddy_allocator.h | 80 ++++++++++++++++++++++++++
 2 files changed, 82 insertions(+), 2 deletions(-)
 create mode 100644 paddle/memory/detail/buddy_allocator.h

diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt
index c16dfadeb..cd5622203 100644
--- a/paddle/memory/detail/CMakeLists.txt
+++ b/paddle/memory/detail/CMakeLists.txt
@@ -1,5 +1,5 @@
 if(${WITH_GPU})
-  nv_test(system_allocator_test SRCS system_allocator_test.cc)
+  nv_test(system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog)
 else(${WITH_GPU})
-  cc_test(system_allocator_test SRCS system_allocator_test.cc)
+  cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog)
 endif(${WITH_GPU})
diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/memory/detail/buddy_allocator.h
new file mode 100644
index 000000000..35e96fd50
--- /dev/null
+++ b/paddle/memory/detail/buddy_allocator.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <mutex>
+#include "paddle/memory/detail/system_allocator.h"
+
+namespace paddle {
+namespace memory {
+namespace detail {
+
+template <typename Allocator>
+class BuddyAllocator {
+ public:
+  // TODO(gangliao): This is a draft; the buddy allocator algorithm will be added soon.
+  BuddyAllocator() {}
+  ~BuddyAllocator() {}
+
+ public:
+  void* Alloc(size_t size) {
+    return Allocator::Alloc(size);
+  }
+  void Free(void*) {
+    // All bookkeeping information, including the block size, is stored in
+    // per-block metadata, so adding a `size` parameter to the
+    // `Free(void*)` interface would be redundant.
+  }
+  size_t Used();
+
+ public:
+  BuddyAllocator(const BuddyAllocator&) = delete;
+  BuddyAllocator& operator=(const BuddyAllocator&) = delete;
+
+ private:
+  size_t min_alloc_size_;
+  size_t max_alloc_size_;
+
+ private:
+  std::mutex mutex_;
+};
+
+BuddyAllocator<CPUAllocator>* GetCPUBuddyAllocator() {
+  static BuddyAllocator<CPUAllocator>* a = nullptr;
+  if (a == nullptr) {
+    a = new BuddyAllocator<CPUAllocator>();
+  }
+  return a;
+}
+
+#ifndef PADDLE_ONLY_CPU  // The following code is CUDA-only.
+
+BuddyAllocator<GPUAllocator>* GetGPUBuddyAllocator(int gpu_id) {
+  static BuddyAllocator<GPUAllocator>** as = nullptr;
+  if (as == nullptr) {
+    int gpu_num = platform::GetDeviceCount();
+    as = new BuddyAllocator<GPUAllocator>*[gpu_num];
+    for (int gpu = 0; gpu < gpu_num; gpu++) {
+      as[gpu] = new BuddyAllocator<GPUAllocator>();
+    }
+  }
+  return as[gpu_id];
+}
+
+#endif  // PADDLE_ONLY_CPU
+
+}  // namespace detail
+}  // namespace memory
+}  // namespace paddle
-- 
GitLab
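
Reviewer note (illustration only, not part of the patch): the comment in Free(void*) assumes that the block size lives in per-block metadata, so the allocator can recover it from the pointer alone. Below is a minimal, self-contained C++ sketch of that idea under that assumption; the names Metadata and ToyAllocator are hypothetical and are not PaddlePaddle APIs.

#include <cstddef>
#include <cstdlib>
#include <iostream>

// Hypothetical per-block header: records the total block size so that
// Free(void*) can recover it without a size argument.
struct Metadata {
  size_t size;
};

// ToyAllocator is an illustrative stand-in, not the PaddlePaddle allocator.
class ToyAllocator {
 public:
  static void* Alloc(size_t size) {
    // Reserve room for the header in front of the user-visible memory.
    auto* block = static_cast<Metadata*>(std::malloc(sizeof(Metadata) + size));
    if (block == nullptr) return nullptr;
    block->size = sizeof(Metadata) + size;
    return block + 1;  // hand back the memory just past the header
  }

  static void Free(void* p) {
    if (p == nullptr) return;
    // Step back over the header to find the metadata for this block.
    Metadata* block = static_cast<Metadata*>(p) - 1;
    std::cout << "freeing " << block->size << " bytes (header included)\n";
    std::free(block);
  }
};

int main() {
  void* p = ToyAllocator::Alloc(256);
  ToyAllocator::Free(p);  // size is looked up from the metadata, not passed in
  return 0;
}

A real buddy allocator would keep more in the header (block order, free flag, links into free lists), but the size-recovery mechanism behind the parameter-less Free(void*) is the same.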