buddy_allocator.cc

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/detail/buddy_allocator.h"
#include <algorithm>
#include <utility>
#include "glog/logging.h"

#ifdef PADDLE_WITH_CUDA
DECLARE_uint64(reallocate_gpu_memory_in_mb);
#endif

namespace paddle {
namespace memory {
namespace detail {

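// A buddy allocator layered over a SystemAllocator. min_chunk_size is the
// smallest unit handed out (requests are rounded up to it); max_chunk_size is
// the largest chunk managed by the buddy pool, and larger requests are
// forwarded to the system allocator directly.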
BuddyAllocator::BuddyAllocator(
    std::unique_ptr<SystemAllocator> system_allocator, size_t min_chunk_size,
    size_t max_chunk_size)
    : min_chunk_size_(min_chunk_size),
      max_chunk_size_(max_chunk_size),
      cache_(system_allocator->UseGpu()),
      system_allocator_(std::move(system_allocator)) {}

BuddyAllocator::~BuddyAllocator() {
  VLOG(10) << "BuddyAllocator destructor makes sure that all of these "
              "have actually been freed";
  while (!pool_.empty()) {
    auto block = static_cast<MemoryBlock*>(std::get<2>(*pool_.begin()));
    auto desc = cache_.LoadDesc(block);
    VLOG(10) << "Free from block (" << block << ", " << desc->get_total_size()
             << ")";

    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());
    cache_.Invalidate(block);
    pool_.erase(pool_.begin());
  }
}

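// Round size up to the nearest multiple of alignment.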
inline size_t align(size_t size, size_t alignment) {
  size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

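// Allocate a block of at least unaligned_size bytes. The request is padded
// with the MemoryBlock descriptor and aligned to min_chunk_size_; requests
// larger than max_chunk_size_ bypass the pool and go to the system allocator.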
void* BuddyAllocator::Alloc(size_t unaligned_size) {
  // adjust allocation alignment
  size_t size =
      align(unaligned_size + sizeof(MemoryBlock::Desc), min_chunk_size_);

  // acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size "
           << size;

  // if the allocation is huge, send directly to the system allocator
  if (size > max_chunk_size_) {
    VLOG(10) << "Allocate from system allocator.";
    return SystemAlloc(size);
  }

  // query and allocate from the existing chunk
  auto it = FindExistChunk(size);

  // refill the pool if failure
  if (it == pool_.end()) {
    it = RefillPool(size);
    // if it still fails, return nullptr
    if (it == pool_.end()) {
      return nullptr;
    }
  } else {
    VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it)
             << " at address "
             << reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->Data();
  }

  total_used_ += size;
  total_free_ -= size;

  // split the allocation and return data for use
  return reinterpret_cast<MemoryBlock*>(SplitToAlloc(it, size))->Data();
}

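// Free a block returned by Alloc(). Huge chunks go straight back to the
// system allocator; pool blocks are marked free, merged with any free left or
// right buddy, and reinserted into the free pool.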
void BuddyAllocator::Free(void* p) {
  // Point back to metadata
  auto block = static_cast<MemoryBlock*>(p)->Metadata();

  // Acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Free from address " << block;

  auto* desc = cache_.LoadDesc(block);
  if (desc->get_type() == MemoryBlock::HUGE_CHUNK) {
    VLOG(10) << "Free directly from system allocator";
    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());

    // Invalidate GPU allocation from cache
    cache_.Invalidate(block);

    return;
  }

  block->MarkAsFree(&cache_);

  total_used_ -= desc->get_total_size();
  total_free_ += desc->get_total_size();

  // Try to merge with the right buddy
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    VLOG(10) << "Merging this block " << block << " with its right buddy "
             << right_buddy;

    auto rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take away right buddy from pool
      pool_.erase(IndexSizeAddress(rb_desc->get_index(),
                                   rb_desc->get_total_size(), right_buddy));

      // merge its right buddy to the block
      block->Merge(&cache_, right_buddy);
    }
  }

  // Try to merge with the left buddy
  MemoryBlock* left_buddy = block->GetLeftBuddy(&cache_);
  if (left_buddy) {
    VLOG(10) << "Merging this block " << block << " with its left buddy "
             << left_buddy;

    auto* lb_desc = cache_.LoadDesc(left_buddy);
    if (lb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take away left buddy from pool
      pool_.erase(IndexSizeAddress(lb_desc->get_index(),
                                   lb_desc->get_total_size(), left_buddy));

      // merge the block to its left buddy
      left_buddy->Merge(&cache_, block);
      block = left_buddy;
      desc = lb_desc;
    }
  }

  // Dump the merged block back into the free pool
  VLOG(10) << "Inserting free block (" << block << ", "
           << desc->get_total_size() << ")";
  pool_.insert(
      IndexSizeAddress(desc->get_index(), desc->get_total_size(), block));
}

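// Return to the system allocator every chunk that is entirely free, i.e.
// whose free block in the pool still matches the (index, size, address)
// record stored in chunks_ when the chunk was created.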
void BuddyAllocator::Release() {
  std::lock_guard<std::mutex> lock(mutex_);
  int num = 0;
  uint64_t bytes = 0;
  for (auto iter = pool_.begin(); iter != pool_.end();) {
    // Reset for every pool entry; otherwise one released chunk would cause
    // every following entry to be erased from the pool as well.
    bool del_flag = false;
    auto remain_size = std::get<1>(*iter);
    auto remain_ptr = std::get<2>(*iter);
    for (auto& chunk : chunks_) {
      auto init_size = std::get<1>(chunk);
      auto init_ptr = std::get<2>(chunk);

      if (init_size == remain_size && init_ptr == remain_ptr) {
        ++num;
        bytes += init_size;
        total_free_ -= init_size;
        auto block = static_cast<MemoryBlock*>(std::get<2>(chunk));
        system_allocator_->Free(init_ptr, init_size, std::get<0>(chunk));
        cache_.Invalidate(block);
        del_flag = true;
        break;
      }
    }

    if (del_flag) {
      iter = pool_.erase(iter);
    } else {
      iter++;
    }
  }
  VLOG(10) << "Release " << num << " chunk, Free " << bytes << " bytes.";
}

size_t BuddyAllocator::Used() { return total_used_; }
size_t BuddyAllocator::GetMinChunkSize() { return min_chunk_size_; }
size_t BuddyAllocator::GetMaxChunkSize() { return max_chunk_size_; }

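// Allocate a huge chunk directly from the system allocator and tag it as
// HUGE_CHUNK so that Free() can hand it back without touching the pool.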
void* BuddyAllocator::SystemAlloc(size_t size) {
  size_t index = 0;
  void* p = system_allocator_->Alloc(&index, size);

  VLOG(10) << "Allocated " << p << " from system allocator.";

  if (p == nullptr) return nullptr;

  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::HUGE_CHUNK, index,
                                     size, nullptr, nullptr);

  return static_cast<MemoryBlock*>(p)->Data();
}

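// Grow the pool with a fresh chunk from the system allocator. On GPU the
// first chunk is sized by platform::GpuInitAllocSize() and later chunks by
// the re-allocation size, never smaller than the requested bytes.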
BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
    size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
  size_t index = 0;

#ifdef PADDLE_WITH_CUDA
  if (system_allocator_->UseGpu()) {
    if ((total_used_ + total_free_) == 0) {
      // Compute the GPU allocation size for the first allocation.
      allocate_bytes = std::max(platform::GpuInitAllocSize(), request_bytes);
    } else {
      // Compute the re-allocation size. The value is cached in realloc_size_
      // once the user sets FLAGS_reallocate_gpu_memory_in_mb to a fixed value.
      if (realloc_size_ == 0 || FLAGS_reallocate_gpu_memory_in_mb == 0ul) {
        realloc_size_ = platform::GpuReallocSize();
      }
      allocate_bytes = std::max(realloc_size_, request_bytes);
    }
  }
#endif

  // Allocate a new block
  void* p = system_allocator_->Alloc(&index, allocate_bytes);

  if (p == nullptr) return pool_.end();

  VLOG(10) << "Creating and inserting new block " << p
           << " from system allocator";

  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::FREE_CHUNK, index,
                                     allocate_bytes, nullptr, nullptr);

  total_free_ += allocate_bytes;

  // record the chunk.
  chunks_.insert(IndexSizeAddress(index, allocate_bytes, p));

  // dump the block into pool
  return pool_.insert(IndexSizeAddress(index, allocate_bytes, p)).first;
}

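// Search the free pool, ordered by (index, size, address), for the first
// chunk of at least `size` bytes, moving on to higher indices when the
// current index has no block that is large enough.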
BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
  size_t index = 0;

  while (1) {
    auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));

    // no matching chunk in the pool
    if (it == pool_.end()) return it;

    if (std::get<0>(*it) > index) {
      // find a suitable one
      if (std::get<1>(*it) >= size) {
        return it;
      }
      // update the index and continue
      index = std::get<0>(*it);
      continue;
    }
    return it;
  }
}

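// Remove the chosen block from the pool, split off `size` bytes for the
// caller (marked as ARENA_CHUNK), and reinsert the remaining right buddy,
// if it is a free chunk, back into the pool.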
void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
                                   size_t size) {
  auto block = static_cast<MemoryBlock*>(std::get<2>(*it));
  auto desc = cache_.LoadDesc(block);
  pool_.erase(it);

  VLOG(10) << "Split block (" << block << ", " << desc->get_total_size()
           << ") into";
  block->Split(&cache_, size);

  VLOG(10) << "Left block (" << block << ", " << desc->get_total_size() << ")";
  desc->set_type(MemoryBlock::ARENA_CHUNK);

  // return the remaining free memory, if any, to the pool
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    auto* rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      VLOG(10) << "Insert right block (" << right_buddy << ", "
               << rb_desc->get_total_size() << ")";

      pool_.insert(IndexSizeAddress(rb_desc->get_index(),
                                    rb_desc->get_total_size(), right_buddy));
    }
  }

  return block;
}

}  // namespace detail
}  // namespace memory
}  // namespace paddle