/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/memory/detail/buddy_allocator.h"
#include "glog/logging.h"

namespace paddle {
namespace memory {
namespace detail {

BuddyAllocator::BuddyAllocator(SystemAllocator* system_allocator,
                               size_t min_chunk_size, size_t max_chunk_size)
    : min_chunk_size_(min_chunk_size),
      max_chunk_size_(max_chunk_size),
      cache_(system_allocator->UseGpu()),
      system_allocator_(std::move(system_allocator)) {}

BuddyAllocator::~BuddyAllocator() {
  VLOG(10) << "BuddyAllocator Disconstructor makes sure that all of these "
              "have actually been freed";
  while (!pool_.empty()) {
    auto block = static_cast<MemoryBlock*>(std::get<2>(*pool_.begin()));
    VLOG(10) << "Free from block (" << block << ", " << max_chunk_size_ << ")";

    system_allocator_->Free(block, max_chunk_size_, block->index(cache_));
    cache_.invalidate(block);
    pool_.erase(pool_.begin());
  }
}

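// Rounds size up to the next multiple of alignment; for example,
// align(100, 64) returns 128, while an already aligned size is unchanged.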
inline size_t align(size_t size, size_t alignment) {
  size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

void* BuddyAllocator::Alloc(size_t unaligned_size) {
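  // The request is padded with room for the MemoryBlock metadata header that
  // precedes each allocation; callers receive MemoryBlock::data() rather than
  // the raw block address.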
  // adjust allocation alignment
  size_t size = align(unaligned_size + sizeof(Metadata), min_chunk_size_);

  // acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size "
           << size;

  // if the allocation is huge, send directly to the system allocator
  if (size > max_chunk_size_) {
    VLOG(10) << "Allocate from system allocator.";
    return SystemAlloc(size);
  }

  // query and allocate from the existing chunk
  auto it = FindExistChunk(size);

  // refill the pool on failure
  if (it == pool_.end()) {
    it = RefillPool();
    // if refilling also fails, give up and return nullptr
    if (it == pool_.end()) {
      return nullptr;
    }
  } else {
    VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it)
             << " at address "
             << reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->data();
  }

  total_used_ += size;
  total_free_ -= size;

  // split the allocation and return data for use
  return reinterpret_cast<MemoryBlock*>(SplitToAlloc(it, size))->data();
}

void BuddyAllocator::Free(void* p) {
  // Point back to metadata
  auto block = static_cast<MemoryBlock*>(p)->metadata();

  // Acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Free from address " << block;

  if (block->type(cache_) == MemoryBlock::HUGE_CHUNK) {
    VLOG(10) << "Free directly from system allocator";
    system_allocator_->Free(block, block->total_size(cache_),
                            block->index(cache_));

    // Invalidate GPU allocation from cache
    cache_.invalidate(block);

    return;
  }

  block->mark_as_free(cache_);

  total_used_ -= block->total_size(cache_);
  total_free_ += block->total_size(cache_);

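  // Coalescing: a freed block tries to re-join its buddy on either side so
  // that larger contiguous chunks become available for later allocations.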
  // Try to merge with the right buddy
  if (block->has_right_buddy(cache_)) {
    VLOG(10) << "Merging this block " << block << " with its right buddy "
             << block->right_buddy(cache_);

    auto right_buddy = block->right_buddy(cache_);

    if (right_buddy->type(cache_) == MemoryBlock::FREE_CHUNK) {
      // Remove the right buddy from the pool
      pool_.erase(IndexSizeAddress(right_buddy->index(cache_),
                                   right_buddy->total_size(cache_),
                                   right_buddy));

      // merge the right buddy into this block
      block->merge(cache_, right_buddy);
    }
  }

  // Try to merge with the left buddy
  if (block->has_left_buddy(cache_)) {
    VLOG(10) << "Merging this block " << block << " with its left buddy "
             << block->left_buddy(cache_);

    auto left_buddy = block->left_buddy(cache_);

    if (left_buddy->type(cache_) == MemoryBlock::FREE_CHUNK) {
      // Remove the left buddy from the pool
      pool_.erase(IndexSizeAddress(left_buddy->index(cache_),
                                   left_buddy->total_size(cache_), left_buddy));

      // merge this block into its left buddy
      left_buddy->merge(cache_, block);
      block = left_buddy;
    }
  }

  // Insert the (possibly merged) block back into the pool
  VLOG(10) << "Inserting free block (" << block << ", "
           << block->total_size(cache_) << ")";
  pool_.insert(
      IndexSizeAddress(block->index(cache_), block->total_size(cache_), block));

  // Clean up if there is too much idle free memory

  // Prefer freeing fallback allocation first
  CleanIdleFallBackAlloc();

  // Free normal allocation
  CleanIdleNormalAlloc();
}

size_t BuddyAllocator::Used() { return total_used_; }

void* BuddyAllocator::SystemAlloc(size_t size) {
  size_t index = 0;
  void* p = system_allocator_->Alloc(index, size);

  VLOG(10) << "Allocated " << p << " from system allocator.";

  if (p == nullptr) return nullptr;

  static_cast<MemoryBlock*>(p)->init(cache_, MemoryBlock::HUGE_CHUNK, index,
                                     size, nullptr, nullptr);

  return static_cast<MemoryBlock*>(p)->data();
}

BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() {
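  // Request one more chunk of max_chunk_size_ bytes from the system allocator
  // and register it in the pool as a FREE_CHUNK.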
#ifdef PADDLE_WITH_CUDA
  if (system_allocator_->UseGpu()) {
    if ((total_used_ + total_free_) == 0) {
      // Compute the maximum allocation size for the first allocation.
      max_chunk_size_ = platform::GpuMaxChunkSize();
    }
  }
#endif

  // Allocate a new maximum sized block
  size_t index = 0;
  void* p = system_allocator_->Alloc(index, max_chunk_size_);

  if (p == nullptr) return pool_.end();

  VLOG(10) << "Creating and inserting new block " << p
           << " from system allocator";

  static_cast<MemoryBlock*>(p)->init(cache_, MemoryBlock::FREE_CHUNK, index,
                                     max_chunk_size_, nullptr, nullptr);

  // gpu fallback allocation
  if (system_allocator_->UseGpu() &&
      static_cast<MemoryBlock*>(p)->index(cache_) == 1) {
    fallback_alloc_count_++;
  }

  total_free_ += max_chunk_size_;

  // insert the new block into the pool and return an iterator to it
  return pool_.insert(IndexSizeAddress(index, max_chunk_size_, p)).first;
}

BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
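  // pool_ keeps (index, size, address) tuples in lexicographic order, so
  // lower_bound returns the first entry not less than (index, size); the loop
  // below checks that the entry is actually large enough and, if not, retries
  // with the next index.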
  size_t index = 0;

  while (1) {
    auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));

    // no matching free chunk in the pool
    if (it == pool_.end()) return it;

    if (std::get<0>(*it) > index) {
      // found a suitable one
      if (std::get<1>(*it) >= size) {
        return it;
      }
      // update and continue
      index = std::get<0>(*it);
      continue;
    }
    return it;
  }
}

void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
                                   size_t size) {
  auto block = static_cast<MemoryBlock*>(std::get<2>(*it));
  pool_.erase(it);

  VLOG(10) << "Split block (" << block << ", " << block->total_size(cache_)
           << ") into";
  block->split(cache_, size);

  VLOG(10) << "Left block (" << block << ", " << block->total_size(cache_)
           << ")";
  block->set_type(cache_, MemoryBlock::ARENA_CHUNK);

  // return the leftover right half to the pool, if the split produced one
  if (block->has_right_buddy(cache_)) {
    if (block->right_buddy(cache_)->type(cache_) == MemoryBlock::FREE_CHUNK) {
      VLOG(10) << "Insert right block (" << block->right_buddy(cache_) << ", "
               << block->right_buddy(cache_)->total_size(cache_) << ")";

      pool_.insert(
          IndexSizeAddress(block->right_buddy(cache_)->index(cache_),
                           block->right_buddy(cache_)->total_size(cache_),
                           block->right_buddy(cache_)));
    }
  }

  return block;
}

void BuddyAllocator::CleanIdleFallBackAlloc() {
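  // Scan the pool from its largest entries downward and hand full-sized
  // fallback chunks back to the system allocator.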
  // If fallback allocation does not exist, return directly
  if (!fallback_alloc_count_) return;

  for (auto pool = pool_.rbegin(); pool != pool_.rend();) {
    // If the free block is smaller than max_chunk_size_, return directly
    if (std::get<1>(*pool) < max_chunk_size_) return;

    MemoryBlock* block = static_cast<MemoryBlock*>(std::get<2>(*pool));

    // If this is not a GPU fallback allocation, return
    if (!system_allocator_->UseGpu() || block->index(cache_) == 0) {
      return;
    }

    VLOG(10) << "Return block " << block << " to fallback allocator.";

    system_allocator_->Free(block, max_chunk_size_, block->index(cache_));
    cache_.invalidate(block);

    pool = PoolSet::reverse_iterator(pool_.erase(std::next(pool).base()));

    total_free_ -= max_chunk_size_;
    fallback_alloc_count_--;

    // If no fallback allocation remains, return directly
    if (!fallback_alloc_count_) return;
  }
}

void BuddyAllocator::CleanIdleNormalAlloc() {
  auto shall_free_alloc = [&]() -> bool {
    // free all fallback allocations
    if (fallback_alloc_count_ > 0) {
      return true;
    }
    // keep 2x overhead if we haven't fallen back
    if ((total_used_ + max_chunk_size_) * 2 < total_free_) {
      return true;
    }
    return false;
  };

  if (!shall_free_alloc()) return;

  for (auto pool = pool_.rbegin(); pool != pool_.rend();) {
    // If the free block is smaller than max_chunk_size_, return directly
    if (std::get<1>(*pool) < max_chunk_size_) return;

    MemoryBlock* block = static_cast<MemoryBlock*>(std::get<2>(*pool));

    VLOG(10) << "Return block " << block << " to base allocator.";

    system_allocator_->Free(block, max_chunk_size_, block->index(cache_));
    cache_.invalidate(block);

    pool = PoolSet::reverse_iterator(pool_.erase(std::next(pool).base()));

    total_free_ -= max_chunk_size_;

    if (!shall_free_alloc()) return;
  }
}

}  // namespace detail
}  // namespace memory
}  // namespace paddle
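
// A minimal usage sketch (illustrative only, not compiled here). It assumes
// the CPUAllocator from system_allocator.h and the platform chunk-size
// helpers available elsewhere in the tree; adjust the names to your checkout:
//
//   using paddle::memory::detail::BuddyAllocator;
//   using paddle::memory::detail::CPUAllocator;
//
//   BuddyAllocator allocator(new CPUAllocator,
//                            paddle::platform::CpuMinChunkSize(),
//                            paddle::platform::CpuMaxChunkSize());
//   void* p = allocator.Alloc(1024);  // rounded up to a min_chunk_size_ multiple
//   allocator.Free(p);                // coalesced and possibly returned to the OS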