// buddy_allocator.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/allocation/buddy_allocator.h"

#include <algorithm>

#include "glog/logging.h"
#include "paddle/phi/core/flags.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#define USE_DEVICE
PHI_DECLARE_uint64(reallocate_gpu_memory_in_mb);
#endif

#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace memory {
namespace detail {

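// BuddyAllocator obtains large chunks of memory from a SystemAllocator and
// serves smaller requests by repeatedly splitting them; on Free(), adjacent
// "buddy" blocks are merged back together. Free blocks live in pool_, an
// ordered set of (pool index, size, address) tuples, and every chunk obtained
// from the system allocator is recorded in chunks_ so that Release() can hand
// completely free chunks back to the device.
//
// A minimal usage sketch (hypothetical call site; the concrete SystemAllocator
// subclass, chunk sizes, and any defaulted constructor arguments depend on the
// build and device):
//
//   auto buddy = std::make_unique<BuddyAllocator>(
//       std::unique_ptr<SystemAllocator>(new CPUAllocator()),
//       /*min_chunk_size=*/1 << 12,
//       /*max_chunk_size=*/1 << 22);
//   void* p = buddy->Alloc(1024);
//   buddy->Free(p);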
BuddyAllocator::BuddyAllocator(
    std::unique_ptr<SystemAllocator> system_allocator,
    size_t min_chunk_size,
    size_t max_chunk_size,
    size_t extra_padding_size,
    const std::string dev_type)
    : min_chunk_size_(min_chunk_size),
      max_chunk_size_(max_chunk_size),
      extra_padding_size_(extra_padding_size),
      cache_(system_allocator->UseGpu()),
      system_allocator_(std::move(system_allocator)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (!dev_type.empty()) {
    init_allocate_size_func_ = [dev_type]() {
      return phi::DeviceManager::GetInitAllocSize(
          platform::PlaceHelper::CreatePlace(dev_type));
    };
    re_allocate_size_func_ = [dev_type]() {
      return phi::DeviceManager::GetReallocSize(
          platform::PlaceHelper::CreatePlace(dev_type));
    };
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    init_allocate_size_func_ = &platform::GpuInitAllocSize;
    re_allocate_size_func_ = &platform::GpuReallocSize;
#endif
  }
#endif
  VLOG(1) << "min_chunk_size_: " << min_chunk_size_
          << ", max_chunk_size_:" << max_chunk_size_;
}

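// Hand every chunk still sitting in pool_ back to the system allocator.
// Blocks that callers have not yet freed are not reclaimed here.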
BuddyAllocator::~BuddyAllocator() {
  VLOG(10) << "BuddyAllocator destructor makes sure that all of these "
              "have actually been freed";
  while (!pool_.empty()) {
    auto block = static_cast<MemoryBlock*>(std::get<2>(*pool_.begin()));
    auto desc = cache_.LoadDesc(block);
    VLOG(10) << "Free from block (" << block << ", " << desc->get_total_size()
             << ")";

    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());
    cache_.Invalidate(block);
    pool_.erase(pool_.begin());
  }
}

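// Round `size` up to the next multiple of `alignment` (assumed non-zero; in
// practice this is always min_chunk_size_).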
inline size_t align(size_t size, size_t alignment) {
  size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

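// Allocate at least `unaligned_size` bytes. The request is padded with the
// MemoryBlock::Desc header plus extra_padding_size_ and rounded up to
// min_chunk_size_. Requests larger than max_chunk_size_ bypass the pool and go
// straight to the system allocator; everything else is carved out of a free
// chunk in pool_. Returns nullptr when no memory can be obtained.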
void* BuddyAllocator::Alloc(size_t unaligned_size) {
  // adjust allocation alignment

  size_t size =
      align(unaligned_size + sizeof(MemoryBlock::Desc) + extra_padding_size_,
            min_chunk_size_);
  VLOG(10) << "alloc: " << unaligned_size
           << ", padding for desc: " << sizeof(MemoryBlock::Desc)
           << ", extra padding: " << extra_padding_size_
           << ", alignment: " << min_chunk_size_;
  // acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size "
           << size;

  // if the allocation is huge, send directly to the system allocator
  if (size > max_chunk_size_) {
    VLOG(10) << "Allocate from system allocator.";
    return SystemAlloc(size);
  }

  // query and allocate from the existing chunk
  auto it = FindExistChunk(size);

  // refill the pool if failure
  if (it == pool_.end()) {
    it = RefillPool(size);
    // if it still fails, return nullptr to the caller
    if (it == pool_.end()) {
      return nullptr;
    }
  } else {
    VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it)
             << " at address "
             << reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->Data();
  }

  total_used_ += size;
  total_free_ -= size;

  // split the allocation and return data for use
  return reinterpret_cast<MemoryBlock*>(SplitToAlloc(it, size))->Data();
}

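// Return a block previously obtained from Alloc(). HUGE_CHUNK blocks go back
// to the system allocator immediately; pooled blocks are marked free, merged
// with any free left/right buddy, and re-inserted into pool_.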
void BuddyAllocator::Free(void* p) {
  // Point back to metadata
  auto block = static_cast<MemoryBlock*>(p)->Metadata();

  // Acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Free from address " << block;

  auto* desc = cache_.LoadDesc(block);
  if (desc->get_type() == MemoryBlock::HUGE_CHUNK) {
    VLOG(10) << "Free directly from system allocator";
    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());

    // Invalidate GPU allocation from cache
    cache_.Invalidate(block);

    return;
  }

  block->MarkAsFree(&cache_);

  total_used_ -= desc->get_total_size();
  total_free_ += desc->get_total_size();

  // Try to merge with the right buddy
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    VLOG(10) << "Merging this block " << block << " with its right buddy "
             << right_buddy;

    auto* rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take the right buddy out of the pool
      pool_.erase(IndexSizeAddress(
          rb_desc->get_index(), rb_desc->get_total_size(), right_buddy));

      // merge the right buddy into this block
      block->Merge(&cache_, right_buddy);
    }
  }

  // Try to merge with the left buddy
  MemoryBlock* left_buddy = block->GetLeftBuddy(&cache_);
  if (left_buddy) {
    VLOG(10) << "Merging this block " << block << " with its left buddy "
             << left_buddy;

    auto* lb_desc = cache_.LoadDesc(left_buddy);
    if (lb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take the left buddy out of the pool
      pool_.erase(IndexSizeAddress(
          lb_desc->get_index(), lb_desc->get_total_size(), left_buddy));

      // merge this block into its left buddy
      left_buddy->Merge(&cache_, block);
      block = left_buddy;
      desc = lb_desc;
    }
  }

  // Insert the (possibly merged) block back into the free pool
  VLOG(10) << "Inserting free block (" << block << ", "
           << desc->get_total_size() << ")";
  pool_.insert(
      IndexSizeAddress(desc->get_index(), desc->get_total_size(), block));
}

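// Give idle memory back to the system allocator. Only free blocks that still
// span an entire chunk recorded in chunks_ are released; returns the number of
// bytes actually freed.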
uint64_t BuddyAllocator::Release() {
  std::lock_guard<std::mutex> lock(mutex_);
  int num = 0;
  uint64_t bytes = 0;
  for (auto iter = pool_.begin(); iter != pool_.end();) {
    auto remain_size = std::get<1>(*iter);
    auto remain_ptr = std::get<2>(*iter);
    auto found = chunks_.find({remain_size, remain_ptr});
    if (found != chunks_.end()) {
      size_t index = found->second;
      ++num;
      bytes += remain_size;
      total_free_ -= remain_size;
      auto block = static_cast<MemoryBlock*>(remain_ptr);
      system_allocator_->Free(remain_ptr, remain_size, index);
      cache_.Invalidate(block);
      iter = pool_.erase(iter);
    } else {
      iter++;
    }
  }
  VLOG(10) << "Release " << num << " chunks, Free " << bytes << " bytes.";
  return bytes;
}

size_t BuddyAllocator::Used() { return total_used_; }
size_t BuddyAllocator::GetMinChunkSize() { return min_chunk_size_; }
size_t BuddyAllocator::GetMaxChunkSize() { return max_chunk_size_; }

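// Allocate a HUGE_CHUNK directly from the system allocator, bypassing pool_.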
void* BuddyAllocator::SystemAlloc(size_t size) {
  size_t index = 0;
  void* p = system_allocator_->Alloc(&index, size);

  VLOG(8) << "Allocated " << p << " size " << size << " from system allocator.";

  if (p == nullptr) return nullptr;

  static_cast<MemoryBlock*>(p)->Init(
      &cache_, MemoryBlock::HUGE_CHUNK, index, size, nullptr, nullptr);

  return static_cast<MemoryBlock*>(p)->Data();
}

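// Ask the system allocator for a fresh chunk and insert it into pool_ as a
// single FREE_CHUNK. On device builds the chunk size comes from
// DeviceAllocateSize(); otherwise max_chunk_size_ is used. Returns pool_.end()
// when the system allocation fails.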
BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
    size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
  size_t index = 0;

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  allocate_bytes = DeviceAllocateSize(
      init_allocate_size_func_, re_allocate_size_func_, request_bytes);
#else
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  allocate_bytes = DeviceAllocateSize(
      &platform::GpuInitAllocSize, &platform::GpuReallocSize, request_bytes);
#endif
#endif

  // Allocate a new block
  void* p = system_allocator_->Alloc(&index, allocate_bytes);

  if (p == nullptr) return pool_.end();

  VLOG(8) << "Creating and inserting new block " << p << " size "
          << allocate_bytes << " from system allocator";

  static_cast<MemoryBlock*>(p)->Init(&cache_,
                                     MemoryBlock::FREE_CHUNK,
                                     index,
                                     allocate_bytes,
                                     nullptr,
                                     nullptr);

  total_free_ += allocate_bytes;

  // record the chunk.
  chunks_.insert({{allocate_bytes, p}, index});

  // insert the new chunk into the pool and return its iterator
  return pool_.insert(IndexSizeAddress(index, allocate_bytes, p)).first;
}

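// Search pool_ for a free block of at least `size` bytes, walking pool indices
// in ascending order; within an index, lower_bound picks the smallest block
// that fits. Returns pool_.end() when nothing suitable exists.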
BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
  size_t index = 0;

  while (true) {
    auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));

    // no matching chunk in the pool
    if (it == pool_.end()) return it;

    if (std::get<0>(*it) > index) {
      // found a suitable one
      if (std::get<1>(*it) >= size) {
        return it;
      }
      // otherwise move up to this index and continue searching
      index = std::get<0>(*it);
      continue;
    }
    return it;
  }
}

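// Remove the chosen block from pool_, split off `size` bytes for the caller
// (marked ARENA_CHUNK), and return the free remainder, if any, to pool_.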
void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
                                   size_t size) {
  auto block = static_cast<MemoryBlock*>(std::get<2>(*it));
  auto desc = cache_.LoadDesc(block);
  pool_.erase(it);

  VLOG(10) << "Split block (" << block << ", " << desc->get_total_size()
           << ") into";
  block->Split(&cache_, size);

  VLOG(10) << "Left block (" << block << ", " << desc->get_total_size() << ")";
  desc->set_type(MemoryBlock::ARENA_CHUNK);

  // return the remaining memory to the pool, if any
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    auto* rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      VLOG(10) << "Insert right block (" << right_buddy << ", "
               << rb_desc->get_total_size() << ")";

      pool_.insert(IndexSizeAddress(
          rb_desc->get_index(), rb_desc->get_total_size(), right_buddy));
    }
  }

  return block;
}

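// Decide how many bytes to request for a new chunk: the device's initial
// allocation size for the very first chunk, the re-allocation size afterwards,
// and never less than request_bytes. Without device support this is simply
// max_chunk_size_.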
size_t BuddyAllocator::DeviceAllocateSize(
    std::function<size_t()> init_allocate_size_func,
    std::function<size_t()> re_allocate_size_func,
    size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
#if defined(USE_DEVICE)
  const bool use_gpu = system_allocator_->UseGpu();
  VLOG(10) << "use_gpu " << use_gpu << ", total_used " << total_used_
           << ", total_free " << total_free_;
  if (use_gpu) {
    if (total_used_ == 0 && total_free_ == 0) {
      // Compute the allocation size for gpu for the first allocation.
      allocate_bytes = std::max(init_allocate_size_func(), request_bytes);
    } else {
      // Compute the re-allocation size. The cached realloc_size_ is reused
      // only when FLAGS_reallocate_gpu_memory_in_mb is set to a non-zero value.
      if (realloc_size_ == 0 || FLAGS_reallocate_gpu_memory_in_mb == 0ul) {
        realloc_size_ = re_allocate_size_func();
      }
      allocate_bytes = std::max(realloc_size_, request_bytes);
    }
  }
#endif
  return allocate_bytes;
}

}  // namespace detail
}  // namespace memory
}  // namespace paddle