/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/detail/buddy_allocator.h"

#include <algorithm>

#include "gflags/gflags.h"
#include "glog/logging.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_MLU) || defined(PADDLE_WITH_ASCEND_CL)
#define USE_DEVICE
DECLARE_uint64(reallocate_gpu_memory_in_mb);
#endif

#include "paddle/fluid/platform/device/device_wrapper.h"

namespace paddle {
namespace memory {
namespace detail {

G
gongweibao 已提交
34 35
// Construct a buddy allocator on top of `system_allocator`.
//
// min_chunk_size:     granularity every allocation request is rounded up to.
// max_chunk_size:     largest request served from the buddy pool; anything
//                     bigger goes straight to the system allocator.
// extra_padding_size: extra bytes appended to every allocation request.
// dev_type:           custom-device type name; when empty, the built-in
//                     CUDA/HIP/NPU/MLU size functions are selected below.
BuddyAllocator::BuddyAllocator(
    std::unique_ptr<SystemAllocator> system_allocator, size_t min_chunk_size,
    size_t max_chunk_size, size_t extra_padding_size,
    const std::string dev_type)
    : min_chunk_size_(min_chunk_size),
      max_chunk_size_(max_chunk_size),
      extra_padding_size_(extra_padding_size),
      // NOTE(review): cache_ reads the argument before system_allocator_'s
      // initializer moves from it; this relies on cache_ being declared
      // before system_allocator_ in the header — confirm the member order.
      cache_(system_allocator->UseGpu()),
      system_allocator_(std::move(system_allocator)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (!dev_type.empty()) {
    // Custom device: resolve the initial/re-allocation chunk sizes lazily
    // through DeviceManager, capturing the device type string by value.
    init_allocate_size_func_ = [dev_type]() {
      return platform::DeviceManager::GetInitAllocSize(
          platform::PlaceHelper::CreatePlace(dev_type));
    };
    re_allocate_size_func_ = [dev_type]() {
      return platform::DeviceManager::GetReallocSize(
          platform::PlaceHelper::CreatePlace(dev_type));
    };
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    init_allocate_size_func_ = &platform::GpuInitAllocSize;
    re_allocate_size_func_ = &platform::GpuReallocSize;
#elif defined(PADDLE_WITH_ASCEND_CL)
    init_allocate_size_func_ = &platform::NPUInitAllocSize;
    re_allocate_size_func_ = &platform::NPUReallocSize;
#elif defined(PADDLE_WITH_MLU)
    init_allocate_size_func_ = &platform::MLUInitAllocSize;
    re_allocate_size_func_ = &platform::MLUReallocSize;
#endif
  }
#endif
}
// Drain every remaining chunk from the pool and hand each one back to the
// underlying system allocator before the allocator itself goes away.
BuddyAllocator::~BuddyAllocator() {
  VLOG(10) << "BuddyAllocator Disconstructor makes sure that all of these "
              "have actually been freed";
  for (auto first = pool_.begin(); first != pool_.end();
       first = pool_.begin()) {
    auto* chunk = static_cast<MemoryBlock*>(std::get<2>(*first));
    auto chunk_desc = cache_.LoadDesc(chunk);
    VLOG(10) << "Free from block (" << chunk << ", "
             << chunk_desc->get_total_size() << ")";

    // Release the raw memory first, then drop its cached metadata and
    // the pool entry that referenced it.
    system_allocator_->Free(chunk, chunk_desc->get_total_size(),
                            chunk_desc->get_index());
    cache_.Invalidate(chunk);
    pool_.erase(first);
  }
}

// Round `size` up to the next multiple of `alignment`.
inline size_t align(size_t size, size_t alignment) {
  const size_t excess = size % alignment;
  if (excess == 0) {
    return size;  // already aligned
  }
  return size + alignment - excess;
}

void* BuddyAllocator::Alloc(size_t unaligned_size) {
  // adjust allocation alignment

91 92 93 94 95 96 97
  size_t size =
      align(unaligned_size + sizeof(MemoryBlock::Desc) + extra_padding_size_,
            min_chunk_size_);
  VLOG(10) << "alloc: " << unaligned_size
           << ", padding for desc: " << sizeof(MemoryBlock::Desc)
           << ", extra padding: " << extra_padding_size_
           << ", alignment: " << min_chunk_size_;
L
liaogang 已提交
98 99 100
  // acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

M
minqiyang 已提交
101 102
  VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size "
           << size;
L
liaogang 已提交
103 104 105

  // if the allocation is huge, send directly to the system allocator
  if (size > max_chunk_size_) {
M
minqiyang 已提交
106
    VLOG(10) << "Allocate from system allocator.";
L
liaogang 已提交
107 108 109 110 111 112 113 114
    return SystemAlloc(size);
  }

  // query and allocate from the existing chunk
  auto it = FindExistChunk(size);

  // refill the pool if failure
  if (it == pool_.end()) {
115
    it = RefillPool(size);
L
liaogang 已提交
116 117 118 119
    // if still failure, fail fatally
    if (it == pool_.end()) {
      return nullptr;
    }
L
liaogang 已提交
120
  } else {
M
minqiyang 已提交
121 122
    VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it)
             << " at address "
123
             << reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->Data();
L
liaogang 已提交
124 125 126 127 128 129
  }

  total_used_ += size;
  total_free_ -= size;

  // split the allocation and return data for use
130
  return reinterpret_cast<MemoryBlock*>(SplitToAlloc(it, size))->Data();
L
liaogang 已提交
131 132
}

// Return the block at user pointer `p` to the allocator: huge chunks go
// straight back to the system allocator; pool blocks are marked free,
// coalesced with adjacent free buddies, and re-inserted into the pool.
void BuddyAllocator::Free(void* p) {
  // Point back from the data pointer to the block's metadata header.
  auto block = static_cast<MemoryBlock*>(p)->Metadata();

  // Acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Free from address " << block;

  auto* desc = cache_.LoadDesc(block);
  if (desc->get_type() == MemoryBlock::HUGE_CHUNK) {
    // Huge chunks were never part of the buddy pool (see SystemAlloc),
    // so they bypass the merge logic entirely.
    VLOG(10) << "Free directly from system allocator";
    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());

    // Invalidate GPU allocation from cache
    cache_.Invalidate(block);

    return;
  }

  block->MarkAsFree(&cache_);

  total_used_ -= desc->get_total_size();
  total_free_ += desc->get_total_size();

  // Trying to merge the right buddy
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    VLOG(10) << "Merging this block " << block << " with its right buddy "
             << right_buddy;

    auto rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take away right buddy from pool
      pool_.erase(IndexSizeAddress(rb_desc->get_index(),
                                   rb_desc->get_total_size(), right_buddy));

      // merge its right buddy to the block
      block->Merge(&cache_, right_buddy);
    }
  }

  // Trying to merge the left buddy
  MemoryBlock* left_buddy = block->GetLeftBuddy(&cache_);
  if (left_buddy) {
    VLOG(10) << "Merging this block " << block << " with its left buddy "
             << left_buddy;

    auto* lb_desc = cache_.LoadDesc(left_buddy);
    if (lb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take away left buddy from pool
      pool_.erase(IndexSizeAddress(lb_desc->get_index(),
                                   lb_desc->get_total_size(), left_buddy));

      // Merge the block to its left buddy. From here on the merged left
      // buddy (and its descriptor) represents the freed region, which is
      // why both `block` and `desc` are reassigned.
      left_buddy->Merge(&cache_, block);
      block = left_buddy;
      desc = lb_desc;
    }
  }

  // Dumping this block into pool
  VLOG(10) << "Inserting free block (" << block << ", "
           << desc->get_total_size() << ")";
  pool_.insert(
      IndexSizeAddress(desc->get_index(), desc->get_total_size(), block));
}

uint64_t BuddyAllocator::Release() {
203 204 205 206 207 208
  std::lock_guard<std::mutex> lock(mutex_);
  int num = 0;
  uint64_t bytes = 0;
  for (auto iter = pool_.begin(); iter != pool_.end();) {
    auto remain_size = std::get<1>(*iter);
    auto remain_ptr = std::get<2>(*iter);
Y
Yang 已提交
209 210 211 212 213 214 215 216 217
    auto found = chunks_.find({remain_size, remain_ptr});
    if (found != chunks_.end()) {
      size_t index = found->second;
      ++num;
      bytes += remain_size;
      total_free_ -= remain_size;
      auto block = static_cast<MemoryBlock*>(remain_ptr);
      system_allocator_->Free(remain_ptr, remain_size, index);
      cache_.Invalidate(block);
218 219 220 221 222
      iter = pool_.erase(iter);
    } else {
      iter++;
    }
  }
Y
Yang 已提交
223
  VLOG(10) << "Release " << num << " chunks, Free " << bytes << " bytes.";
W
Wilber 已提交
224
  return bytes;
225 226
}

// Total bytes currently handed out to callers (chunk-aligned, so this
// includes metadata and padding added in Alloc).
size_t BuddyAllocator::Used() { return total_used_; }
// Granularity every allocation request is rounded up to.
size_t BuddyAllocator::GetMinChunkSize() { return min_chunk_size_; }
// Largest request served from the buddy pool; bigger ones use SystemAlloc.
size_t BuddyAllocator::GetMaxChunkSize() { return max_chunk_size_; }
void* BuddyAllocator::SystemAlloc(size_t size) {
  size_t index = 0;
Y
Update  
Yi Wang 已提交
233
  void* p = system_allocator_->Alloc(&index, size);
L
liaogang 已提交
234

M
minqiyang 已提交
235
  VLOG(10) << "Allocated " << p << " from system allocator.";
L
liaogang 已提交
236 237 238

  if (p == nullptr) return nullptr;

239
  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::HUGE_CHUNK, index,
L
liaogang 已提交
240 241
                                     size, nullptr, nullptr);

242
  return static_cast<MemoryBlock*>(p)->Data();
L
liaogang 已提交
243 244
}

// Grow the pool with one fresh chunk from the system allocator and return
// an iterator to it, or pool_.end() when the underlying allocation fails.
BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
    size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
  size_t index = 0;

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  // Custom devices resolve the chunk size through the function objects
  // installed in the constructor.
  allocate_bytes = DeviceAllocateSize(init_allocate_size_func_,
                                      re_allocate_size_func_, request_bytes);
#else
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  allocate_bytes = DeviceAllocateSize(&platform::GpuInitAllocSize,
                                      &platform::GpuReallocSize, request_bytes);
#elif defined(PADDLE_WITH_ASCEND_CL)
  allocate_bytes = DeviceAllocateSize(&platform::NPUInitAllocSize,
                                      &platform::NPUReallocSize, request_bytes);
#elif defined(PADDLE_WITH_MLU)
  allocate_bytes = DeviceAllocateSize(&platform::MLUInitAllocSize,
                                      &platform::MLUReallocSize, request_bytes);
#endif
#endif

  // Allocate a new block
  void* p = system_allocator_->Alloc(&index, allocate_bytes);

  if (p == nullptr) return pool_.end();

  VLOG(10) << "Creating and inserting new block " << p
           << " from system allocator";

  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::FREE_CHUNK, index,
                                     allocate_bytes, nullptr, nullptr);

  total_free_ += allocate_bytes;

  // Record the whole chunk so Release() can later identify it as a
  // complete, returnable unit.
  chunks_.insert({{allocate_bytes, p}, index});

  // Dump the free block into the pool and hand back its position.
  return pool_.insert(IndexSizeAddress(index, allocate_bytes, p)).first;
}

// Find the first free block of at least `size` bytes in the ordered pool,
// walking device indices upward; returns pool_.end() when nothing fits.
BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
  size_t search_index = 0;

  for (;;) {
    auto candidate =
        pool_.lower_bound(IndexSizeAddress(search_index, size, nullptr));

    // Nothing at or beyond (search_index, size): no usable chunk exists.
    if (candidate == pool_.end()) {
      return candidate;
    }

    // Same index as requested: lower_bound already guarantees the block
    // is at least `size` bytes, so take it.
    if (std::get<0>(*candidate) == search_index) {
      return candidate;
    }

    // We skipped ahead to a higher index. Its first block may be smaller
    // than `size` (ordering is by index first); accept it only if it is
    // big enough, otherwise re-run the search within that index.
    if (std::get<1>(*candidate) >= size) {
      return candidate;
    }
    search_index = std::get<0>(*candidate);
  }
}

// Remove the block at `it` from the pool, split off exactly `size` bytes
// for the caller, and return any free remainder to the pool.
void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
                                   size_t size) {
  // Pull the chosen block out of the free pool before carving it up.
  auto* alloc_block = static_cast<MemoryBlock*>(std::get<2>(*it));
  auto alloc_desc = cache_.LoadDesc(alloc_block);
  pool_.erase(it);

  VLOG(10) << "Split block (" << alloc_block << ", "
           << alloc_desc->get_total_size() << ") into";
  alloc_block->Split(&cache_, size);

  VLOG(10) << "Left block (" << alloc_block << ", "
           << alloc_desc->get_total_size() << ")";
  alloc_desc->set_type(MemoryBlock::ARENA_CHUNK);

  // If the split produced a free remainder on the right, re-insert it
  // into the pool so later allocations can use it.
  MemoryBlock* remainder = alloc_block->GetRightBuddy(&cache_);
  if (remainder != nullptr) {
    auto* remainder_desc = cache_.LoadDesc(remainder);
    if (remainder_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      VLOG(10) << "Insert right block (" << remainder << ", "
               << remainder_desc->get_total_size() << ")";

      pool_.insert(IndexSizeAddress(remainder_desc->get_index(),
                                    remainder_desc->get_total_size(),
                                    remainder));
    }
  }

  return alloc_block;
}

// Decide how many bytes to request from the device for the next chunk.
//
// The very first allocation (nothing used, nothing free) uses the initial
// allocation size; later refills use the re-allocation size, which is
// cached in realloc_size_ unless FLAGS_reallocate_gpu_memory_in_mb is 0.
// Either way the result is at least `request_bytes`. On builds without a
// device backend this simply returns max_chunk_size_.
size_t BuddyAllocator::DeviceAllocateSize(
    std::function<size_t()> init_allocate_size_func,
    std::function<size_t()> re_allocate_size_func, size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
#if defined(USE_DEVICE)
  const bool use_gpu = system_allocator_->UseGpu();
  VLOG(10) << "use_gpu " << use_gpu << ", total_used " << total_used_
           << ", total_free " << total_free_;
  if (use_gpu) {
    if (total_used_ == 0 && total_free_ == 0) {
      // Compute the allocation size for gpu for the first allocation.
      allocate_bytes = std::max(init_allocate_size_func(), request_bytes);
    } else {
      // Compute the re-allocation size, we store the re-allocation size when
      // user set FLAGS_reallocate_gpu_memory_in_mb to fix value.
      if (realloc_size_ == 0 || FLAGS_reallocate_gpu_memory_in_mb == 0ul) {
        realloc_size_ = re_allocate_size_func();
      }
      allocate_bytes = std::max(realloc_size_, request_bytes);
    }
  }
#endif
  return allocate_bytes;
}

}  // namespace detail
}  // namespace memory
}  // namespace paddle