/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/detail/buddy_allocator.h"
Y
Yang 已提交
16

17
#include <algorithm>
18

19
#include "gflags/gflags.h"
L
liaogang 已提交
20
#include "glog/logging.h"
21

F
fwenguang 已提交
22
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
Y
Yang 已提交
23 24
    defined(PADDLE_WITH_MLU) || defined(PADDLE_WITH_ASCEND_CL)
#define USE_DEVICE
25 26
DECLARE_uint64(reallocate_gpu_memory_in_mb);
#endif
Y
Yang 已提交
27

F
fwenguang 已提交
28 29 30
#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif
31

32 33 34 35
namespace paddle {
namespace memory {
namespace detail {

G
gongweibao 已提交
36 37
BuddyAllocator::BuddyAllocator(
    std::unique_ptr<SystemAllocator> system_allocator, size_t min_chunk_size,
38
    size_t max_chunk_size, size_t extra_padding_size)
39 40
    : min_chunk_size_(min_chunk_size),
      max_chunk_size_(max_chunk_size),
41
      extra_padding_size_(extra_padding_size),
42
      cache_(system_allocator->UseGpu()),
L
liaogang 已提交
43 44 45
      system_allocator_(std::move(system_allocator)) {}

BuddyAllocator::~BuddyAllocator() {
M
minqiyang 已提交
46 47
  VLOG(10) << "BuddyAllocator Disconstructor makes sure that all of these "
              "have actually been freed";
L
liaogang 已提交
48 49
  while (!pool_.empty()) {
    auto block = static_cast<MemoryBlock*>(std::get<2>(*pool_.begin()));
50
    auto desc = cache_.LoadDesc(block);
51 52
    VLOG(10) << "Free from block (" << block << ", " << desc->get_total_size()
             << ")";
L
liaogang 已提交
53

54
    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());
55
    cache_.Invalidate(block);
L
liaogang 已提交
56 57
    pool_.erase(pool_.begin());
  }
L
liaogang 已提交
58 59 60 61 62 63 64 65 66 67
}

inline size_t align(size_t size, size_t alignment) {
  size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

void* BuddyAllocator::Alloc(size_t unaligned_size) {
  // adjust allocation alignment

68 69 70 71 72 73 74
  size_t size =
      align(unaligned_size + sizeof(MemoryBlock::Desc) + extra_padding_size_,
            min_chunk_size_);
  VLOG(10) << "alloc: " << unaligned_size
           << ", padding for desc: " << sizeof(MemoryBlock::Desc)
           << ", extra padding: " << extra_padding_size_
           << ", alignment: " << min_chunk_size_;
L
liaogang 已提交
75 76 77
  // acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

M
minqiyang 已提交
78 79
  VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size "
           << size;
L
liaogang 已提交
80 81 82

  // if the allocation is huge, send directly to the system allocator
  if (size > max_chunk_size_) {
M
minqiyang 已提交
83
    VLOG(10) << "Allocate from system allocator.";
L
liaogang 已提交
84 85 86 87 88 89 90 91
    return SystemAlloc(size);
  }

  // query and allocate from the existing chunk
  auto it = FindExistChunk(size);

  // refill the pool if failure
  if (it == pool_.end()) {
92
    it = RefillPool(size);
L
liaogang 已提交
93 94 95 96
    // if still failure, fail fatally
    if (it == pool_.end()) {
      return nullptr;
    }
L
liaogang 已提交
97
  } else {
M
minqiyang 已提交
98 99
    VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it)
             << " at address "
100
             << reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->Data();
L
liaogang 已提交
101 102 103 104 105 106
  }

  total_used_ += size;
  total_free_ -= size;

  // split the allocation and return data for use
107
  return reinterpret_cast<MemoryBlock*>(SplitToAlloc(it, size))->Data();
L
liaogang 已提交
108 109
}

L
liaogang 已提交
110
void BuddyAllocator::Free(void* p) {
L
liaogang 已提交
111
  // Point back to metadata
112
  auto block = static_cast<MemoryBlock*>(p)->Metadata();
L
liaogang 已提交
113

L
liaogang 已提交
114
  // Acquire the allocator lock
L
liaogang 已提交
115
  std::lock_guard<std::mutex> lock(mutex_);
L
liaogang 已提交
116

M
minqiyang 已提交
117
  VLOG(10) << "Free from address " << block;
L
liaogang 已提交
118

119 120
  auto* desc = cache_.LoadDesc(block);
  if (desc->get_type() == MemoryBlock::HUGE_CHUNK) {
M
minqiyang 已提交
121
    VLOG(10) << "Free directly from system allocator";
122
    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());
L
liaogang 已提交
123 124

    // Invalidate GPU allocation from cache
125
    cache_.Invalidate(block);
126

L
liaogang 已提交
127 128 129
    return;
  }

130
  block->MarkAsFree(&cache_);
L
liaogang 已提交
131

132 133
  total_used_ -= desc->get_total_size();
  total_free_ += desc->get_total_size();
L
liaogang 已提交
134 135

  // Trying to merge the right buddy
136 137
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
M
minqiyang 已提交
138
    VLOG(10) << "Merging this block " << block << " with its right buddy "
139
             << right_buddy;
140

141 142
    auto rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
143
      // Take away right buddy from pool
144 145
      pool_.erase(IndexSizeAddress(rb_desc->get_index(),
                                   rb_desc->get_total_size(), right_buddy));
146 147

      // merge its right buddy to the block
148
      block->Merge(&cache_, right_buddy);
149
    }
L
liaogang 已提交
150 151 152
  }

  // Trying to merge the left buddy
153 154
  MemoryBlock* left_buddy = block->GetLeftBuddy(&cache_);
  if (left_buddy) {
M
minqiyang 已提交
155
    VLOG(10) << "Merging this block " << block << " with its left buddy "
156
             << left_buddy;
157

158 159 160
    // auto left_buddy = block->left_buddy(cache_);
    auto* lb_desc = cache_.LoadDesc(left_buddy);
    if (lb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
161
      // Take away right buddy from pool
162 163
      pool_.erase(IndexSizeAddress(lb_desc->get_index(),
                                   lb_desc->get_total_size(), left_buddy));
164 165

      // merge the block to its left buddy
166
      left_buddy->Merge(&cache_, block);
167
      block = left_buddy;
168
      desc = lb_desc;
169
    }
L
liaogang 已提交
170 171 172
  }

  // Dumping this block into pool
M
minqiyang 已提交
173
  VLOG(10) << "Inserting free block (" << block << ", "
174
           << desc->get_total_size() << ")";
L
liaogang 已提交
175
  pool_.insert(
176
      IndexSizeAddress(desc->get_index(), desc->get_total_size(), block));
L
liaogang 已提交
177 178
}

W
Wilber 已提交
179
uint64_t BuddyAllocator::Release() {
180 181 182 183 184 185
  std::lock_guard<std::mutex> lock(mutex_);
  int num = 0;
  uint64_t bytes = 0;
  for (auto iter = pool_.begin(); iter != pool_.end();) {
    auto remain_size = std::get<1>(*iter);
    auto remain_ptr = std::get<2>(*iter);
Y
Yang 已提交
186 187 188 189 190 191 192 193 194
    auto found = chunks_.find({remain_size, remain_ptr});
    if (found != chunks_.end()) {
      size_t index = found->second;
      ++num;
      bytes += remain_size;
      total_free_ -= remain_size;
      auto block = static_cast<MemoryBlock*>(remain_ptr);
      system_allocator_->Free(remain_ptr, remain_size, index);
      cache_.Invalidate(block);
195 196 197 198 199
      iter = pool_.erase(iter);
    } else {
      iter++;
    }
  }
Y
Yang 已提交
200
  VLOG(10) << "Release " << num << " chunks, Free " << bytes << " bytes.";
W
Wilber 已提交
201
  return bytes;
202 203
}

L
liaogang 已提交
204
size_t BuddyAllocator::Used() { return total_used_; }
D
Dun Liang 已提交
205 206
size_t BuddyAllocator::GetMinChunkSize() { return min_chunk_size_; }
size_t BuddyAllocator::GetMaxChunkSize() { return max_chunk_size_; }
L
liaogang 已提交
207

L
liaogang 已提交
208 209
void* BuddyAllocator::SystemAlloc(size_t size) {
  size_t index = 0;
Y
Update  
Yi Wang 已提交
210
  void* p = system_allocator_->Alloc(&index, size);
L
liaogang 已提交
211

M
minqiyang 已提交
212
  VLOG(10) << "Allocated " << p << " from system allocator.";
L
liaogang 已提交
213 214 215

  if (p == nullptr) return nullptr;

216
  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::HUGE_CHUNK, index,
L
liaogang 已提交
217 218
                                     size, nullptr, nullptr);

219
  return static_cast<MemoryBlock*>(p)->Data();
L
liaogang 已提交
220 221
}

222 223
BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
    size_t request_bytes) {
Z
zhhsplendid 已提交
224 225 226
  size_t allocate_bytes = max_chunk_size_;
  size_t index = 0;

227
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
Y
Yang 已提交
228 229 230 231 232 233 234 235 236
  allocate_bytes = DeviceAllocateSize(&platform::GpuInitAllocSize,
                                      &platform::GpuReallocSize, request_bytes);
#elif defined(PADDLE_WITH_ASCEND_CL)
  allocate_bytes = DeviceAllocateSize(&platform::NPUInitAllocSize,
                                      &platform::NPUReallocSize, request_bytes);
#elif defined(PADDLE_WITH_MLU)
  allocate_bytes =
      DeviceAllocateSize(&platform::MLUInitAllocSize(),
                         &platform::MLUReallocSize(), request_bytes);
F
fwenguang 已提交
237
#endif
L
liaogang 已提交
238

Z
zhhsplendid 已提交
239 240
  // Allocate a new block
  void* p = system_allocator_->Alloc(&index, allocate_bytes);
L
liaogang 已提交
241 242 243

  if (p == nullptr) return pool_.end();

M
minqiyang 已提交
244 245
  VLOG(10) << "Creating and inserting new block " << p
           << " from system allocator";
L
liaogang 已提交
246

247
  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::FREE_CHUNK, index,
Z
zhhsplendid 已提交
248
                                     allocate_bytes, nullptr, nullptr);
L
liaogang 已提交
249

Z
zhhsplendid 已提交
250
  total_free_ += allocate_bytes;
L
liaogang 已提交
251

252
  // record the chunk.
Y
Yang 已提交
253
  chunks_.insert({{allocate_bytes, p}, index});
254

L
liaogang 已提交
255
  // dump the block into pool
Z
zhhsplendid 已提交
256
  return pool_.insert(IndexSizeAddress(index, allocate_bytes, p)).first;
L
liaogang 已提交
257 258 259 260 261 262
}

BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
  size_t index = 0;

  while (1) {
L
liaogang 已提交
263
    auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));
264 265

    // no match chunk memory
L
liaogang 已提交
266 267 268
    if (it == pool_.end()) return it;

    if (std::get<0>(*it) > index) {
269
      // find suitable one
L
liaogang 已提交
270 271 272
      if (std::get<1>(*it) >= size) {
        return it;
      }
273
      // update and continue
L
liaogang 已提交
274 275 276 277 278 279 280 281 282 283
      index = std::get<0>(*it);
      continue;
    }
    return it;
  }
}

void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
                                   size_t size) {
  auto block = static_cast<MemoryBlock*>(std::get<2>(*it));
284
  auto desc = cache_.LoadDesc(block);
L
liaogang 已提交
285 286
  pool_.erase(it);

287
  VLOG(10) << "Split block (" << block << ", " << desc->get_total_size()
M
minqiyang 已提交
288
           << ") into";
289
  block->Split(&cache_, size);
L
liaogang 已提交
290

291 292
  VLOG(10) << "Left block (" << block << ", " << desc->get_total_size() << ")";
  desc->set_type(MemoryBlock::ARENA_CHUNK);
L
liaogang 已提交
293 294

  // the rest of memory if exist
295 296 297 298 299 300 301 302 303
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    auto* rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      VLOG(10) << "Insert right block (" << right_buddy << ", "
               << rb_desc->get_total_size() << ")";

      pool_.insert(IndexSizeAddress(rb_desc->get_index(),
                                    rb_desc->get_total_size(), right_buddy));
L
liaogang 已提交
304 305 306 307
    }
  }

  return block;
308 309
}

Y
Yang 已提交
310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334
size_t BuddyAllocator::DeviceAllocateSize(
    std::function<size_t()> init_allocate_size_func,
    std::function<size_t()> re_allocate_size_func, size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
#if defined(USE_DEVICE)
  const bool use_gpu = system_allocator_->UseGpu();
  VLOG(10) << "use_gpu " << use_gpu << ", total_used " << total_used_
           << ", total_free " << total_free_;
  if (use_gpu) {
    if (total_used_ == 0 && total_free_ == 0) {
      // Compute the allocation size for gpu for the first allocation.
      allocate_bytes = std::max(init_allocate_size_func(), request_bytes);
    } else {
      // Compute the re-allocation size, we store the re-allocation size when
      // user set FLAGS_reallocate_gpu_memory_in_mb to fix value.
      if (realloc_size_ == 0 || FLAGS_reallocate_gpu_memory_in_mb == 0ul) {
        realloc_size_ = re_allocate_size_func();
      }
      allocate_bytes = std::max(realloc_size_, request_bytes);
    }
  }
#endif
  return allocate_bytes;
}

335 336 337
}  // namespace detail
}  // namespace memory
}  // namespace paddle