/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/detail/buddy_allocator.h"

#include <algorithm>

#include "gflags/gflags.h"
#include "glog/logging.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
DECLARE_uint64(reallocate_gpu_memory_in_mb);
#endif

namespace paddle {
namespace memory {
namespace detail {

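// A minimal usage sketch (illustrative only; the chunk sizes are arbitrary
// and CPUAllocator is the CPU SystemAllocator from system_allocator.h):
//
//   BuddyAllocator allocator(std::make_unique<CPUAllocator>(),
//                            /*min_chunk_size=*/1 << 12,
//                            /*max_chunk_size=*/1 << 22);
//   void* p = allocator.Alloc(4096);  // rounded up and served from a chunk
//   allocator.Free(p);                // returned to the pool, buddies merged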
BuddyAllocator::BuddyAllocator(
    std::unique_ptr<SystemAllocator> system_allocator, size_t min_chunk_size,
    size_t max_chunk_size)
    : min_chunk_size_(min_chunk_size),
      max_chunk_size_(max_chunk_size),
      cache_(system_allocator->UseGpu()),
      system_allocator_(std::move(system_allocator)) {}

BuddyAllocator::~BuddyAllocator() {
  VLOG(10) << "BuddyAllocator destructor makes sure that all of these "
              "have actually been freed";
  while (!pool_.empty()) {
    auto block = static_cast<MemoryBlock*>(std::get<2>(*pool_.begin()));
    auto desc = cache_.LoadDesc(block);
    VLOG(10) << "Free from block (" << block << ", " << desc->get_total_size()
             << ")";

    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());
    cache_.Invalidate(block);
    pool_.erase(pool_.begin());
  }
}

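// Round size up to the next multiple of alignment; an already-aligned size is
// returned unchanged.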
inline size_t align(size_t size, size_t alignment) {
  size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

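// Allocation takes one of three paths: requests larger than max_chunk_size_
// go straight to the system allocator, otherwise an existing free chunk is
// reused, and the pool is refilled from the system allocator on a miss.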
void* BuddyAllocator::Alloc(size_t unaligned_size) {
  // adjust allocation alignment
  size_t size =
      align(unaligned_size + sizeof(MemoryBlock::Desc), min_chunk_size_);

  // acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size "
           << size;

  // if the allocation is huge, send directly to the system allocator
  if (size > max_chunk_size_) {
    VLOG(10) << "Allocate from system allocator.";
    return SystemAlloc(size);
  }

  // query and allocate from the existing chunk
  auto it = FindExistChunk(size);

  // refill the pool if failure
  if (it == pool_.end()) {
    it = RefillPool(size);
    // if it still fails, report failure by returning nullptr
    if (it == pool_.end()) {
      return nullptr;
    }
  } else {
    VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it)
             << " at address "
             << reinterpret_cast<MemoryBlock*>(std::get<2>(*it))->Data();
  }

  total_used_ += size;
  total_free_ -= size;

  // split the allocation and return data for use
  return reinterpret_cast<MemoryBlock*>(SplitToAlloc(it, size))->Data();
}

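// Return a block to the pool, coalescing it with a free right and/or left
// buddy so that adjacent free blocks merge back into larger chunks.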
void BuddyAllocator::Free(void* p) {
  // Point back to metadata
  auto block = static_cast<MemoryBlock*>(p)->Metadata();

  // Acquire the allocator lock
  std::lock_guard<std::mutex> lock(mutex_);

  VLOG(10) << "Free from address " << block;

  auto* desc = cache_.LoadDesc(block);
  if (desc->get_type() == MemoryBlock::HUGE_CHUNK) {
    VLOG(10) << "Free directly from system allocator";
    system_allocator_->Free(block, desc->get_total_size(), desc->get_index());

    // Invalidate GPU allocation from cache
    cache_.Invalidate(block);

    return;
  }

  block->MarkAsFree(&cache_);

  total_used_ -= desc->get_total_size();
  total_free_ += desc->get_total_size();

  // Trying to merge the right buddy
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    VLOG(10) << "Merging this block " << block << " with its right buddy "
             << right_buddy;

    auto rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take away right buddy from pool
      pool_.erase(IndexSizeAddress(rb_desc->get_index(),
                                   rb_desc->get_total_size(), right_buddy));

      // merge its right buddy to the block
      block->Merge(&cache_, right_buddy);
    }
  }

  // Trying to merge the left buddy
  MemoryBlock* left_buddy = block->GetLeftBuddy(&cache_);
  if (left_buddy) {
    VLOG(10) << "Merging this block " << block << " with its left buddy "
             << left_buddy;

    auto* lb_desc = cache_.LoadDesc(left_buddy);
    if (lb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      // Take away left buddy from pool
      pool_.erase(IndexSizeAddress(lb_desc->get_index(),
                                   lb_desc->get_total_size(), left_buddy));

      // merge the block to its left buddy
      left_buddy->Merge(&cache_, block);
      block = left_buddy;
      desc = lb_desc;
    }
  }

  // Dumping this block into pool
  VLOG(10) << "Inserting free block (" << block << ", "
           << desc->get_total_size() << ")";
  pool_.insert(
      IndexSizeAddress(desc->get_index(), desc->get_total_size(), block));
}

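// Return every completely idle chunk to the system: a pool entry is released
// only when its size and address still match a chunk recorded in chunks_ at
// allocation time. Returns the number of bytes given back.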
uint64_t BuddyAllocator::Release() {
  std::lock_guard<std::mutex> lock(mutex_);
  int num = 0;
  uint64_t bytes = 0;
  for (auto iter = pool_.begin(); iter != pool_.end();) {
    // Reset for each entry; a stale true value would erase pool entries that
    // do not correspond to a whole chunk.
    bool del_flag = false;
    auto remain_size = std::get<1>(*iter);
    auto remain_ptr = std::get<2>(*iter);
    for (auto& chunk : chunks_) {
      auto init_size = std::get<1>(chunk);
      auto init_ptr = std::get<2>(chunk);

      if (init_size == remain_size && init_ptr == remain_ptr) {
        ++num;
        bytes += init_size;
        total_free_ -= init_size;
        auto block = static_cast<MemoryBlock*>(std::get<2>(chunk));
        system_allocator_->Free(init_ptr, init_size, std::get<0>(chunk));
        cache_.Invalidate(block);
        del_flag = true;
        break;
      }
    }

    if (del_flag) {
      iter = pool_.erase(iter);
    } else {
      iter++;
    }
  }
  VLOG(10) << "Release " << num << " chunks, Free " << bytes << " bytes.";
  return bytes;
}

size_t BuddyAllocator::Used() { return total_used_; }
size_t BuddyAllocator::GetMinChunkSize() { return min_chunk_size_; }
size_t BuddyAllocator::GetMaxChunkSize() { return max_chunk_size_; }

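// Allocate directly from the system allocator and tag the block as a
// HUGE_CHUNK so that Free() hands it straight back to the system.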
void* BuddyAllocator::SystemAlloc(size_t size) {
  size_t index = 0;
  void* p = system_allocator_->Alloc(&index, size);

  VLOG(10) << "Allocated " << p << " from system allocator.";

  if (p == nullptr) return nullptr;

  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::HUGE_CHUNK, index,
                                     size, nullptr, nullptr);

  return static_cast<MemoryBlock*>(p)->Data();
}

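// Grow the pool with a fresh chunk from the system allocator. On GPU the
// first chunk is sized by GpuInitAllocSize() (or the request, if larger) and
// subsequent chunks by the reallocation size derived from
// FLAGS_reallocate_gpu_memory_in_mb.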
BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
    size_t request_bytes) {
  size_t allocate_bytes = max_chunk_size_;
  size_t index = 0;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (system_allocator_->UseGpu()) {
    if ((total_used_ + total_free_) == 0) {
      // Compute the allocation size for gpu for the first allocation.
      allocate_bytes = std::max(platform::GpuInitAllocSize(), request_bytes);
    } else {
      // Compute the reallocation size; it is cached once the user sets
      // FLAGS_reallocate_gpu_memory_in_mb to a fixed value.
      if (realloc_size_ == 0 || FLAGS_reallocate_gpu_memory_in_mb == 0ul) {
        realloc_size_ = platform::GpuReallocSize();
      }
      allocate_bytes = std::max(realloc_size_, request_bytes);
    }
  }
#endif

  // Allocate a new block
  void* p = system_allocator_->Alloc(&index, allocate_bytes);

  if (p == nullptr) return pool_.end();

  VLOG(10) << "Creating and inserting new block " << p
           << " from system allocator";

  static_cast<MemoryBlock*>(p)->Init(&cache_, MemoryBlock::FREE_CHUNK, index,
                                     allocate_bytes, nullptr, nullptr);

  total_free_ += allocate_bytes;

  // record the chunk.
  chunks_.insert(IndexSizeAddress(index, allocate_bytes, p));

  // dump the block into pool
  return pool_.insert(IndexSizeAddress(index, allocate_bytes, p)).first;
}

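// Find the best-fitting free chunk in the pool, which is ordered by
// <index, size, address>: advance the index until an entry of at least
// `size` bytes turns up or the pool is exhausted.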
BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
  size_t index = 0;

  while (1) {
    auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));

    // no matching chunk in the pool
    if (it == pool_.end()) return it;

    if (std::get<0>(*it) > index) {
      // found a suitable one
      if (std::get<1>(*it) >= size) {
        return it;
      }
      // update and continue
      index = std::get<0>(*it);
      continue;
    }
    return it;
  }
}

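// Carve the requested size out of a free chunk; the remainder, if any,
// becomes the right buddy and is re-inserted into the pool as a FREE_CHUNK.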
void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
                                   size_t size) {
  auto block = static_cast<MemoryBlock*>(std::get<2>(*it));
  auto desc = cache_.LoadDesc(block);
  pool_.erase(it);

  VLOG(10) << "Split block (" << block << ", " << desc->get_total_size()
           << ") into";
  block->Split(&cache_, size);

  VLOG(10) << "Left block (" << block << ", " << desc->get_total_size() << ")";
  desc->set_type(MemoryBlock::ARENA_CHUNK);

  // return the remainder of the memory, if any, to the pool
  MemoryBlock* right_buddy = block->GetRightBuddy(&cache_);
  if (right_buddy) {
    auto* rb_desc = cache_.LoadDesc(right_buddy);
    if (rb_desc->get_type() == MemoryBlock::FREE_CHUNK) {
      VLOG(10) << "Insert right block (" << right_buddy << ", "
               << rb_desc->get_total_size() << ")";

      pool_.insert(IndexSizeAddress(rb_desc->get_index(),
                                    rb_desc->get_total_size(), right_buddy));
    }
  }

  return block;
}

}  // namespace detail
}  // namespace memory
}  // namespace paddle