// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
#include <cmath>  // std::log2 is used in the portable HighestBitPos fallback
#include <list>
#include <map>
#include <string>
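
// A best-fit allocator over one pre-allocated buffer.  Chunks live in a chunk
// list, and free chunks are additionally indexed by bins keyed on the highest
// set bit of the chunk size; within a bin, chunks are ordered by exact size
// (duplicate sizes allowed), so the smallest chunk that can hold a request is
// found with lower_bound.  Allocate splits the chosen chunk, Free merges the
// released chunk with its free neighbours.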

namespace paddle {
namespace memory {
namespace allocation {

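// Position of the highest set bit of N, counted from 1 (so HighestBitPos(1)
// == 1 and HighestBitPos(8) == 4); returns 0 when N == 0.  Used to choose the
// free-chunk bin for a given size.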
static int HighestBitPos(size_t N) {
  if (UNLIKELY(N == 0)) {
    return 0;
  } else {
#ifdef __GNUC__
    // Count leading zeros with the GCC builtin; the 64-bit variant stays
    // correct even when N does not fit in an unsigned int.
    return static_cast<int>(sizeof(unsigned long long) * 8 -
                            __builtin_clzll(N));
#else
    return static_cast<int>(std::log2(N) + 1);
#endif
  }
}

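// The whole underlying allocation starts out as a single free chunk,
// registered in the bin matching its size.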
BestFitAllocator::BestFitAllocator(Allocation* allocation)
    : allocation_(allocation) {
  details::Chunk chunk;
  chunk.size_ = allocation_->size();
  chunk.offset_ = 0;
  chunk.is_free = true;
  chunks_.emplace_back(chunk);
  free_chunks_[HighestBitPos(chunk.size_)].insert(
      {chunk.size_, chunks_.begin()});
}

size_t BestFitAllocator::FreeSize() const {
  size_t acc = 0;
  for (auto& array_item : free_chunks_) {
    for (auto& pair : array_item) {
      acc += pair.second->size_;
    }
  }
  return acc;
}

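// Remove the free chunk referenced by bin_iterator from its bin, carve off
// request_size bytes for the caller, and re-register any non-empty remainder
// as a new free chunk.  free_chunk_offset is the bin index (highest bit
// position) the chunk was found in, not a byte offset.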
BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size,
                                                      size_t free_chunk_offset,
                                                      MapIt bin_iterator) {
  auto to_split_it = bin_iterator->second;
  free_chunks_[free_chunk_offset].erase(bin_iterator);

  PADDLE_ENFORCE(to_split_it->is_free);
  PADDLE_ENFORCE_GE(to_split_it->size_, request_size);

  auto remaining_size = to_split_it->size_ - request_size;
  details::Chunk to_use;
  details::Chunk remaining;
  to_use.size_ = request_size;
  to_use.is_free = false;
  remaining.size_ = remaining_size;
  remaining.is_free = true;

  // calc offsets
  to_use.offset_ = to_split_it->offset_;
  remaining.offset_ = to_use.offset_ + to_use.size_;

  // insert to chunk list
  auto to_use_it = chunks_.insert(to_split_it, to_use);
  if (remaining.size_ != 0) {
    auto bit_size = static_cast<size_t>(HighestBitPos(remaining.size_));
    free_chunks_[bit_size].insert(
        {remaining.size_, chunks_.insert(to_split_it, remaining)});
  }
  chunks_.erase(to_split_it);
  return to_use_it;
}

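// Register a free chunk in the bin that matches its size.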
void BestFitAllocator::InsertFreeNode(const ListIt& it) {
  auto pos = static_cast<size_t>(HighestBitPos(it->size_));
  auto& free_map = free_chunks_[pos];
  free_map.insert({it->size_, it});
}
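// Remove a free chunk from its bin.  Several chunks may share the same size,
// so scan forward from the first entry with that size until the entry holding
// this exact list iterator is found.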
void BestFitAllocator::EraseFreeNode(const ListIt& it) {
  size_t pos = static_cast<size_t>(HighestBitPos(it->size_));
  auto& free_map = free_chunks_[pos];
  auto map_it = free_map.find(it->size_);
  while (map_it != free_map.end() && map_it->second != it) {
    ++map_it;
  }
  PADDLE_ENFORCE(map_it != free_map.end());
  free_map.erase(map_it);
}
size_t BestFitAllocator::NumFreeChunks() const {
  size_t num = 0;
  for (auto& array_item : free_chunks_) {
    num += array_item.size();
  }
  return num;
}
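// Mark the chunk behind the allocation as free again and coalesce it with its
// left and right neighbours when they are free, so adjacent free memory is
// always represented by a single chunk.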
void BestFitAllocator::Free(MannualFreeAllocation* allocation) {
  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
  auto chunk_it = bf_allocation->ChunkIterator();
  PADDLE_ENFORCE(!chunk_it->is_free);
  chunk_it->is_free = true;
  if (chunk_it != chunks_.begin()) {
    auto prev_it = chunk_it;
    --prev_it;

    if (prev_it->is_free) {
      // Merge Left.
      EraseFreeNode(prev_it);
      prev_it->size_ += chunk_it->size_;
      chunks_.erase(chunk_it);
      chunk_it = prev_it;
    }
  }

  auto next_it = chunk_it;
  ++next_it;
  if (next_it != chunks_.end() && next_it->is_free) {
    EraseFreeNode(next_it);
    chunk_it->size_ += next_it->size_;
    chunks_.erase(next_it);
  }

  InsertFreeNode(chunk_it);
}
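// Best-fit search: start at the bin for the request's highest bit and scan
// towards larger bins; inside a bin, lower_bound picks the smallest free
// chunk of at least `size` bytes.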
MannualFreeAllocation* BestFitAllocator::AllocateImpl(size_t size,
                                                      Allocator::Attr attr) {
  auto highest_set_bit = static_cast<size_t>(HighestBitPos(size));
  MapIt map_it;
  for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) {
    map_it = free_chunks_[highest_set_bit].lower_bound(size);
    if (map_it != free_chunks_[highest_set_bit].end()) {
      break;
    }
  }
  if (UNLIKELY(highest_set_bit == free_chunks_.size())) {
    throw BadAlloc(string::Sprintf(
        "Cannot allocate %d bytes, total size of free fragments is %d", size,
        FreeSize()));
  }
  auto chunk_it = SplitChunk(size, highest_set_bit, map_it);
  return new BestFitAllocation(this, chunk_it);
}

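// The allocation's pointer is the allocator's base pointer plus the chunk
// offset; the chunk-list iterator is stored so Free() can locate the chunk
// without searching.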
BestFitAllocation::BestFitAllocation(
    paddle::memory::allocation::BestFitAllocator* allocator,
    typename details::ChunkList::iterator chunk_it)
    : MannualFreeAllocation(
          allocator, reinterpret_cast<void*>(
                         reinterpret_cast<uintptr_t>(allocator->BasePtr()) +
                         chunk_it->offset_),
          chunk_it->size_, allocator->Place()),
      chunk_it_(chunk_it) {}
}  // namespace allocation
}  // namespace memory
}  // namespace paddle