// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/auto_increment_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

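// Tries the underlying allocators in round-robin order, starting from the one
// that served the previous successful request. If every existing allocator
// throws BadAlloc, a new underlying allocator is created and used instead.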
AllocationPtr AutoIncrementAllocator::Allocate(size_t size,
                                               Allocator::Attr attr) {
  auto cur = prev_success_allocator_.load();
  size_t retry_count = allocator_num_.load();
  size_t allocator_num = retry_count;
  while (retry_count-- > 0) {  // try each existing allocator at most once
    try {
      auto res = underlying_allocators_[cur]->Allocate(size, attr);
      prev_success_allocator_ = cur;
      return res;
    } catch (BadAlloc&) {
      if (++cur >= allocator_num) {
        cur = 0;
      }
    } catch (...) {
      // If a different type of exception is thrown, just rethrow it.
      throw;
    }
  }

  // This branch covers the case where the existing allocators are exhausted
  // while more than one allocation request is in flight. The first request
  // succeeds by creating a new allocator; the second request would fail
  // unless it also tries the allocators created after it took its snapshot.
  for (cur = allocator_num; cur < allocator_num_; ++cur) {
    try {
      auto ret = underlying_allocators_[cur]->Allocate(size, attr);
      prev_success_allocator_ = cur;
      return ret;
    } catch (BadAlloc&) {
    } catch (...) {
      throw;
    }
  }
  // No suitable allocator exists; create a new one and allocate from it.
  return CreateNewAllocator()->Allocate(size, attr);
}

bool AutoIncrementAllocator::IsAllocThreadSafe() const { return true; }

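// Appends a freshly created allocator to underlying_allocators_ while holding
// mtx_; allocator_num_ is incremented only after the new slot is populated, so
// concurrent Allocate() calls can observe the new allocator without locking.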
std::shared_ptr<Allocator> AutoIncrementAllocator::CreateNewAllocator() {
  std::lock_guard<std::mutex> guard(mtx_);
  auto old_size = allocator_num_.load();
  PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(),
                    "Allocator number exceeds capacity %d",
                    underlying_allocators_.size());
  underlying_allocators_[old_size] = creator_();
  prev_success_allocator_ = old_size;
  ++allocator_num_;
  PADDLE_ENFORCE(
      underlying_allocators_[old_size]->IsAllocThreadSafe(),
      "the underlying allocator must be thread safe. This is a program "
      "bug.");
  return underlying_allocators_[old_size];
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle