// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <atomic>  // NOLINT
#include <functional>
#include <memory>
#include <mutex>   // NOLINT
#include <thread>  // NOLINT
#include <vector>
#include "paddle/fluid/memory/allocation/allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

// The AutoIncrementAllocator manages many underlying allocators. If none of
// them can allocate the requested memory, a new allocator is created and its
// `Allocate` method is invoked.
//
// NOTE(yy): The AutoIncrementAllocator prefers to allocate memory from the
// most recently successful allocator.
//
// NOTE(yy): We may need to release an underlying allocator if it allocates
// nothing. However, this is generally not useful, since it would make
// performance unpredictable.
//
// NOTE(yy): This allocator is only locked when creating a new underlying
// allocator. Allocation requests from many threads may be dispatched
// to the same underlying allocator, so each underlying allocator must be
// thread-safe.
//
// NOTE(zjl): A capacity parameter is added to the constructor because a
// high-performance, thread-safe std::vector with a varying size is hard to
// implement. Fortunately, we can get the total GPU memory and each chunk
// size, so a suitable capacity for AutoIncrementAllocator can be computed.
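//
// A minimal usage sketch (illustrative only; `MakeChunkAllocator`,
// `kTotalGpuMemory` and `kChunkSize` are hypothetical placeholders, and the
// public `Allocate` entry point of the Allocator base class is assumed to
// forward to `AllocateImpl`):
//
//   AutoIncrementAllocator auto_allocator(
//       [] { return MakeChunkAllocator(kChunkSize); },    // creates one chunk
//       /* capacity = */ kTotalGpuMemory / kChunkSize);   // max chunk count
//   auto allocation = auto_allocator.Allocate(1 << 20);   // served by a chunk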
class AutoIncrementAllocator : public Allocator {
 public:
  // AllocatorCreator is the callback used to create a new underlying Allocator
  using AllocatorCreator = std::function<std::shared_ptr<Allocator>()>;

  explicit AutoIncrementAllocator(AllocatorCreator&& creator, size_t capacity)
      : creator_(std::move(creator)), underlying_allocators_(capacity) {}

  bool IsAllocThreadSafe() const override;

 private:
  std::shared_ptr<Allocator> CreateNewAllocator();

 protected:
  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override;

 private:
  AllocatorCreator creator_;

  std::vector<AllocatorCreator::result_type> underlying_allocators_;
  std::atomic<size_t> allocator_num_{0};

  // Use std::atomic rather than std::mutex, since std::atomic is usually
  // lock-free
  std::atomic<size_t> prev_success_allocator_{0};

  std::mutex mtx_;
};
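
// The members above imply roughly the following allocation flow. This is an
// illustrative sketch only; the real logic lives in
// auto_increment_allocator.cc and may differ in detail (retry order, error
// handling, etc.):
//
//   Allocation* AutoIncrementAllocator::AllocateImpl(size_t size, Attr attr) {
//     size_t num = allocator_num_.load();           // lock-free snapshot
//     size_t cur = prev_success_allocator_.load();  // start at the last winner
//     for (size_t i = 0; i < num; ++i, cur = (cur + 1) % num) {
//       try {
//         auto result = underlying_allocators_[cur]->Allocate(size, attr);
//         prev_success_allocator_ = cur;            // remember the winner
//         return result.release();
//       } catch (BadAlloc&) {
//         // This allocator is exhausted; try the next one.
//       }
//     }
//     // Every existing allocator failed: create a new one (locks mtx_).
//     return CreateNewAllocator()->Allocate(size, attr).release();
//   }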
}  // namespace allocation
}  // namespace memory
}  // namespace paddle