// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator.h"
#include <map>
#include <vector>
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
Y
Yu Yang 已提交
20
#include "paddle/fluid/memory/allocation/auto_increment_allocator.h"
21 22 23 24
#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/locked_allocator.h"
#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"
Y
Yu Yang 已提交
25
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
Y
Yu Yang 已提交
26
#include "paddle/fluid/platform/cuda_device_guard.h"
27 28 29 30 31 32 33 34 35 36
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#endif

namespace paddle {
namespace memory {
namespace allocation {

// TODO(yy): Dirty code here. This class should be configurable in runtime.
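// CPU allocator with two backends: ordinary pageable host memory for normal
// requests, and pinned (page-locked) memory for kCommunication requests,
// since pinned buffers can be copied to and from devices without an extra
// staging step.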
class CPUManagedAllocator : public ManagedAllocator {
 public:
  CPUManagedAllocator()
      : normal_allocator_(NaiveManagedAllocator::Create(
            std::unique_ptr<Allocator>(new CPUAllocator()))),
        communication_allocator_(NaiveManagedAllocator::Create(
            std::unique_ptr<Allocator>(new CPUPinnedAllocator()))) {}

  std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override {
    if (attr == kCommunication) {
      return communication_allocator_->Allocate(size, attr);
    } else {
      return normal_allocator_->Allocate(size, attr);
    }
  }

  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override {
    if (attr == kCommunication) {
      return communication_allocator_->AllocateShared(size, attr);
    } else {
      return normal_allocator_->AllocateShared(size, attr);
    }
  }
  bool IsAllocThreadSafe() const override { return true; }

 private:
  std::shared_ptr<ManagedAllocator> normal_allocator_;
  std::shared_ptr<ManagedAllocator> communication_allocator_;
};

#ifdef PADDLE_WITH_CUDA
// TODO(yy): Dirty code here. This class should be configurable in runtime.
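// Per-device GPU allocator: device memory is requested from CUDA in chunks
// of GpuMaxChunkSize(); each chunk is managed by its own BestFitAllocator,
// and the AutoIncrementAllocator calls BestFitAllocatorCreator to add a new
// chunk on demand when the existing ones cannot satisfy a request.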
class CUDAManagedAllocator : public ManagedAllocator {
 public:
  explicit CUDAManagedAllocator(int dev_id) {
    platform::CUDADeviceGuard guard(dev_id);
    max_chunk_size_ = platform::GpuMaxChunkSize();
    raw_allocator_ = NaiveManagedAllocator::Create(std::unique_ptr<Allocator>(
        new CUDAAllocator(platform::CUDAPlace(dev_id))));
    default_allocator_ = std::make_shared<AutoIncrementAllocator>(
        [this] { return BestFitAllocatorCreator(); });
  }

  ~CUDAManagedAllocator() {
    // Destroy in a fixed order: default_allocator_ hands out memory carved
    // from chunks_, and chunks_ in turn came from raw_allocator_, so tear
    // them down front to back.
    default_allocator_.reset();
    chunks_.clear();
    raw_allocator_.reset();
  }

  std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override {
    return default_allocator_->Allocate(size, attr);
  }
  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override {
    return default_allocator_->AllocateShared(size, attr);
  }

  std::shared_ptr<ManagedAllocator> BestFitAllocatorCreator() {
    chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_));
    auto* allocation = chunks_.back().get();
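    // Each chunk becomes its own best-fit arena; the AlignedAllocator<64u>
    // wrapper pads requests so that returned addresses are 64-byte aligned.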
    return std::make_shared<AlignedAllocator<64u>>(
        NaiveManagedAllocator::Create(
            std::unique_ptr<Allocator>(new BestFitAllocator(allocation))));
  }
  bool IsAllocThreadSafe() const override { return true; }

 private:
  size_t max_chunk_size_;
  std::vector<std::unique_ptr<Allocation>> chunks_;
  std::shared_ptr<ManagedAllocator> raw_allocator_;
  std::shared_ptr<ManagedAllocator> default_allocator_;
};
#endif

class AllocatorFacadePrivate {
 public:
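  // One ManagedAllocator per Place: a CPUPlace entry plus one entry per
  // visible CUDA device, all created eagerly in the constructor.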
  std::map<platform::Place, std::shared_ptr<ManagedAllocator>> allocators_;

  ~AllocatorFacadePrivate() = default;

  AllocatorFacadePrivate() {
    InitCPUAllocator();
    InitCUDAAllocator();
  }

 private:
  void InitCPUAllocator() {
    allocators_[platform::CPUPlace()] = std::make_shared<CPUManagedAllocator>();
  }

  void InitCUDAAllocator() {
#ifdef PADDLE_WITH_CUDA
    for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); ++dev_id) {
      allocators_[platform::CUDAPlace(dev_id)] =
          std::make_shared<CUDAManagedAllocator>(dev_id);
    }
#endif
  }
};

// Pimpl idiom: keeps implementation details out of the public interface.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
AllocatorFacade::~AllocatorFacade() { delete m_; }

AllocatorFacade& AllocatorFacade::Instance() {
  static AllocatorFacade instance;
  return instance;
}

std::shared_ptr<Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size, Allocator::Attr attr) {
  return m_->allocators_[place]->AllocateShared(size, attr);
}

std::unique_ptr<Allocation> AllocatorFacade::Alloc(const platform::Place& place,
                                                   size_t size,
                                                   Allocator::Attr attr) {
  return m_->allocators_[place]->Allocate(size, attr);
}
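
// Usage sketch (hypothetical call site; kCommunication is the only Attr
// value this file references, the rest are defined in allocator.h):
//
//   auto& facade = AllocatorFacade::Instance();
//   // CPU path: kCommunication routes to pinned host memory.
//   std::shared_ptr<Allocation> pinned = facade.AllocShared(
//       platform::CPUPlace(), 1 << 20, Allocator::kCommunication);
//   // GPU path: served from the per-device chunked best-fit allocator.
//   std::unique_ptr<Allocation> gpu_buf = facade.Alloc(
//       platform::CUDAPlace(0), 1 << 20, Allocator::kCommunication);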

}  // namespace allocation
}  // namespace memory
}  // namespace paddle