// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include "paddle/fluid/memory/allocation/allocator.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/gpu_info.h"
#endif
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_ASCEND_CL
using NPUPinnedAllocator = paddle::memory::allocation::NPUPinnedAllocator;
#endif

// Allocator Facade is the interface exposed to other modules.
// All the configuration or dirty code under development should
// be hidden behind this facade.
//
// NOTE(yy): This class is a singleton class.
// NOTE(yy): To create a stable ABI and make compilation faster, we use
// a Pimpl trick here.
class AllocatorFacadePrivate;
class AllocatorFacade {
 public:
  AllocatorFacade(const AllocatorFacade& o) = delete;
  const AllocatorFacade& operator=(const AllocatorFacade& o) = delete;
  ~AllocatorFacade();

  static AllocatorFacade& Instance();

  const std::shared_ptr<Allocator>& GetAllocator(const platform::Place& place);

  // Allocate a shared allocation.
  std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                          size_t size);
  // Allocate a unique allocation.
  AllocationPtr Alloc(const platform::Place& place, size_t size);
  // Release unused memory held by the memory pool of the given place and
  // return the number of bytes released.
  uint64_t Release(const platform::Place& place);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  std::shared_ptr<Allocation> AllocShared(const platform::CUDAPlace& place,
                                          size_t size,
                                          const gpuStream_t& stream);
  AllocationPtr Alloc(const platform::CUDAPlace& place, size_t size,
                      const gpuStream_t& stream);
  uint64_t Release(const platform::CUDAPlace& place, const gpuStream_t& stream);
  void RecordStream(Allocation* allocation, const gpuStream_t& stream);
#ifdef PADDLE_WITH_CUDA
  void PrepareMemoryPoolForCUDAGraph(CUDAGraphID id);
  void RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id);
#endif
#endif
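
  // A minimal usage sketch of the stream-aware interface above (illustrative
  // only; `stream` and `other_stream` are assumed to be valid gpuStream_t
  // handles created by the caller, and the size is arbitrary):
  //
  //   auto& facade = AllocatorFacade::Instance();
  //   platform::CUDAPlace gpu(0);
  //   // Allocate 1 MB associated with `stream`.
  //   AllocationPtr buf = facade.Alloc(gpu, 1 << 20, stream);
  //   // If `other_stream` also uses `buf`, record it so the allocator does
  //   // not hand the memory out again before that stream is done with it.
  //   facade.RecordStream(buf.get(), other_stream);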

  // TODO(yy): Allocate a Copy-On-Write allocation?
 private:
  AllocatorFacade();
  AllocatorFacadePrivate* m_;
};
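
// A minimal usage sketch (illustrative only; the places and sizes below are
// arbitrary, not mandated by this header):
//
//   auto& facade = AllocatorFacade::Instance();
//   // A unique allocation: freed automatically when `buf` goes out of scope.
//   AllocationPtr buf = facade.Alloc(platform::CPUPlace(), 1024);
//   // A shared allocation: may be held by several owners at once.
//   std::shared_ptr<Allocation> shared =
//       facade.AllocShared(platform::CPUPlace(), 1024);
//   // Return unused pooled memory on the CPU place back to the system;
//   // the return value is the number of bytes released.
//   facade.Release(platform::CPUPlace());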

}  // namespace allocation
}  // namespace memory
}  // namespace paddle