// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include "paddle/fluid/memory/allocation/allocator.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/core/stream.h"

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_ASCEND_CL
using NPUPinnedAllocator = paddle::memory::allocation::NPUPinnedAllocator;
#endif

// AllocatorFacade is the interface exposed to other modules.
// All configuration and work-in-progress code should be hidden
// behind this facade.
//
// NOTE(yy): This class is a singleton class.
// NOTE(yy): To keep the ABI stable and make compilation faster, a Pimpl trick
// is used here: the facade only holds an opaque AllocatorFacadePrivate*.
class AllocatorFacadePrivate;
class AllocatorFacade {
 public:
  using Allocation = phi::Allocation;
  AllocatorFacade(const AllocatorFacade& o) = delete;
  const AllocatorFacade& operator=(const AllocatorFacade& o) = delete;
  ~AllocatorFacade();

  static AllocatorFacade& Instance();

  const std::shared_ptr<Allocator>& GetAllocator(const platform::Place& place);

  void* GetBasePtr(const std::shared_ptr<Allocation>& allocation);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  const std::shared_ptr<Allocator>& GetAllocator(const platform::Place& place,
                                                 const gpuStream_t& stream);
#endif

  const std::shared_ptr<Allocator>& GetZeroAllocator(
      const platform::Place& place);

  // Allocate a shared allocation.
  std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                          size_t size);
  // Allocate a unique allocation.
  AllocationPtr Alloc(const platform::Place& place, size_t size);
  // Release unused memory held in the memory pool of the given place.
  uint64_t Release(const platform::Place& place);

  std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                          size_t size,
                                          const phi::Stream& stream);

  bool InSameStream(const std::shared_ptr<Allocation>& allocation,
                    const phi::Stream& stream);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // TODO(zhiqiu): change gpuStream_t to phi::Stream if needed.
  AllocationPtr Alloc(const platform::Place& place, size_t size,
                      const gpuStream_t& stream);
  uint64_t Release(const platform::CUDAPlace& place, const gpuStream_t& stream);
  void RecordStream(std::shared_ptr<Allocation> allocation,
                    const gpuStream_t& stream);
  const gpuStream_t& GetStream(
      const std::shared_ptr<Allocation>& allocation) const;
#endif

#ifdef PADDLE_WITH_CUDA
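  // Memory-pool management for CUDA Graph capture: Prepare* creates a
  // dedicated pool for the graph identified by `id`, Remove* tears it down.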
  void PrepareMemoryPoolForCUDAGraph(CUDAGraphID id);
  void RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id);
#endif

  // TODO(yy): Allocate a Copy-On-Write allocation?
 private:
  AllocatorFacade();
  AllocatorFacadePrivate* m_;
};
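
// Usage sketch (illustrative only; a CPU place is assumed here, any other
// platform::Place works the same way):
//
//   auto& facade = paddle::memory::allocation::AllocatorFacade::Instance();
//   // Unique ownership: the memory is freed when the AllocationPtr dies.
//   AllocationPtr a = facade.Alloc(platform::CPUPlace(), /*size=*/4096);
//   // Shared ownership: the last shared_ptr holder releases the memory.
//   std::shared_ptr<phi::Allocation> b =
//       facade.AllocShared(platform::CPUPlace(), /*size=*/4096);
//   // Hand currently unused, cached memory of this place back to the system.
//   facade.Release(platform::CPUPlace());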

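// Stream-aware usage sketch for GPU builds (illustrative only; `stream` and
// `other_stream` are assumed to be valid gpuStream_t handles created
// elsewhere):
//
//   #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
//   auto& facade = AllocatorFacade::Instance();
//   // Allocate from the memory pool associated with `stream`.
//   AllocationPtr buf =
//       facade.Alloc(platform::CUDAPlace(0), /*size=*/1 << 20, stream);
//   // If a shared allocation is also consumed on another stream, record that
//   // stream so its memory is not reused before the pending work finishes.
//   std::shared_ptr<phi::Allocation> shared_buf = facade.AllocShared(
//       platform::CUDAPlace(0), /*size=*/1 << 20,
//       phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
//   facade.RecordStream(shared_buf, other_stream);
//   // Release cached memory of the pool associated with `stream`.
//   facade.Release(platform::CUDAPlace(0), stream);
//   #endif
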
}  // namespace allocation
}  // namespace memory
}  // namespace paddle