// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include "paddle/fluid/memory/allocation/allocator.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/stream/stream.h"

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_ASCEND_CL
using NPUPinnedAllocator = paddle::memory::allocation::NPUPinnedAllocator;
#endif

// AllocatorFacade is the interface exposed to other modules.
// All the configuration or dirty code under development should
// be hidden behind this facade.
//
// NOTE(yy): This class is a singleton.
// NOTE(yy): To keep the ABI stable and speed up compilation, we use the
// Pimpl idiom here.
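//
// A minimal usage sketch (illustrative only; Instance() and AllocShared()
// are declared below, and platform::CPUPlace comes from platform/place.h):
//
//   auto& facade = AllocatorFacade::Instance();
//   std::shared_ptr<pten::Allocation> buf =
//       facade.AllocShared(platform::CPUPlace(), /*size=*/4096);
//   // The memory is returned to the allocator when the last shared
//   // reference to `buf` goes away.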
class AllocatorFacadePrivate;
class AllocatorFacade {
 public:
  using Allocation = pten::Allocation;
  AllocatorFacade(const AllocatorFacade& o) = delete;
  const AllocatorFacade& operator=(const AllocatorFacade& o) = delete;
  ~AllocatorFacade();

  static AllocatorFacade& Instance();

  const std::shared_ptr<Allocator>& GetAllocator(const platform::Place& place);
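
  // For example, a handle-then-allocate sketch (illustrative; assumes the
  // Allocator::Allocate(size) interface declared in allocator.h):
  //   const std::shared_ptr<Allocator>& a =
  //       AllocatorFacade::Instance().GetAllocator(platform::CUDAPlace(0));
  //   AllocationPtr p = a->Allocate(/*size=*/1024);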

  // Allocate a shared allocation.
  std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                          size_t size);
  // Allocate a unique allocation.
  AllocationPtr Alloc(const platform::Place& place, size_t size);
  // Release unused memory cached in the memory pool.
  uint64_t Release(const platform::Place& place);
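
  // Sketch of the unique-ownership path (illustrative only):
  //   AllocationPtr tmp =
  //       AllocatorFacade::Instance().Alloc(platform::CPUPlace(), 256);
  //   // `tmp` frees its memory when it goes out of scope; Release() then
  //   // lets the pool hand cached-but-unused blocks back to the system.
  //   AllocatorFacade::Instance().Release(platform::CPUPlace());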

  std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                          size_t size,
                                          const platform::Stream& stream);

  bool InSameStream(const std::shared_ptr<Allocation>& allocation,
                    const platform::Stream& stream);
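
  // Stream-aware sketch (illustrative; constructing platform::Stream from a
  // raw stream id is an assumption about stream.h, not part of this API):
  //   platform::Stream s(stream_id);
  //   auto buf = AllocatorFacade::Instance().AllocShared(place, size, s);
  //   bool same = AllocatorFacade::Instance().InSameStream(buf, s);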

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // TODO(zhiqiu): change gpuStream_t to platform::Stream if needed.
  AllocationPtr Alloc(const platform::Place& place, size_t size,
                      const gpuStream_t& stream);
  uint64_t Release(const platform::CUDAPlace& place, const gpuStream_t& stream);
  void RecordStream(std::shared_ptr<Allocation> allocation,
                    const gpuStream_t& stream);
  const gpuStream_t& GetStream(
      const std::shared_ptr<Allocation>& allocation) const;
#endif
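
  // Sketch of the stream-safe pattern these GPU overloads enable
  // (illustrative; assumes a CUDA/HIP build where gpuStream_t is defined):
  //   auto& facade = AllocatorFacade::Instance();
  //   auto buf = facade.AllocShared(gpu_place, size);  // default stream
  //   facade.RecordStream(buf, other_stream);  // `buf` is also used on
  //       // other_stream, so its memory is not reused until that stream
  //       // has finished with it.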

#ifdef PADDLE_WITH_CUDA
  void PrepareMemoryPoolForCUDAGraph(CUDAGraphID id);
  void RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id);
#endif
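
  // Sketch (illustrative; `graph_id` stands for a hypothetical CUDAGraphID):
  //   facade.PrepareMemoryPoolForCUDAGraph(graph_id);
  //   /* ... capture and replay the CUDA graph ... */
  //   facade.RemoveMemoryPoolOfCUDAGraph(graph_id);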

  // TODO(yy): Allocate a Copy-On-Write allocation?
 private:
  AllocatorFacade();
  AllocatorFacadePrivate* m_;
};

}  // namespace allocation
}  // namespace memory
}  // namespace paddle