/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>

#include "paddle/phi/api/include/dll_decl.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/allocator.h"
#include "paddle/phi/core/generator.h"
#include "paddle/phi/core/utils/type_registry.h"

namespace phi {
W
Wilber 已提交
27 28 29 30 31 32 33 34
class TensorBase;

/**
 * DeviceContext provides device-related interfaces.
 *
 * All kernels must access the interfaces provided by the backend through
 * DeviceContext.
 */
35
class PADDLE_API DeviceContext {
36 37
  using DataType = paddle::experimental::DataType;

W
Wilber 已提交
38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
 public:
  /**
   * @brief Default construct.
   */
  DeviceContext();

  /**
   * @brief Copy construct.
   */
  DeviceContext(const DeviceContext&);

  /**
   * @brief Move construct.
   */
  DeviceContext(DeviceContext&&);

54 55 56 57 58
  /**
   * @brief Move assign operator.
   */
  DeviceContext& operator=(DeviceContext&&);

W
Wilber 已提交
59 60 61 62 63 64
  /**
   * @brief Default destruct.
   */
  virtual ~DeviceContext();

  /**
65
   * @brief Set the device-related Allocator object.
W
Wilber 已提交
66 67 68
   *
   * @param allocator
   */
W
Wilber 已提交
69
  void SetAllocator(const Allocator*);
W
Wilber 已提交
70 71

  /**
72
   * @brief Set the host Allocator object.
W
Wilber 已提交
73
   *
74
   * @param allocator
W
Wilber 已提交
75
   */
76
  void SetHostAllocator(const Allocator*);
W
Wilber 已提交
77 78

  /**
79 80 81 82
   * @brief Set the zero-size Allocator object.
   *
   * @param allocator
   */
83
  void SetZeroAllocator(const Allocator*);
W
Wilber 已提交
84

W
wanghuancoder 已提交
85
  /**
86 87 88 89
   * @brief Set the zero-size Allocator object.
   *
   * @param allocator
   */
W
wanghuancoder 已提交
90 91
  void SetPinnedAllocator(const Allocator*);

W
Wilber 已提交
92
  /**
93
   * @brief Get the const Allocator object.
W
Wilber 已提交
94
   *
95
   * @return Allocator
W
Wilber 已提交
96
   */
W
Wilber 已提交
97
  const Allocator& GetAllocator() const;
W
Wilber 已提交
98 99

  /**
100
   * @brief Get the const device-related Allocator object.
W
Wilber 已提交
101 102 103 104 105
   *
   * @return Allocator
   */
  const Allocator& GetHostAllocator() const;

106 107
  const Allocator& GetZeroAllocator() const;

W
wanghuancoder 已提交
108 109
  const Allocator& GetPinnedAllocator() const;

Y
Yuang Liu 已提交
110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136
#ifdef PADDLE_WITH_CUDA
  /**
   * @brief Set the CUDA graph Allocator object.
   *
   * @param allocator
   */
  void SetCUDAGraphAllocator(const Allocator*);

  /**
   * @brief Get the const CUDA graph Allocator object.
   *
   * @return Allocator
   */
  const Allocator& GetCUDAGraphAllocator() const;

  /**
   * @brief Test whether the CUDA graph allocator is valid
   *
   * This method should be called before calling GetCUDAGraphAllocator().
   * Other unit can calls GetCUDAGraphAllocator() method,
   * only when this method returns True!
   *
   * @return true if cuda_graph_allocator_ is valid, false otherwise
   */
  bool IsCUDAGraphAllocatorValid() const;
#endif

137 138 139
  /**
   * @brief Allocate device memory for tensor.
   */
W
wanghuancoder 已提交
140 141 142 143
  void* Alloc(TensorBase*,
              DataType dtype,
              size_t requested_size = 0,
              bool pinned = false) const;
144 145

  template <typename T>
W
wanghuancoder 已提交
146 147 148
  T* Alloc(TensorBase* tensor,
           size_t requested_size = 0,
           bool pinned = false) const;
149

W
Wilber 已提交
150 151 152
  /**
   * @brief Allocate host memory for tensor.
   */
153
  void* HostAlloc(TensorBase* tensor,
154
                  DataType dtype,
155 156 157 158
                  size_t requested_size = 0) const;

  template <typename T>
  T* HostAlloc(TensorBase* tensor, size_t requested_size = 0) const;
W
Wilber 已提交
159

W
Wilber 已提交
160
  virtual const Place& GetPlace() const = 0;
161

W
Wilber 已提交
162 163 164 165
  // TODO(wilber): The fluid framework uses wait() in many places, how to delete
  // this API interface.
  virtual void Wait() const {}

W
Wilber 已提交
166
  /**
167 168 169 170
   * @brief Set the generator for special op.
   *
   * @param Generator
   */
W
Wilber 已提交
171 172 173 174 175 176 177 178
  void SetGenerator(Generator*);
  /**
   * @brief Get the generator object.
   *
   * @return Generator
   */
  Generator* GetGenerator() const;

L
Leo Chen 已提交
179
  /**
180 181 182 183
   * @brief Set the host generator for special op.
   *
   * @param Generator
   */
L
Leo Chen 已提交
184 185 186 187 188 189 190 191
  void SetHostGenerator(Generator*);
  /**
   * @brief Get the host generator object.
   *
   * @return Generator
   */
  Generator* GetHostGenerator() const;

192 193 194 195 196 197 198 199
  /**
   * @brief Return the type information of the derived class to support
   *        safely downcast in non-rtti environment.
   *
   * @return The type information of the derived class.
   */
  TypeInfo<DeviceContext> type_info() const { return type_info_; }

W
Wilber 已提交
200 201 202
 private:
  struct Impl;
  std::unique_ptr<Impl> impl_;
203 204 205 206

  template <typename T, typename U>
  friend class TypeInfoTraits;
  TypeInfo<DeviceContext> type_info_{TypeInfo<DeviceContext>::kUnknownType};
W
Wilber 已提交
207 208
};

209
}  // namespace phi