//   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/custom/custom_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/selected_rows.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "paddle/phi/core/string_tensor.h"
#include "paddle/phi/core/type_defs.h"

namespace phi {

// The name PD_KERNEL is already taken by the custom op API, so PHI_KERNEL is
// used here instead.
#define PHI_KERNEL(...) \
  ::phi::KernelImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Compute

#define PHI_VARIADIC_KERNEL(...)                                     \
  reinterpret_cast<void*>(&::phi::KernelImpl<decltype(&__VA_ARGS__), \
                                             &__VA_ARGS__>::VariadicCompute)
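
// Usage sketch (illustrative only, not part of this header): for a
// hypothetical kernel written against the calling convention enforced below,
//   template <typename T, typename Context>
//   void FooKernel(const Context& dev_ctx,
//                  const DenseTensor& x,
//                  float scale,
//                  DenseTensor* out);
// PHI_KERNEL(FooKernel<float, CPUContext>) names the KernelImpl<...>::Compute
// wrapper that unpacks a KernelContext, while PHI_VARIADIC_KERNEL yields a
// type-erased pointer to the wrapper that takes the arguments directly.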

#define PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(dev_ctx)           \
  template <typename... Tail>                                                \
  struct KernelCallHelper<const dev_ctx&, Tail...> {                         \
    template <int dev_ctx_idx,                                               \
              int in_idx,                                                    \
              int attr_idx,                                                  \
              int out_idx,                                                   \
              typename... PreviousArgs>                                      \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {        \
      static_assert(in_idx == 0,                                             \
                    "Kernel's DeviceContext should appear before Inputs.");  \
      static_assert(                                                         \
          attr_idx == 0,                                                     \
          "Kernel's DeviceContext should appear before Attributes.");        \
      static_assert(out_idx == 0,                                            \
                    "Kernel's DeviceContext should appear before Outputs."); \
      const dev_ctx& arg = ctx->GetDeviceContext<dev_ctx>();                 \
      KernelCallHelper<Tail...>::                                            \
          template Compute<dev_ctx_idx + 1, in_idx, attr_idx, out_idx>(      \
              ctx, pargs..., arg);                                           \
    }                                                                        \
  }
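
// Each PD_SPECIALIZE_KernelCallHelper_FOR_* macro generates a KernelCallHelper
// specialization that matches one parameter type of the kernel signature,
// fetches the corresponding value from the KernelContext using the
// dev_ctx/in/attr/out index counters, appends it to the argument pack, and
// recurses on the remaining parameter types. The static_asserts enforce the
// expected argument order: DeviceContext, Inputs..., Attributes..., Outputs...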

#define PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(tensor_type)           \
  template <typename... Tail>                                           \
  struct KernelCallHelper<const tensor_type&, Tail...> {                \
    template <int dev_ctx_idx,                                          \
              int in_idx,                                               \
              int attr_idx,                                             \
              int out_idx,                                              \
              typename... PreviousArgs>                                 \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {   \
      static_assert(attr_idx == 0,                                      \
                    "Kernel's Input should appear before Attributes."); \
      static_assert(out_idx == 0,                                       \
                    "Kernel's Input should appear before Outputs.");    \
      const std::pair<int, int> range = ctx->InputRangeAt(in_idx);      \
      const tensor_type& arg = ctx->InputAt<tensor_type>(range.first);  \
      KernelCallHelper<Tail...>::                                       \
          template Compute<dev_ctx_idx, in_idx + 1, attr_idx, out_idx>( \
              ctx, pargs..., arg);                                      \
    }                                                                   \
  }

#define PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(tensor_type)     \
  template <typename... Tail>                                              \
  struct KernelCallHelper<paddle::optional<const tensor_type&>, Tail...> { \
    template <int dev_ctx_idx,                                             \
              int in_idx,                                                  \
              int attr_idx,                                                \
              int out_idx,                                                 \
              typename... PreviousArgs>                                    \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {      \
      static_assert(attr_idx == 0,                                         \
                    "Kernel's Input should appear before Attributes.");    \
      static_assert(out_idx == 0,                                          \
                    "Kernel's Input should appear before Outputs.");       \
      const std::pair<int, int> range = ctx->InputRangeAt(in_idx);         \
      auto arg = ctx->OptionalInputAt<tensor_type>(range.first);           \
      KernelCallHelper<Tail...>::                                          \
          template Compute<dev_ctx_idx, in_idx + 1, attr_idx, out_idx>(    \
              ctx, pargs..., arg);                                         \
    }                                                                      \
  }

#define PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(tensor_type)          \
  template <typename... Tail>                                                \
  struct KernelCallHelper<const std::vector<const tensor_type*>&, Tail...> { \
    template <int dev_ctx_idx,                                               \
              int in_idx,                                                    \
              int attr_idx,                                                  \
              int out_idx,                                                   \
              typename... PreviousArgs>                                      \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {        \
      static_assert(attr_idx == 0,                                           \
                    "Kernel's Input should appear before Attributes.");      \
      static_assert(out_idx == 0,                                            \
                    "Kernel's Input should appear before Outputs.");         \
      const std::pair<int, int> range = ctx->InputRangeAt(in_idx);           \
      std::vector<const tensor_type*> arg = std::move(                       \
          ctx->InputsBetween<tensor_type>(range.first, range.second));       \
      KernelCallHelper<Tail...>::                                            \
          template Compute<dev_ctx_idx, in_idx + 1, attr_idx, out_idx>(      \
              ctx, pargs..., arg);                                           \
    }                                                                        \
  }
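
// The three input helpers above differ only in how they consume the range
// reported by InputRangeAt: a plain input reads the single tensor at
// range.first, an optional input may resolve to an empty paddle::optional,
// and a multi-input gathers every tensor in [range.first, range.second).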

#define PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(attr_type)           \
  template <typename... Tail>                                             \
  struct KernelCallHelper<attr_type, Tail...> {                           \
    template <int dev_ctx_idx,                                            \
              int in_idx,                                                 \
              int attr_idx,                                               \
              int out_idx,                                                \
              typename... PreviousArgs>                                   \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {     \
      static_assert(out_idx == 0,                                         \
                    "Kernel's Attributes should appear before Outputs."); \
      attr_type arg = ctx->AttrAt<attr_type>(attr_idx);                   \
      KernelCallHelper<Tail...>::                                         \
          template Compute<dev_ctx_idx, in_idx, attr_idx + 1, out_idx>(   \
              ctx, pargs..., arg);                                        \
    }                                                                     \
  }
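
// Attributes are fetched by position via AttrAt<attr_type>(attr_idx); only the
// attribute index advances, so input and output positions are unaffected.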

#define PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(tensor_type)           \
  template <typename... Tail>                                            \
  struct KernelCallHelper<tensor_type*, Tail...> {                       \
    template <int dev_ctx_idx,                                           \
              int in_idx,                                                \
              int attr_idx,                                              \
              int out_idx,                                               \
              typename... PreviousArgs>                                  \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {    \
      const std::pair<int, int> range = ctx->OutputRangeAt(out_idx);     \
      tensor_type* arg = ctx->MutableOutputAt<tensor_type>(range.first); \
      KernelCallHelper<Tail...>::                                        \
          template Compute<dev_ctx_idx, in_idx, attr_idx, out_idx + 1>(  \
              ctx, pargs..., arg);                                       \
    }                                                                    \
  }

#define PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(tensor_type)          \
  template <typename... Tail>                                                 \
  struct KernelCallHelper<std::vector<tensor_type*>, Tail...> {               \
    template <int dev_ctx_idx,                                                \
              int in_idx,                                                     \
              int attr_idx,                                                   \
              int out_idx,                                                    \
              typename... PreviousArgs>                                       \
    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) {         \
      const std::pair<int, int> range = ctx->OutputRangeAt(out_idx);          \
      std::vector<tensor_type*> arg = std::move(                              \
          ctx->MutableOutputBetween<tensor_type>(range.first, range.second)); \
      KernelCallHelper<Tail...>::                                             \
          template Compute<dev_ctx_idx, in_idx, attr_idx, out_idx + 1>(       \
              ctx, pargs..., arg);                                            \
    }                                                                         \
  }
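
// Output helpers hand results to the kernel as raw pointers (or vectors of
// pointers) obtained from MutableOutputAt / MutableOutputBetween over the
// range reported by OutputRangeAt.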

template <typename T>
struct TypeTag {};

template <typename Fn, Fn fn>
struct KernelImpl;
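
// Only the partial specialization below, for plain function pointers, is
// defined; the first kernel parameter is expected to be a device context
// reference, and TypeTag<int> is appended as a sentinel that terminates the
// KernelCallHelper recursion (see the end case at the bottom of this file).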

template <typename Return,
          typename DevCtx,
          typename... Args,
          Return (*kernel_fn)(DevCtx, Args...)>
struct KernelImpl<Return (*)(DevCtx, Args...), kernel_fn> {
  static void Compute(KernelContext* ctx) {
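    // KernelContext-driven entry point: starts the KernelCallHelper recursion
    // with the dev_ctx/input/attr/output indices all at zero.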
    KernelCallHelper<DevCtx,
                     Args...,
                     TypeTag<int>>::template Compute<0, 0, 0, 0>(ctx);
  }

  static void VariadicCompute(const DeviceContext& dev_ctx, Args... args) {
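    // Bypasses KernelContext: the caller supplies the concrete device context
    // and arguments directly; dev_ctx is cast back to the DevCtx reference
    // type the kernel expects.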
    return kernel_fn(static_cast<DevCtx>(dev_ctx), std::forward<Args>(args)...);
  }

 private:
  template <typename... RemainingArgs>
  struct KernelCallHelper;

  /* DeviceContext Helpers */

  PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CPUContext);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(GPUContext);
#endif
#ifdef PADDLE_WITH_XPU
  PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(XPUContext);
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CustomContext);
#endif

  /* Input Helpers */

  PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(DenseTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(DenseTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SelectedRows);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(DenseTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(SelectedRows);

  PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCooTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCooTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(SparseCooTensor);

  PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCsrTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCsrTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(SparseCsrTensor);

  PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(StringTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(StringTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(StringTensor);

  /* Attribute Helpers */

  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(bool);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(float);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(double);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int64_t);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(phi::dtype::float16);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const Scalar&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataType);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataLayout);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(Place);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int64_t>&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const IntArray&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int>&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::string&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<bool>&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<float>&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<double>&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<std::string>&);
  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<Scalar>&);

  /* Output Helpers */

  PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(DenseTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(DenseTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SelectedRows);

  PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCooTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCooTensor);

  PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCsrTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCsrTensor);

  PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(StringTensor);
  PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(StringTensor);
  /* End case: every argument has been gathered, so invoke the kernel */
  template <typename T>
  struct KernelCallHelper<TypeTag<T>> {
    template <int dev_ctx_idx, int in_idx, int attr_idx, int out_idx>
    static void Compute(KernelContext* ctx, DevCtx dev_ctx, Args&... args) {
      static_assert(dev_ctx_idx > 0,
                    "Kernel should pass DeviceContext as argument.");
      static_assert(out_idx > 0, "Kernel should have output argument.");
      // TODO(chenweihang): check dev_ctx, in, attr, out number
      return kernel_fn(dev_ctx, args...);
    }
  };
};

}  // namespace phi