math_kernel.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/math_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/gpu/reduce.h"

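// CUB (hipCUB on ROCm) supplies device-wide reduction primitives; aliasing
// hipcub to the cub namespace lets the same code compile for both CUDA and
// ROCm builds.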
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif

#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

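// DEFINE_CUDA_ELEMENTWISE_OP(name) expands to a name##RawKernel that
// broadcasts x and y along `axis` and applies funcs::name##Functor<T>
// element-wise via funcs::BroadcastKernel.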
#define DEFINE_CUDA_ELEMENTWISE_OP(name)                             \
  template <typename T, typename Context>                            \
  void name##RawKernel(const Context& dev_ctx,                       \
                       const DenseTensor& x,                         \
                       const DenseTensor& y,                         \
                       int axis,                                     \
                       DenseTensor* out) {                           \
    std::vector<const DenseTensor*> inputs;                          \
    std::vector<DenseTensor*> outputs;                               \
    inputs.emplace_back(&x);                                         \
    inputs.emplace_back(&y);                                         \
    outputs.emplace_back(out);                                       \
    dev_ctx.template Alloc<T>(out);                                  \
    funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(          \
        dev_ctx, inputs, &outputs, axis, funcs::name##Functor<T>()); \
  }

/**
 * Kernels
 */

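// Mean over `dims`: accumulate with kps::AddFunctor and rescale with
// kps::DivideFunctor (the inverse of the reduced element count).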
template <typename T, typename Context>
void MeanRawKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const std::vector<int64_t>& dims,
                   bool keep_dim,
                   bool reduce_all,
                   DenseTensor* out) {
  auto out_dtype = x.dtype();
  phi::Reduce<T, kps::AddFunctor, kps::DivideFunctor>(
      dev_ctx, x, reduce_all, dims, keep_dim, out_dtype, out);
}

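// Sum over `dims` with kps::AddFunctor; `out_dtype` controls the data type
// of the reduction result.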
template <typename T, typename Context>
void SumRawKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const std::vector<int64_t>& dims,
                  bool keep_dim,
                  bool reduce_all,
                  DataType out_dtype,
                  DenseTensor* out) {
  phi::Reduce<T, kps::AddFunctor, kps::IdentityFunctor>(
      dev_ctx, x, reduce_all, dims, keep_dim, out_dtype, out);
}

// Create the definition of Add
DEFINE_CUDA_ELEMENTWISE_OP(Add)
// Create the definition of Subtract
DEFINE_CUDA_ELEMENTWISE_OP(Subtract)
// Create the definition of Multiply
DEFINE_CUDA_ELEMENTWISE_OP(Multiply)
// Create the definition of Divide
DEFINE_CUDA_ELEMENTWISE_OP(Divide)

}  // namespace phi

using float16 = phi::dtype::float16;
using bfloat16 = phi::dtype::bfloat16;
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;

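// Register the raw elementwise and reduce kernels for the GPU backend,
// listing the data types each kernel is instantiated for.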
PD_REGISTER_KERNEL(add_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::AddRawKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t,
                   float16,
                   bfloat16,
                   complex64,
                   complex128) {}
PD_REGISTER_KERNEL(subtract_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::SubtractRawKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t,
                   float16,
                   bfloat16,
                   complex64,
                   complex128) {}
PD_REGISTER_KERNEL(divide_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::DivideRawKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   float16,
                   bfloat16,
                   complex64,
                   complex128) {}
PD_REGISTER_KERNEL(multiply_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::MultiplyRawKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   bool,
                   float16,
                   complex64,
                   complex128,
                   bfloat16) {}
PD_REGISTER_KERNEL(sum_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::SumRawKernel,
                   bool,
                   float,
                   double,
                   float16,
                   bfloat16,
                   int16_t,
                   int,
                   int64_t,
                   complex64,
                   complex128) {
  // Leave the registered output dtype undefined: the actual output type
  // follows the kernel's out_dtype argument rather than the template T.
  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}

PD_REGISTER_KERNEL(mean_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::MeanRawKernel,
                   float,
                   double,
                   bool,
                   float16,
                   int,
                   int64_t) {}