/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/math_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/gpu/elementwise.h"
#include "paddle/phi/kernels/gpu/reduce.h"

#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif

#include "paddle/phi/common/complex.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

// Generates a CUDA elementwise binary kernel named <name>RawKernel.
// The generated kernel allocates `out` on the device context, then launches
// funcs::BroadcastKernel with funcs::<name>Functor<T> so `x` and `y` are
// broadcast against each other along `axis` before the op is applied.
#define DEFINE_CUDA_ELEMENTWISE_OP(name)                             \
  template <typename T, typename Context>                            \
  void name##RawKernel(const Context& dev_ctx,                       \
                       const DenseTensor& x,                         \
                       const DenseTensor& y,                         \
                       int axis,                                     \
                       DenseTensor* out) {                           \
    std::vector<const DenseTensor*> inputs;                          \
    std::vector<DenseTensor*> outputs;                               \
    inputs.emplace_back(&x);                                         \
    inputs.emplace_back(&y);                                         \
    outputs.emplace_back(out);                                       \
    dev_ctx.template Alloc<T>(out);                                  \
    funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(          \
        dev_ctx, inputs, &outputs, axis, funcs::name##Functor<T>()); \
  }

/**
 * Kernels
 */

// Reduces `x` over `dims` with an Add reduction combined with a Divide
// transform (arithmetic mean of the reduced elements). `reduce_all` and
// `keep_dim` are forwarded unchanged to phi::Reduce. The output dtype is
// taken from the input tensor.
template <typename T, typename Context>
void MeanRawKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const std::vector<int64_t>& dims,
                   bool keep_dim,
                   bool reduce_all,
                   DenseTensor* out) {
  // Mean preserves the input dtype; no cast is requested from the reducer.
  auto out_dtype = x.dtype();
  phi::Reduce<T, kps::AddFunctor, kps::DivideFunctor>(
      dev_ctx, x, reduce_all, dims, keep_dim, out_dtype, out);
}

// Reduces `x` over `dims` with an Add reduction and an Identity transform
// (plain sum). Unlike MeanRawKernel, the caller supplies `out_dtype`
// explicitly, which is forwarded to phi::Reduce along with `reduce_all`
// and `keep_dim`.
template <typename T, typename Context>
void SumRawKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const std::vector<int64_t>& dims,
                  bool keep_dim,
                  bool reduce_all,
                  DataType out_dtype,
                  DenseTensor* out) {
  phi::Reduce<T, kps::AddFunctor, kps::IdentityFunctor>(
      dev_ctx, x, reduce_all, dims, keep_dim, out_dtype, out);
}

// Instantiate the elementwise binary kernel definitions
// (AddRawKernel, SubtractRawKernel, MultiplyRawKernel, DivideRawKernel)
// from the macro above.

// Create the definition of Add
DEFINE_CUDA_ELEMENTWISE_OP(Add)
// Create the definition of Subtract
DEFINE_CUDA_ELEMENTWISE_OP(Subtract)
// Create the definition of Multiply
DEFINE_CUDA_ELEMENTWISE_OP(Multiply)
// Create the definition of Divide
DEFINE_CUDA_ELEMENTWISE_OP(Divide)

}  // namespace phi

// Short aliases for the phi dtype wrappers used in the kernel
// registrations below.
using float16 = phi::dtype::float16;
using bfloat16 = phi::dtype::bfloat16;
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;

// Register add_raw on GPU for the supported integral, floating-point,
// and complex dtypes.
PD_REGISTER_KERNEL(add_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::AddRawKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t,
                   float16,
                   complex64,
                   complex128) {}
// Register subtract_raw on GPU for the supported integral, floating-point,
// and complex dtypes.
PD_REGISTER_KERNEL(subtract_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::SubtractRawKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t,
                   float16,
                   complex64,
                   complex128) {}
// Register divide_raw on GPU. Note this list includes bfloat16 and
// excludes int16_t, unlike add_raw/subtract_raw.
PD_REGISTER_KERNEL(divide_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::DivideRawKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   float16,
                   bfloat16,
                   complex64,
                   complex128) {}
// Register multiply_raw on GPU. Note this list includes bool, unlike the
// other elementwise registrations above.
PD_REGISTER_KERNEL(multiply_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::MultiplyRawKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   bool,
                   float16,
                   complex64,
                   complex128) {}
// Register sum_raw on GPU for bool, floating-point, integral, and complex
// dtypes.
PD_REGISTER_KERNEL(sum_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::SumRawKernel,
                   bool,
                   float,
                   double,
                   float16,
                   int16_t,
                   int,
                   int64_t,
                   complex64,
                   complex128) {
  // The output dtype is left UNDEFINED here; SumRawKernel receives an
  // explicit out_dtype argument, so the result dtype is resolved at run
  // time rather than fixed at registration.
  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}

// Register mean_raw on GPU for floating-point, bool, and integral dtypes.
PD_REGISTER_KERNEL(mean_raw,
                   GPU,
                   ALL_LAYOUT,
                   phi::MeanRawKernel,
                   float,
                   double,
                   bool,
                   float16,
                   int,
                   int64_t) {}