// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/legacy/elementwise_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/elementwise.h"
#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"

namespace phi {

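// Each wrapper below simply forwards to its *RawKernel counterpart with
// axis = -1, so the broadcast axis is inferred from the operand shapes
// (the default elementwise behavior).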
template <typename T, typename Context>
void MaximumKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const DenseTensor& y,
                   DenseTensor* out) {
  int axis = -1;
  MaximumRawKernel<T>(dev_ctx, x, y, axis, out);
}

template <typename T, typename Context>
void MinimumKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const DenseTensor& y,
                   DenseTensor* out) {
  int axis = -1;
  MinimumRawKernel<T>(dev_ctx, x, y, axis, out);
}

template <typename T, typename Context>
void RemainderKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     DenseTensor* out) {
  int axis = -1;
  RemainderRawKernel<T>(dev_ctx, x, y, axis, out);
}

template <typename T, typename Context>
void FloorDivideKernel(const Context& dev_ctx,
                       const DenseTensor& x,
                       const DenseTensor& y,
                       DenseTensor* out) {
  int axis = -1;
  FloorDivideRawKernel<T>(dev_ctx, x, y, axis, out);
}

template <typename T, typename Context>
void ElementwisePowKernel(const Context& dev_ctx,
                          const DenseTensor& x,
                          const DenseTensor& y,
                          DenseTensor* out) {
  int axis = -1;
  ElementwisePowRawKernel<T>(dev_ctx, x, y, axis, out);
}

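// Unlike the kernels above, Heaviside has no Raw variant here: it allocates
// the output and applies the elementwise functor directly.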
template <typename T, typename Context>
void HeavisideKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     DenseTensor* out) {
  // allocate memory for out
  dev_ctx.template Alloc<T>(out);
  funcs::ElementwiseCompute<funcs::ElementwiseHeavisideFunctor<T>, T>(
      dev_ctx, x, y, funcs::ElementwiseHeavisideFunctor<T>(), out);
}

}  // namespace phi

using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;

// NOTE(chenweihang): using bfloat16 will cause redefine with xpu bfloat16
// using bfloat16 = ::phi::dtype::bfloat16;

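// Kernel registrations: each entry binds an op name to the CPU backend, the
// ALL_LAYOUT layout tag, the kernel function, and the data types it supports.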
PD_REGISTER_KERNEL(
    fmax, CPU, ALL_LAYOUT, phi::FMaxKernel, float, double, int, int64_t) {}

PD_REGISTER_KERNEL(
    fmin, CPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

PD_REGISTER_KERNEL(maximum,
                   CPU,
                   ALL_LAYOUT,
                   phi::MaximumKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(minimum,
                   CPU,
                   ALL_LAYOUT,
                   phi::MinimumKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(remainder,
                   CPU,
                   ALL_LAYOUT,
                   phi::RemainderKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
PD_REGISTER_KERNEL(
    floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow,
                   CPU,
                   ALL_LAYOUT,
                   phi::ElementwisePowKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(heaviside,
                   CPU,
                   ALL_LAYOUT,
                   phi::HeavisideKernel,
                   float,
                   double,
                   int,
                   int64_t) {}