/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/activation_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/activation_functor.h"
#include "paddle/phi/kernels/impl/activation_impl.h"

namespace phi {

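// Defines an element-wise activation kernel with no attributes: name##Kernel
// applies funcs::functor_class<T> to `x` and writes the result to `out`.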
#define DEFINE_CPU_ACTIVATION_KERNEL(name, functor_class)               \
  template <typename T, typename Context>                               \
  void name##Kernel(                                                    \
      const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) { \
    funcs::functor_class<T> functor;                                    \
    ActivationImpl<T, T, Context, funcs::functor_class<T>>(             \
        dev_ctx, x, out, functor);                                      \
  }
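
// For reference, DEFINE_CPU_ACTIVATION_KERNEL(Sin, SinFunctor) expands to:
//
//   template <typename T, typename Context>
//   void SinKernel(
//       const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {
//     funcs::SinFunctor<T> functor;
//     ActivationImpl<T, T, Context, funcs::SinFunctor<T>>(
//         dev_ctx, x, out, functor);
//   }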

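// Same as DEFINE_CPU_ACTIVATION_KERNEL, except that integral input types are
// promoted to float for the output (e.g. Log of an int tensor yields floats).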
#define DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(name,           \
                                                           functor_class)  \
  template <typename T, typename Context>                                  \
  void name##Kernel(                                                       \
      const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {    \
    funcs::functor_class<T> functor;                                       \
    using U =                                                              \
        typename std::conditional_t<std::is_integral<T>::value, float, T>; \
    ActivationImpl<T, U, Context, funcs::functor_class<T>>(                \
        dev_ctx, x, out, functor);                                         \
  }

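// Defines an activation kernel taking a single float attribute, wired into
// the functor through the (name, pointer) pairs returned by GetAttrs().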
#define DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(name, functor_class, attr) \
  template <typename T, typename Context>                               \
  void name##Kernel(const Context& dev_ctx,                             \
                    const DenseTensor& x,                               \
                    float attr,                                         \
                    DenseTensor* out) {                                 \
    funcs::functor_class<T> functor;                                    \
    auto attrs = functor.GetAttrs();                                    \
    *(attrs[0].second) = attr;                                          \
    ActivationImpl<T, T, Context, funcs::functor_class<T>>(             \
        dev_ctx, x, out, functor);                                      \
  }

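// As above, but for activations parameterized by two float attributes.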
#define DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(               \
    name, functor_class, attr1, attr2)                      \
  template <typename T, typename Context>                   \
  void name##Kernel(const Context& dev_ctx,                 \
                    const DenseTensor& x,                   \
                    float attr1,                            \
                    float attr2,                            \
                    DenseTensor* out) {                     \
    funcs::functor_class<T> functor;                        \
    auto attrs = functor.GetAttrs();                        \
    *(attrs[0].second) = attr1;                             \
    *(attrs[1].second) = attr2;                             \
    ActivationImpl<T, T, Context, funcs::functor_class<T>>( \
        dev_ctx, x, out, functor);                          \
  }

DEFINE_CPU_ACTIVATION_KERNEL(Sin, SinFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Cos, CosFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Tan, TanFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Asin, AsinFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Atan, AtanFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Acos, AcosFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Sinh, SinhFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Cosh, CoshFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Asinh, AsinhFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Acosh, AcoshFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Atanh, AtanhFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Relu, ReluCPUFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Tanh, TanhFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(TanhShrink, TanhShrinkFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Silu, SiluFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Reciprocal, ReciprocalFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Square, SquareFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Sqrt, SqrtFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Rsqrt, RsqrtFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Softsign, SoftsignFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Sigmoid, SigmoidFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(LogSigmoid, LogSigmoidFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Round, RoundFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Floor, FloorFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Ceil, CeilFunctor)
DEFINE_CPU_ACTIVATION_KERNEL(Negative, NegativeFunctor)

DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(Log, LogFunctor)
DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(Log2, Log2Functor)
DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(Log10, Log10Functor)
DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(Log1p, Log1pFunctor)
DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(Exp, ExpFunctor)
DEFINE_CPU_ACTIVATION_KERNEL_WITH_INT_IN_FLOAT_OUT(Expm1, Expm1Functor)

DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(LeakyRelu, LeakyReluFunctor, alpha)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(ThresholdedRelu,
                                     ThresholdedReluFunctor,
                                     threshold)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Relu6Raw, Relu6Functor, threshold)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Mish, MishFunctor, threshold)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(HardShrink, HardShrinkFunctor, threshold)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(SoftShrink, SoftShrinkFunctor, lambda)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Elu, ELUFunctor, alpha)
DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Celu, CELUFunctor, alpha)

DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(HardTanh, HardTanhFunctor, t_min, t_max)
DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(STanh, STanhFunctor, scale_a, scale_b)
DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(Softplus, SoftplusFunctor, beta, threshold)
DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(HardSigmoid,
                                     HardSigmoidFunctor,
                                     slope,
                                     offset)

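// HardSwish is written out by hand because its three attributes are fixed
// constants (threshold = 6, scale = 6, offset = 3) rather than kernel inputs.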
template <typename T, typename Context>
void HardSwishKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     DenseTensor* out) {
  funcs::HardSwishFunctor<T> functor;
  float threshold = 6;
  float scale = 6;
  float offset = 3;
  auto attrs = functor.GetAttrs();
  *(attrs[0].second) = threshold;
  *(attrs[1].second) = scale;
  *(attrs[2].second) = offset;
  ActivationImpl<T, T, Context, funcs::HardSwishFunctor<T>>(
      dev_ctx, x, out, functor);
}

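// Swish with beta fixed to 1.0, i.e. x * sigmoid(x) (the SiLU form).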
template <typename T, typename Context>
void SwishKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 DenseTensor* out) {
  funcs::SwishFunctor<T> functor;
  auto attrs = functor.GetAttrs();
  *(attrs[0].second) = 1.0;
  ActivationImpl<T, T, Context, funcs::SwishFunctor<T>>(
      dev_ctx, x, out, functor);
}
}  // namespace phi
PD_REGISTER_KERNEL(relu, CPU, ALL_LAYOUT, phi::ReluKernel, float, double) {}
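// relu is registered directly above; the remaining float/double-only
// activations use the PD_REGISTER_ACTIVATION_KERNEL shorthand below.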

#define PD_REGISTER_ACTIVATION_KERNEL(name, func) \
  PD_REGISTER_KERNEL(name, CPU, ALL_LAYOUT, phi::func, float, double) {}

PD_REGISTER_ACTIVATION_KERNEL(sin, SinKernel)
PD_REGISTER_ACTIVATION_KERNEL(cos, CosKernel)
PD_REGISTER_ACTIVATION_KERNEL(tan, TanKernel)
PD_REGISTER_ACTIVATION_KERNEL(acos, AcosKernel)
PD_REGISTER_ACTIVATION_KERNEL(asin, AsinKernel)
PD_REGISTER_ACTIVATION_KERNEL(atan, AtanKernel)
PD_REGISTER_ACTIVATION_KERNEL(sinh, SinhKernel)
PD_REGISTER_ACTIVATION_KERNEL(cosh, CoshKernel)
PD_REGISTER_ACTIVATION_KERNEL(asinh, AsinhKernel)
PD_REGISTER_ACTIVATION_KERNEL(acosh, AcoshKernel)
PD_REGISTER_ACTIVATION_KERNEL(atanh, AtanhKernel)
PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
PD_REGISTER_ACTIVATION_KERNEL(hardtanh, HardTanhKernel)
PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
PD_REGISTER_ACTIVATION_KERNEL(thresholded_relu, ThresholdedReluKernel)
PD_REGISTER_ACTIVATION_KERNEL(relu6_raw, Relu6RawKernel)
PD_REGISTER_ACTIVATION_KERNEL(hard_shrink, HardShrinkKernel)
PD_REGISTER_ACTIVATION_KERNEL(softshrink, SoftShrinkKernel)
PD_REGISTER_ACTIVATION_KERNEL(tanh_shrink, TanhShrinkKernel)
PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
PD_REGISTER_ACTIVATION_KERNEL(silu, SiluKernel)
PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
PD_REGISTER_ACTIVATION_KERNEL(stanh, STanhKernel)
PD_REGISTER_ACTIVATION_KERNEL(reciprocal, ReciprocalKernel)
PD_REGISTER_ACTIVATION_KERNEL(sqrt, SqrtKernel)
PD_REGISTER_ACTIVATION_KERNEL(rsqrt, RsqrtKernel)
PD_REGISTER_ACTIVATION_KERNEL(softplus, SoftplusKernel)

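// Kernels defined with the INT_IN_FLOAT_OUT macro are also registered for
// integer input types.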
PD_REGISTER_KERNEL(exp,
                   CPU,
                   ALL_LAYOUT,
                   phi::ExpKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16) {}

PD_REGISTER_KERNEL(expm1,
                   CPU,
                   ALL_LAYOUT,
                   phi::Expm1Kernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16) {}

PD_REGISTER_KERNEL(logit, CPU, ALL_LAYOUT, phi::LogitKernel, float, double) {}
PD_REGISTER_KERNEL(
    square, CPU, ALL_LAYOUT, phi::SquareKernel, float, double, int, int64_t) {}
PD_REGISTER_ACTIVATION_KERNEL(softsign, SoftsignKernel)
PD_REGISTER_ACTIVATION_KERNEL(sigmoid, SigmoidKernel)
PD_REGISTER_ACTIVATION_KERNEL(logsigmoid, LogSigmoidKernel)
PD_REGISTER_ACTIVATION_KERNEL(hard_sigmoid, HardSigmoidKernel)
PD_REGISTER_ACTIVATION_KERNEL(swish, SwishKernel)
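// The log-family kernels below additionally register bfloat16 on CPU.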

PD_REGISTER_KERNEL(log,
                   CPU,
                   ALL_LAYOUT,
                   phi::LogKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(log2,
                   CPU,
                   ALL_LAYOUT,
                   phi::Log2Kernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(log10,
                   CPU,
                   ALL_LAYOUT,
                   phi::Log10Kernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(log1p,
                   CPU,
                   ALL_LAYOUT,
                   phi::Log1pKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}

PD_REGISTER_ACTIVATION_KERNEL(hardswish, HardSwishKernel)
PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
PD_REGISTER_ACTIVATION_KERNEL(ceil, CeilKernel)
PD_REGISTER_KERNEL(negative,
                   CPU,
                   ALL_LAYOUT,
                   phi::NegativeKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t) {}
PD_REGISTER_ACTIVATION_KERNEL(celu, CeluKernel)
PD_REGISTER_KERNEL(
    pow, CPU, ALL_LAYOUT, phi::PowKernel, float, double, int, int64_t) {}