/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <glog/logging.h>
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cmath>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/float16.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

enum ActBwdOpFwdDeps {
  kNoDeps = 0x00,  // Do not need any forward input/output
  kDepX = 0x01,    // Only need forward input X
  kDepOut = 0x02,  // Only need forward output Out
};
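
// A gradient functor reports which forward tensors it needs by defining
// FwdDeps(); for example, the grad functors below that only read the forward
// output declare:
//   static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }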

/* The following operators can be used to process SelectedRows, because the
 * output of these operators for a zero input is zero too.
 */
static std::unordered_set<std::string> CanBeUsedBySelectedRows = {
    "abs", "abs_grad", "square", "square_grad", "sqrt", "sqrt_grad"};

inline void ExtractActivationTensor(const framework::ExecutionContext& context,
                                    const framework::Tensor** X,
                                    framework::Tensor** Out) {
  auto x_var = context.InputVar("X");
  auto out_var = context.OutputVar("Out");
  PADDLE_ENFORCE(x_var != nullptr,
                 "Cannot get input Variable X, variable name = %s",
                 context.InputName("X"));
  PADDLE_ENFORCE(out_var != nullptr,
                 "Cannot get output Variable Out, variable name = %s",
                 context.OutputName("Out"));
  if (CanBeUsedBySelectedRows.count(context.Type())) {
    *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
    *Out = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(
        out_var);
  } else {
    *X = context.Input<framework::Tensor>("X");
    *Out = context.Output<framework::Tensor>("Out");
  }

  PADDLE_ENFORCE(*Out != nullptr,
                 "Cannot get output tensor Out, variable name = %s",
                 context.OutputName("Out"));
}

template <ActBwdOpFwdDeps kDepValue>
inline void ExtractActivationGradTensor(
    const framework::ExecutionContext& context, const framework::Tensor** X,
    const framework::Tensor** Out, const framework::Tensor** dOut,
    framework::Tensor** dX) {
  auto out_grad_var = context.InputVar(framework::GradVarName("Out"));
  auto x_grad_var = context.OutputVar(framework::GradVarName("X"));
  const framework::Variable* out_var = nullptr;

  if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
    out_var = context.InputVar("Out");
    PADDLE_ENFORCE(out_var != nullptr,
                   "Cannot get input Variable Out, variable name = %s",
                   context.InputName("Out"));
  }
  PADDLE_ENFORCE(out_grad_var != nullptr,
                 "Cannot get input Variable %s, variable name = %s",
                 framework::GradVarName("Out"),
                 context.InputName(framework::GradVarName("Out")));
  PADDLE_ENFORCE(x_grad_var != nullptr,
                 "Cannot get output Variable %s, variable name = %s",
                 framework::GradVarName("X"),
                 context.OutputName(framework::GradVarName("X")));

  if (CanBeUsedBySelectedRows.count(context.Type())) {
    *dOut = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(
        *out_grad_var);
    *dX = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(
        x_grad_var);

    if (out_var) {
      *Out =
          paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*out_var);
    } else {
      *Out = *dOut;  // fake out
    }

  } else {
    *Out = context.Input<framework::Tensor>("Out");
    *dOut = context.Input<framework::Tensor>(framework::GradVarName("Out"));
    *dX = context.Output<framework::Tensor>(framework::GradVarName("X"));

    if (out_var) {
      *Out = &(out_var->Get<framework::LoDTensor>());
    } else {
      *Out = *dOut;  // fake out
    }
  }

  PADDLE_ENFORCE(*dX != nullptr,
                 "Cannot get output tensor %s, variable name = %s",
                 framework::GradVarName("X"),
                 context.OutputName(framework::GradVarName("X")));

  if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
    auto x_var = context.InputVar("X");
    PADDLE_ENFORCE(x_var != nullptr,
                   "Cannot get input tensor X, variable name = %s",
                   context.InputName("X"));
    if (CanBeUsedBySelectedRows.count(context.Type())) {
      *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
    } else {
      *X = context.Input<framework::Tensor>("X");
    }
  } else {
    VLOG(10) << "Inplace activation of Op: " << context.Type();
    *X = *dX;
  }
}

template <typename DeviceContext, typename Functor>
class ActivationKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;

  void Compute(const framework::ExecutionContext& context) const override {
    const framework::Tensor* X = nullptr;
    framework::Tensor* Out = nullptr;
    ExtractActivationTensor(context, &X, &Out);
    Out->mutable_data<T>(context.GetPlace());

    auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
    auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
    auto* place =
        context.template device_context<DeviceContext>().eigen_device();
    Functor functor;

    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = context.Attr<float>(attr.first);
    }
    functor(*place, x, out);
  }
};

template <typename DeviceContext, typename Functor>
class ActivationGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& context) const override {
    const framework::Tensor *X, *Out, *dOut;
    framework::Tensor* dX = nullptr;
    X = Out = dOut = nullptr;
    ExtractActivationGradTensor<Functor::FwdDeps()>(context, &X, &Out, &dOut,
                                                    &dX);
    dX->mutable_data<T>(context.GetPlace());
    auto dout = framework::EigenVector<T>::Flatten(detail::Ref(dOut));
    auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
    auto dx = framework::EigenVector<T>::Flatten(detail::Ref(dX));
    auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
    auto* place =
        context.template device_context<DeviceContext>().eigen_device();
    Functor functor;
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = context.Attr<float>(attr.first);
    }
    functor(*place, x, out, dout, dx);
  }
};
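
// Note that the grad kernel only materializes the tensors requested by the
// functor's FwdDeps(): when kDepX is not set, ExtractActivationGradTensor
// aliases *X to *dX, and when kDepOut is not set it aliases *Out to *dOut
// ("fake out"), which is what allows in-place activation gradients.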

template <typename T>
struct BaseActivationFunctor {
  using ELEMENT_TYPE = T;

  using AttrPair = std::vector<std::pair<const char*, float*>>;

  AttrPair GetAttrs() { return AttrPair(); }
};
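
// Every activation below follows the same pattern. A new activation would be
// added as a forward/backward functor pair like this sketch ("MyScale" and
// its "beta" attribute are hypothetical, shown only to illustrate the
// pattern):
//
//   template <typename T>
//   struct MyScaleFunctor : public BaseActivationFunctor<T> {
//     float beta;
//     typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
//       return {{"beta", &beta}};
//     }
//     template <typename Device, typename X, typename Out>
//     void operator()(Device d, X x, Out out) const {
//       out.device(d) = x * static_cast<T>(beta);  // my_scale(x) = beta * x
//     }
//   };
//
//   template <typename T>
//   struct MyScaleGradFunctor : public BaseActivationFunctor<T> {
//     float beta;
//     typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
//       return {{"beta", &beta}};
//     }
//     template <typename Device, typename X, typename Out, typename dOut,
//               typename dX>
//     void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
//       dx.device(d) = dout * static_cast<T>(beta);  // d(beta * x)/dx = beta
//     }
//     // Needs neither X nor Out in the backward pass.
//     static constexpr ActBwdOpFwdDeps FwdDeps() { return kNoDeps; }
//   };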

// sigmoid(x) = 1 / (1 + exp(-x))
template <typename T>
struct SigmoidFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = static_cast<T>(1) / (static_cast<T>(1) + (-x).exp());
  }
};

template <typename T>
struct SigmoidGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * out * (static_cast<T>(1) - out);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// Originally: logsigmoid(x) = -log (1 + exp(-x))
// For numerical stability, we can use the log-sum-exp trick:
// https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
// We can rewrite the above equation as:
// out = -log( exp(0) + exp(-x)) [since exp(0) = 1]
//   = -log( exp(max(-x, 0) - max(-x, 0)) + exp(-x + max(-x, 0) - max(-x, 0)))
//   = -log( exp(max(-x, 0)) * exp(-max(-x, 0)) + exp(max(-x, 0)) * exp(-x -
//           max(-x, 0)))
//   = -log( exp(max(-x, 0)) * (exp(-max(-x, 0)) + exp(-x - max(-x, 0))))
//   = -(max(-x, 0) + log(exp(-max(-x, 0)) + exp(-x - max(-x, 0))))
//
// Hence, logsigmoid(x) = - (max(-x, 0) + log(exp(-max(-x, 0))
// + exp(-x - max(-x, 0))))
template <typename T>
struct LogSigmoidFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    auto temp = (-x).cwiseMax(static_cast<T>(0));  // temp = max(-x, 0)
    out.device(d) = -temp - (((-temp).exp() + (-x - temp).exp()).log());
  }
};

// Originally: f' = exp(-x) / (1 + exp(-x))
// For numerical stability: f' = exp(-x - max(-x, 0)) / (exp(-max(-x, 0)) +
// exp(-x - max(-x, 0)))
template <typename T>
struct LogSigmoidGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto temp = (-x).cwiseMax(static_cast<T>(0));  // temp = max(-x, 0)
    dx.device(d) =
        dout * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp()));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// exp(x) = e^x
template <typename T>
struct ExpFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.exp();
  }
};

template <typename T>
struct ExpGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// relu(x) = max(x, 0)
template <typename T>
struct ReluFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.cwiseMax(static_cast<T>(0));
  }
};

template <typename T>
struct ReluGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * (out > static_cast<T>(0)).template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// gelu(x) = 0.5 * x *  (1 + erf(x / sqrt(2)))
template <typename T>
struct GeluFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
// Because the execution or device context cannot be delivered here, keep the
// macro check for NVCC.
#if defined(PADDLE_WITH_MKLML) && !defined(_WIN32) && !defined(__APPLE__) && \
    !defined(__OSX__) && !defined(PADDLE_WITH_CUDA)
    auto x_data = x.data();
    auto out_data = out.data();
    int n = std::min(x.size(), out.size());

    std::memset(out_data, 0, n * sizeof(T));
    math::CBlas<T>::AXPY(n, static_cast<T>(M_SQRT1_2), x_data, 1, out_data, 1);
    math::CBlas<T>::VMERF(n, out_data, out_data, VML_LA);
    for (int i = 0; i < n; i++) {
      out_data[i] += static_cast<T>(1);
    }
    math::CBlas<T>::VMUL(n, x_data, out_data, out_data);
    for (int i = 0; i < n; i++) {
      out_data[i] *= static_cast<T>(0.5);
    }
#else
    auto temp = (x * static_cast<T>(M_SQRT1_2)).erf();
    out.device(d) = x * static_cast<T>(0.5) * (static_cast<T>(1) + temp);
#endif
  }
};

// gelu_grad(x) = dout * (0.5 * (1 + erf(x / sqrt(2))) + 0.5 * 2 / sqrt(pi) /
// sqrt(2) * x * exp (-0.5 * x^2))
template <typename T>
struct GeluGradFunctor : BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
#if defined(PADDLE_WITH_MKLML) && !defined(_WIN32) && !defined(__APPLE__) && \
    !defined(__OSX__) && !defined(PADDLE_WITH_CUDA)
    auto x_data = x.data();
    auto dx_data = dx.data();
    auto dout_data = dout.data();
    int n = std::min(x.size(), dx.size());

    auto first = static_cast<T*>(std::malloc(n * sizeof(T)));
    std::memset(first, 0, n * sizeof(T));
    auto second = static_cast<T*>(std::malloc(n * sizeof(T)));
    std::memset(second, 0, n * sizeof(T));

    // first = (0.5 * (1 + erf(x / sqrt(2))))
    math::CBlas<T>::AXPY(n, static_cast<T>(M_SQRT1_2), x_data, 1, first, 1);
    math::CBlas<T>::VMERF(n, first, first, VML_LA);
    for (int i = 0; i < n; i++) {
      first[i] += static_cast<T>(1);
    }
    math::CBlas<T>::SCAL(n, static_cast<T>(0.5), first, 1);

    // second = (0.5 * 2/sqrt(pi) * 1/sqrt(2) * x * exp(-0.5 * x^2))
    math::CBlas<T>::VSQUARE(n, x_data, second);
    math::CBlas<T>::SCAL(n, -static_cast<T>(0.5), second, 1);
    math::CBlas<T>::VEXP(n, second, second);
    math::CBlas<T>::VMUL(n, x_data, second, second);
    math::CBlas<T>::SCAL(n, static_cast<T>(0.5 * M_2_SQRTPI * M_SQRT1_2),
                         second, 1);

    // dx = dout * (first + second);
    math::CBlas<T>::VADD(n, first, second, first);
    math::CBlas<T>::VMUL(n, dout_data, first, dx_data);

    std::free(first);
    std::free(second);
#else
    auto first = static_cast<T>(0.5) *
                 (static_cast<T>(1) + ((x * static_cast<T>(M_SQRT1_2)).erf()));

    auto second = static_cast<T>(0.5 * M_2_SQRTPI * M_SQRT1_2) * x *
                  (-static_cast<T>(0.5) * x.square()).exp();
    dx.device(d) = dout * (first + second);
#endif
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
template <typename T>
struct TanhFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.tanh();
  }
};

template <typename T>
struct TanhGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * (static_cast<T>(1) - out * out);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// tanhshrink(x) = x - tanh(x)
// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
template <typename T>
struct TanhShrinkFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x - x.tanh();
  }
};

template <typename T>
struct TanhShrinkGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * (x.tanh() * x.tanh());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// hardshrink(x) = x, if x > threshold or x < -threshold; 0 otherwise
template <typename T>
struct HardShrinkFunctor : public BaseActivationFunctor<T> {
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    auto temp1 = (x < static_cast<T>(threshold * -1)).template cast<T>();
    auto temp2 = (x > static_cast<T>(threshold)).template cast<T>();
    out.device(d) = x * (temp1 + temp2);
  }
};

template <typename T>
struct HardShrinkGradFunctor : public BaseActivationFunctor<T> {
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto temp1 = (x < static_cast<T>(threshold * -1)).template cast<T>();
    auto temp2 = (x > static_cast<T>(threshold)).template cast<T>();
    dx.device(d) = dout * (temp1 + temp2).template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < -lambda; 0
// otherwise
template <typename T>
struct SoftShrinkFunctor : public BaseActivationFunctor<T> {
  float lambda;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"lambda", &lambda}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    auto lambdaT = static_cast<T>(lambda);
    auto temp1 = (x > lambdaT).template cast<T>();
    auto temp2 = (x < -lambdaT).template cast<T>();
    out.device(d) = temp1 * (x - lambdaT) + temp2 * (x + lambdaT);
  }
};

template <typename T>
struct SoftShrinkGradFunctor : public BaseActivationFunctor<T> {
  float lambda;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"lambda", &lambda}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto lambdaT = static_cast<T>(lambda);
    auto temp1 = (x > lambdaT).template cast<T>();
    auto temp2 = (x < -lambdaT).template cast<T>();
    dx.device(d) = dout * (temp1 + temp2).template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// sqrt(x) = x^(1/2)
template <typename T>
struct SqrtFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.sqrt();
  }
};

template <typename T>
struct SqrtGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = static_cast<T>(0.5) * dout / out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// rsqrt(x) = x^(-1/2)
template <typename T>
struct RsqrtFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.rsqrt();
  }
};

template <typename T>
struct RsqrtGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = static_cast<T>(-0.5) * dout * out * out * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// ceil(x) = ceiling(x)
template <typename T>
struct CeilFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.ceil();
  }
};

template <typename T>
struct ZeroGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = static_cast<T>(0) * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kNoDeps; }
};

// floor(x) = flooring(x)
template <typename T>
struct FloorFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.floor();
  }
};

template <typename T>
struct Sine {
  HOSTDEVICE T operator()(const T& val) const { return sin(val); }
};

template <>
struct Sine<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(sin(static_cast<float>(val)));
  }
};

template <typename T>
struct Cosine {
  HOSTDEVICE T operator()(const T& val) const { return cos(val); }
};

template <>
struct Cosine<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(cos(static_cast<float>(val)));
  }
};

// cosine'(x) = -sin(x)
template <typename T>
struct CosGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = -dout * x.unaryExpr(Sine<T>());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// cosine(x) = cos(x)
template <typename T>
struct CosFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Cosine<T>());
  }
};

// sine'(x) = cos(x)
template <typename T>
struct SinGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * x.unaryExpr(Cosine<T>());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// sine(x) = sin(x)
template <typename T>
struct SinFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Sine<T>());
  }
};

template <typename T>
struct Acos {
  HOSTDEVICE T operator()(const T& val) const { return acos(val); }
};

template <>
struct Acos<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(acos(static_cast<float>(val)));
  }
};

// Acos(x) = acos(x)
template <typename T>
struct AcosFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Acos<T>());
  }
};

// acos'(x) = -1/sqrt(1-x^2)
template <typename T>
struct AcosGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) =
        -dout * static_cast<T>(1) / (static_cast<T>(1) - x.square()).sqrt();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct Asin {
  HOSTDEVICE T operator()(const T& val) const { return asin(val); }
};

template <>
struct Asin<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(asin(static_cast<float>(val)));
  }
};

// Asin(x) = asin(x)
template <typename T>
struct AsinFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Asin<T>());
  }
};

// asin'(x) = 1/sqrt(1-x^2)
template <typename T>
struct AsinGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) =
        dout * static_cast<T>(1) / (static_cast<T>(1) - x.square()).sqrt();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct Atan {
  HOSTDEVICE T operator()(const T& val) const { return atan(val); }
};

template <>
struct Atan<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(atan(static_cast<float>(val)));
  }
};

// Atan(x) = atan(x)
template <typename T>
struct AtanFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Atan<T>());
  }
};

// atan'(x) =  1 / (1 + x^2)
template <typename T>
struct AtanGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * static_cast<T>(1) / (static_cast<T>(1) + x.square());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// round(x) = [x]
template <typename T>
struct RoundFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.round();
  }
};

// abs(x) = |x|
template <typename T>
struct AbsFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.abs();
  }
};

template <typename T>
struct AbsGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * x.sign();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// reciprocal(x) = 1 / x
template <typename T>
struct ReciprocalFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = static_cast<T>(1) / x;
  }
};

template <typename T>
struct ReciprocalGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * static_cast<T>(-1) * out * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// log(x) = natural logarithm of x
template <typename T>
struct LogFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.log();
  }
};

template <typename T>
struct LogGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * (static_cast<T>(1) / x);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// square(x) = x^2
template <typename T>
struct SquareFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.square();
  }
};

template <typename T>
struct SquareGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * static_cast<T>(2) * x;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct BReluFunctor : public BaseActivationFunctor<T> {
  float t_min;
  float t_max;

  // NOTE: GetAttrs here intentionally hides `BaseActivationFunctor<T>::GetAttrs`;
  // name hiding (not virtual dispatch) is used for speed.
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"t_min", &t_min}, {"t_max", &t_max}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) =
        x.cwiseMax(static_cast<T>(t_min)).cwiseMin(static_cast<T>(t_max));
  }
};

template <typename T>
struct BReluGradFunctor : public BaseActivationFunctor<T> {
  float t_min;
  float t_max;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"t_min", &t_min}, {"t_max", &t_max}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout *
                   ((x > static_cast<T>(t_min)) * (x < static_cast<T>(t_max)))
                       .template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// relu6(x) = min(max(0, x), 6)
template <typename T>
struct Relu6Functor : public BaseActivationFunctor<T> {
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) =
        x.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(threshold));
  }
};

template <typename T>
struct Relu6GradFunctor : public BaseActivationFunctor<T> {
  float threshold;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) =
        dout *
        ((out > static_cast<T>(0)) * (out < static_cast<T>(threshold)))
            .template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

// HardSwish = min(max(0, x+3), 6) * x / 6
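// e.g. with offset = 3, threshold = 6 and scale = 6 as written above,
// hard_swish(1) = min(max(0, 1 + 3), 6) * 1 / 6 = 2 / 3.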
template <typename T>
struct HardSwishFunctor : public BaseActivationFunctor<T> {
  float threshold;
  float scale;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}, {"scale", &scale}, {"offset", &offset}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = (x + static_cast<T>(offset))
                        .cwiseMax(static_cast<T>(0))
                        .cwiseMin(static_cast<T>(threshold)) *
                    x / static_cast<T>(scale);
  }
};

template <typename T>
struct HardSwishGradFunctor : public BaseActivationFunctor<T> {
  float threshold;
  float scale;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}, {"scale", &scale}, {"offset", &offset}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto tmp = ((x + static_cast<T>(offset)) < static_cast<T>(threshold))
                   .template cast<T>();
    dx.device(d) =
        dout *
        (((x + static_cast<T>(offset)) > static_cast<T>(0)).template cast<T>() *
             (static_cast<T>(2) * x + static_cast<T>(offset)) /
             static_cast<T>(scale) * tmp +
         static_cast<T>(1) * (static_cast<T>(1) - tmp));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// softplus(x) = log(1 + exp(x))
// When x is a very large positive number, exp(x) may explode to inf,
// Using trick below for numerical stability
// https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
// Then: softplus(x) = max(x, 0) + log(exp(-max(x, 0)) + exp(x - max(x, 0)))
template <typename T>
struct SoftplusFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) {
    auto temp = x.cwiseMax(static_cast<T>(0));  // temp = max(x, 0)
    out.device(d) = temp + (((-temp).exp() + (x - temp).exp()).log());
  }
};

// d(softplus(x))/dx = exp(x) / (1 + exp(x))
// For numerical stability:
// d(softplus(x))/dx = exp(x - max(x, 0)) / (exp(-max(x, 0)) +
// exp(x - max(x, 0)))
template <typename T>
struct SoftplusGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) {
    auto temp = x.cwiseMax(static_cast<T>(0));  // temp = max(x, 0)
    dx.device(d) =
        dout * ((x - temp).exp() / ((-temp).exp() + (x - temp).exp()));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// softsign(x) = x / (1 + |x|)
template <typename T>
struct SoftsignFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) {
    out.device(d) = x / (static_cast<T>(1) + x.abs());
  }
};

// d(softsign(x))/dx = 1 / (1 + |x|)^2
// Taken from https://en.wikipedia.org/wiki/Activation_function
template <typename T>
struct SoftsignGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) {
    dx.device(d) =
        dout * (static_cast<T>(1) / (static_cast<T>(1) + x.abs()).square());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct SoftReluFunctor : public BaseActivationFunctor<T> {
  float threshold;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    auto tmp = static_cast<T>(threshold);
    auto temp = x.cwiseMax(-tmp).cwiseMin(tmp);
    out.device(d) = (static_cast<T>(1) + temp.exp()).log();
  }
};

template <typename T>
struct SoftReluGradFunctor : public BaseActivationFunctor<T> {
  float threshold;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto tmp = static_cast<T>(threshold);
    auto temp = ((out > -tmp) * (out < tmp)).template cast<T>();
    dx.device(d) = dout * (static_cast<T>(1) - (-out).exp()) * temp;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct LeakyReluFunctor : public BaseActivationFunctor<T> {
  float alpha;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.cwiseMax(static_cast<T>(alpha) * x);
  }
};

template <typename T>
struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
  float alpha;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto temp1 =
        static_cast<T>(alpha) * (out <= static_cast<T>(0)).template cast<T>();
    auto temp2 = (out > static_cast<T>(0)).template cast<T>();
    dx.device(d) = dout * (temp1 + temp2).template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct ELUFunctor : public BaseActivationFunctor<T> {
  float alpha;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.cwiseMax(static_cast<T>(0)) +
                    (static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)))
                        .cwiseMin(static_cast<T>(0));
  }
};

template <typename T>
struct ELUGradFunctor : public BaseActivationFunctor<T> {
  float alpha;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * (x > static_cast<T>(0)).template cast<T>() +
                   dout * static_cast<T>(alpha) * x.exp() *
                       (x <= static_cast<T>(0)).template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5198
template <typename T>
struct PowFunctor : public BaseActivationFunctor<T> {
  float factor;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"factor", &factor}};
  }
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.pow(static_cast<T>(factor));
  }
};

template <typename T>
struct PowGradFunctor : public BaseActivationFunctor<T> {
  float factor;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"factor", &factor}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * static_cast<T>(factor) *
                   x.pow(static_cast<T>(factor) - static_cast<T>(1));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct STanhFunctor : public BaseActivationFunctor<T> {
  float scale_a;
  float scale_b;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) =
        static_cast<T>(scale_b) * (static_cast<T>(scale_a) * x).tanh();
  }
};

template <typename T>
struct STanhGradFunctor : public BaseActivationFunctor<T> {
  float scale_a;
  float scale_b;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }

  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto a = static_cast<T>(scale_a);
    auto b = static_cast<T>(scale_b);
    auto temp = (a * x).tanh() * (a * x).tanh();
    dx.device(d) = dout * a * b * (static_cast<T>(1) - temp);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct ThresholdedReluFunctor : public BaseActivationFunctor<T> {
  float threshold;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    auto th = static_cast<T>(threshold);
    out.device(d) = (x > th).template cast<T>() * x;
  }
};

template <typename T>
struct ThresholdedReluGradFunctor : public BaseActivationFunctor<T> {
  float threshold;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    auto th = static_cast<T>(threshold);
    dx.device(d) = dout * (x > th).template cast<T>();
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct HardSigmoidFunctor : public BaseActivationFunctor<T> {
  float slope;
  float offset;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);
    out.device(d) =
        temp.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(1));
  }
};

template <typename T>
struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
  float slope;
  float offset;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout *
                   ((out > static_cast<T>(0)) * (out < static_cast<T>(1)))
                       .template cast<T>() *
                   static_cast<T>(slope);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct SwishFunctor : public BaseActivationFunctor<T> {
  float beta;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}};
  }

  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
  }
};

template <typename T>
struct SwishGradFunctor : public BaseActivationFunctor<T> {
  float beta;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}};
  }

  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out fake_out, dOut dout, dX dx) const {
    auto temp1 = static_cast<T>(1) /
                 (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
    auto out = x * temp1;
    auto temp2 = temp1 * (static_cast<T>(1) - (static_cast<T>(beta) * out));
    dx.device(d) = dout * ((static_cast<T>(beta) * out) + temp2);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

/*
 * in arguments: x, out, ddx
 * out arguments: ddout, dout, dx
 */
template <ActBwdOpFwdDeps kDepValue>
inline void ExtractActivationDoubleGradTensor(
    const framework::ExecutionContext& ctx, const framework::Tensor** X,
    const framework::Tensor** Out, const framework::Tensor** ddX,
    framework::Tensor** dX, framework::Tensor** dOut,
    framework::Tensor** ddOut) {
  auto ddx_var = ctx.InputVar("DDX");
  auto ddo_var = ctx.OutputVar("DDOut");
  PADDLE_ENFORCE(ddx_var != nullptr,
                 "Cannot get input Variable DDX, variable name = %s",
                 ctx.InputName("DDX"));
  if (CanBeUsedBySelectedRows.count(ctx.Type())) {
    *ddX = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*ddx_var);
    if (ddo_var) {
      *ddOut = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(
          ddo_var);
    }
  } else {
    *ddX = ctx.Input<framework::Tensor>("DDX");
    if (ddo_var) {
      *ddOut = ctx.Output<framework::Tensor>("DDOut");
    }
  }
  PADDLE_ENFORCE(*ddX != nullptr,
                 "Cannot get input tensor DDX, variable name = %s",
                 ctx.InputName("DDX"));

  if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
    auto x_var = ctx.InputVar("X");
    PADDLE_ENFORCE(x_var != nullptr,
                   "Cannot get input Variable X, variable name = %s",
                   ctx.InputName("X"));
    auto dx_var = ctx.OutputVar("DX");
    if (CanBeUsedBySelectedRows.count(ctx.Type())) {
      *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
      if (dx_var) {
        *dX = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(
            dx_var);
      }
    } else {
      *X = ctx.Input<framework::Tensor>("X");
      if (dx_var) {
        *dX = ctx.Output<framework::Tensor>("DX");
      }
    }
  } else {
    VLOG(10) << "Inplace activation of Op: " << ctx.Type();
    *X = *ddX;
  }
  if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
    auto out_var = ctx.InputVar("Out");
    PADDLE_ENFORCE(out_var != nullptr,
                   "Cannot get input Variable Out, variable name = %s",
                   ctx.InputName("Out"));
    auto dout_var = ctx.OutputVar("DOut");
    if (CanBeUsedBySelectedRows.count(ctx.Type())) {
      *Out =
          paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*out_var);
      if (dout_var) {
        *dOut =
            paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(
                dout_var);
      }
    } else {
      *Out = ctx.Input<framework::Tensor>("Out");
      if (dout_var) {
        *dOut = ctx.Output<framework::Tensor>("DOut");
      }
    }
  } else {
    VLOG(10) << "Inplace activation of Op: " << ctx.Type();
    *Out = *ddX;
  }
}

template <typename DeviceContext, typename Functor>
class ActivationDoubleGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *X, *Out, *ddX;
    X = Out = ddX = nullptr;
    framework::Tensor *ddOut, *dOut, *dX;
    ddOut = dOut = dX = nullptr;

    ExtractActivationDoubleGradTensor<Functor::FwdDeps()>(ctx, &X, &Out, &ddX,
                                                          &dX, &dOut, &ddOut);

    if (ddOut) ddOut->mutable_data<T>(ctx.GetPlace());
    if (dOut) dOut->mutable_data<T>(ctx.GetPlace());
    if (dX) dX->mutable_data<T>(Out->dims(), ctx.GetPlace());

    auto& place = ctx.template device_context<DeviceContext>();

    Functor functor;
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }
    functor(place, X, Out, ddX, ddOut, dOut, dX);
  }
};

template <typename T>
struct ReluGradGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device>
  void operator()(const Device& dev, const framework::Tensor* X,
                  const framework::Tensor* Out, const framework::Tensor* ddX,
                  framework::Tensor* ddOut, framework::Tensor* dOut,
                  framework::Tensor* dX) const {
    auto* d = dev.eigen_device();
    auto ddx = framework::EigenVector<T>::Flatten(detail::Ref(ddX));
    auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
    if (ddOut) {
      auto ddout = framework::EigenVector<T>::Flatten(detail::Ref(ddOut));
      ddout.device(*d) = ddx * (out > static_cast<T>(0)).template cast<T>();
    }
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};
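
// For relu, out = max(x, 0) and d(out)/dx = 1 for out > 0 (0 elsewhere), so
// the second-order term above is just ddx gated by the same (out > 0) mask;
// dOut and dX are left untouched because relu'' is zero almost everywhere.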

template <typename T>
struct LeakyReluGradGradFunctor : public BaseActivationFunctor<T> {
  float alpha;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }
  template <typename Device>
  void operator()(const Device& dev, const framework::Tensor* X,
                  const framework::Tensor* Out, const framework::Tensor* ddX,
                  framework::Tensor* ddOut, framework::Tensor* dOut,
                  framework::Tensor* dX) const {
    if (ddOut) {
Z
Zeng Jinle 已提交
1394 1395 1396
      auto* d = dev.eigen_device();
      auto ddx = framework::EigenVector<T>::Flatten(detail::Ref(ddX));
      auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
1397
      auto ddout = framework::EigenVector<T>::Flatten(detail::Ref(ddOut));
      ddout.device(*d) = ddx *
                         ((out > static_cast<T>(0)).template cast<T>() +
                          static_cast<T>(alpha) *
                              (out <= static_cast<T>(0)).template cast<T>())
                             .template cast<T>();
    }
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};
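
// Derivation for LeakyReluGradGradFunctor above: leaky_relu gives Out = X for
// X > 0 and alpha * X otherwise, so
//   dOut/dX = 1{Out > 0} + alpha * 1{Out <= 0}
// (for alpha > 0 the sign of Out matches the sign of X, so the mask may be
// taken on Out). The second derivative is zero, hence only
//   DDOut = DDX * (1{Out > 0} + alpha * 1{Out <= 0})
// is produced and DX / DOut are left untouched.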

template <typename T>
struct ELUGradGradFunctor : public BaseActivationFunctor<T> {
  float alpha;
  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }
  template <typename Device>
  void operator()(const Device& dev, const framework::Tensor* X,
                  const framework::Tensor* ddX, framework::Tensor* ddOut,
                  const framework::Tensor* dOut, framework::Tensor* dX) const {
    auto* d = dev.eigen_device();
    auto ddx = framework::EigenVector<T>::Flatten(detail::Ref(ddX));
    auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));

    if (dX) {
      auto dx = framework::EigenVector<T>::Flatten(detail::Ref(dX));
      auto dout = framework::EigenVector<T>::Flatten(detail::Ref(dOut));
      dx.device(*d) = ddx * dout * static_cast<T>(alpha) * x.exp() *
                      (x < static_cast<T>(0)).template cast<T>();
    }

    if (ddOut) {
      auto ddout = framework::EigenVector<T>::Flatten(detail::Ref(ddOut));
      ddout.device(*d) = ddx *
                         ((x > static_cast<T>(0)).template cast<T>() +
                          static_cast<T>(alpha) * x.exp() *
                              (x <= static_cast<T>(0)).template cast<T>())
                             .template cast<T>();
    }
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
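
// Derivation for ELUGradGradFunctor above: elu gives Out = X for X > 0 and
// alpha * (exp(X) - 1) otherwise, so with
//   f'(X)  = 1{X > 0} + alpha * exp(X) * 1{X <= 0}
//   f''(X) = alpha * exp(X) * 1{X < 0}
// the double-grad outputs computed above are
//   DDOut = DDX * f'(X)   and   DX = DDX * DOut * f''(X).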

template <typename T>
struct SqrtGradGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device>
  void operator()(const Device& dev, const framework::Tensor* Out,
                  const framework::Tensor* ddX, framework::Tensor* ddOut,
                  framework::Tensor* dOut, const framework::Tensor* dX) const {
    auto* d = dev.eigen_device();
    auto ddx = framework::EigenVector<T>::Flatten(detail::Ref(ddX));
    auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
    // sqrt GradGrad: ddy = 0.5 * ddx / y, dy = -1 * dx * ddx / y
    // calculate dy first, so that ddy can reuse ddx's buffer in place
    if (dOut) {
      auto dx = framework::EigenVector<T>::Flatten(detail::Ref(dX));
      auto dout = framework::EigenVector<T>::Flatten(detail::Ref(dOut));
      dout.device(*d) = dx * ddx * static_cast<T>(-1) / out;
    }
    if (ddOut) {
      auto ddout = framework::EigenVector<T>::Flatten(detail::Ref(ddOut));
      ddout.device(*d) = ddx * static_cast<T>(0.5) / out;
    }
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};
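
// Derivation for SqrtGradGradFunctor above: with y = sqrt(x) the first-order
// backward is dx = 0.5 * dout / y. Differentiating that expression gives the
// two double-grad outputs computed above:
//   DDOut = 0.5 * DDX / Out    (d dx / d dout = 0.5 / y)
//   DOut  = -DX * DDX / Out    (d dx / d y = -0.5 * dout / y^2 = -dx / y)
// DOut is written first so that DDOut may safely reuse DDX's buffer in place.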

template <typename T>
struct SquareGradGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device>
  void operator()(const Device& dev, const framework::Tensor* X,
                  const framework::Tensor* ddX, framework::Tensor* ddOut,
                  const framework::Tensor* dOut, framework::Tensor* dX) const {
    auto* d = dev.eigen_device();
    auto ddx = framework::EigenVector<T>::Flatten(detail::Ref(ddX));
    auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
    // square GradGrad: ddy = 2 * x * ddx, dx = 2 * dy * ddx
    // calculate dx first, so that ddy can reuse ddx's buffer in place
    if (dX) {
      auto dx = framework::EigenVector<T>::Flatten(detail::Ref(dX));
      auto dout = framework::EigenVector<T>::Flatten(detail::Ref(dOut));
      dx.device(*d) = ddx * static_cast<T>(2) * dout;
    }
    if (ddOut) {
      auto ddout = framework::EigenVector<T>::Flatten(detail::Ref(ddOut));
      ddout.device(*d) = ddx * static_cast<T>(2) * x;
    }
  }
  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
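
// Derivation for SquareGradGradFunctor above: with y = x * x the first-order
// backward is dx = 2 * x * dout. Differentiating that expression gives the
// double-grad outputs computed above:
//   DDOut = 2 * X * DDX     (d dx / d dout = 2x)
//   DX    = 2 * DOut * DDX  (d dx / d x    = 2 * dout)
// DX is written first so that DDOut may safely reuse DDX's buffer in place.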

// TODO(dengkaipeng): the double-gradient calculation for Square/Sqrt needs
// DOut (dy) as an input (not an output), so tensor extraction differs from
// the other activations. Implement the extraction helper separately here.
inline void ExtractDoubleGradTensorWithInputDOut(
    const framework::ExecutionContext& ctx, const framework::Tensor** X,
    const framework::Tensor** ddX, framework::Tensor** dX,
    const framework::Tensor** dOut, framework::Tensor** ddOut) {
  // extract ddX(input), ddOut(output)
  auto ddx_var = ctx.InputVar("DDX");
  auto ddo_var = ctx.OutputVar("DDOut");
  PADDLE_ENFORCE(ddx_var != nullptr,
                 "Cannot get input Variable DDX, variable name = %s",
                 ctx.InputName("DDX"));
  *ddX = ctx.Input<framework::Tensor>("DDX");
  if (ddo_var) {
    *ddOut = ctx.Output<framework::Tensor>("DDOut");
  }
  PADDLE_ENFORCE(*ddX != nullptr,
                 "Cannot get input tensor DDX, variable name = %s",
                 ctx.InputName("DDX"));

  // extract x(input), dx(output)
  auto x_var = ctx.InputVar("X");
  PADDLE_ENFORCE(x_var != nullptr,
                 "Cannot get input Variable Out, variable name = %s",
H
hong 已提交
1514
                 ctx.InputName("X"));
  auto dx_var = ctx.OutputVar("DX");
  *X = ctx.Input<framework::Tensor>("X");
  if (dx_var) {
    *dX = ctx.Output<framework::Tensor>("DX");
  }

  // extract dOut(input)
  auto dout_var = ctx.InputVar("DOut");
  if (dout_var) {
    *dOut = ctx.Input<framework::Tensor>("DOut");
  }
}
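
// In short, this helper expects the double-grad op to expose DDX, X and DOut
// as inputs and DX and DDOut as (optional) outputs; SquareDoubleGradKernel
// and ELUDoubleGradKernel below rely on exactly that slot layout.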

template <typename DeviceContext, typename Functor>
class SquareDoubleGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *X, *ddX, *dOut;
    X = ddX = dOut = nullptr;
    framework::Tensor *dX, *ddOut;
    dX = ddOut = nullptr;

    ExtractDoubleGradTensorWithInputDOut(ctx, &X, &ddX, &dX, &dOut, &ddOut);

    if (dX) dX->mutable_data<T>(X->dims(), ctx.GetPlace());
    if (ddOut) ddOut->mutable_data<T>(ctx.GetPlace());

    auto& place = ctx.template device_context<DeviceContext>();

    Functor functor;
    functor(place, X, ddX, ddOut, dOut, dX);
  }
};

template <typename DeviceContext, typename Functor>
class ELUDoubleGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *X, *ddX, *dOut;
    X = ddX = dOut = nullptr;
    framework::Tensor *dX, *ddOut;
    dX = ddOut = nullptr;

    ExtractDoubleGradTensorWithInputDOut(ctx, &X, &ddX, &dX, &dOut, &ddOut);

    if (dX) dX->mutable_data<T>(X->dims(), ctx.GetPlace());
    if (ddOut) ddOut->mutable_data<T>(ctx.GetPlace());

    auto& place = ctx.template device_context<DeviceContext>();

    Functor functor;
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }
    functor(place, X, ddX, ddOut, dOut, dX);
  }
};

template <typename DeviceContext, typename Functor>
class SqrtDoubleGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *Out, *dX, *ddX;
    Out = dX = ddX = nullptr;
    framework::Tensor *ddOut, *dOut;
    ddOut = dOut = nullptr;

    // extract ddx(input), ddout(output)
    auto ddx_var = ctx.InputVar("DDX");
    auto ddo_var = ctx.OutputVar("DDOut");
    PADDLE_ENFORCE(ddx_var != nullptr,
                   "Cannot get input Variable DDX, variable name = %s",
                   ctx.InputName("DDX"));
    ddX = ctx.Input<framework::Tensor>("DDX");
    if (ddo_var) {
      ddOut = ctx.Output<framework::Tensor>("DDOut");
    }
    PADDLE_ENFORCE(ddX != nullptr,
                   "Cannot get input Variable DDX, variable name = %s",
                   ctx.InputName("DDX"));

    // extract out(input), dout(output)
    auto out_var = ctx.InputVar("Out");
    PADDLE_ENFORCE(out_var != nullptr,
                   "Cannot get input Variable Out, variable name = %s",
                   ctx.InputName("Out"));
    auto dout_var = ctx.OutputVar("DOut");
    Out = ctx.Input<framework::Tensor>("Out");
    if (dout_var) {
      dOut = ctx.Output<framework::Tensor>("DOut");
    }

    // extract dx(input)
    auto dx_var = ctx.InputVar("DX");
    PADDLE_ENFORCE(dx_var != nullptr,
                   "Cannot get input Variable DX, variable name = %s",
                   ctx.InputName("DX"));
    if (dx_var) {
      dX = ctx.Input<framework::Tensor>("DX");
    }

    if (dOut) dOut->mutable_data<T>(Out->dims(), ctx.GetPlace());
    if (ddOut) ddOut->mutable_data<T>(Out->dims(), ctx.GetPlace());

    auto& place = ctx.template device_context<DeviceContext>();

    Functor functor;
    functor(place, Out, ddX, ddOut, dOut, dX);
  }
};

template <typename DeviceContext, typename Functor>
class PowKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;

  void Compute(const framework::ExecutionContext& context) const override {
    const framework::Tensor* X = nullptr;
    framework::Tensor* Out = nullptr;
    ExtractActivationTensor(context, &X, &Out);
    Out->mutable_data<T>(context.GetPlace());

    auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
    auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
    auto* place =
        context.template device_context<DeviceContext>().eigen_device();
    Functor functor;
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = context.Attr<float>(attr.first);
    }
    // get FactorTensor
    auto* factor_tensor = context.HasInput("FactorTensor")
                              ? context.Input<framework::Tensor>("FactorTensor")
                              : nullptr;
    if (factor_tensor) {
      auto* factor_data = factor_tensor->data<float>();
      framework::Tensor cpu_factor_tensor;
      if (platform::is_gpu_place(factor_tensor->place())) {
        TensorCopySync(*factor_tensor, platform::CPUPlace(),
                       &cpu_factor_tensor);
        factor_data = cpu_factor_tensor.data<float>();
      }
      auto factor =
          std::vector<float>(factor_data, factor_data + factor_tensor->numel());
      PADDLE_ENFORCE_EQ(factor.size(), 1,
                        "The shape of factor(tensor) MUST BE [1].");
      for (auto& attr : attrs) {
        *attr.second = factor[0];
      }
    }
    functor(*place, x, out);
  }
};
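
// Effect of the optional FactorTensor input on the forward pass above: its
// single element overrides the "factor" attribute at runtime (a GPU-resident
// tensor is first copied back to the CPU before being read), so effectively
//   Out = X ^ factor,  where factor = FactorTensor[0] if FactorTensor is fed,
//                                     Attr("factor")   otherwise.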

template <typename DeviceContext, typename Functor>
class PowGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& context) const override {
    const framework::Tensor *X, *Out, *dOut;
    framework::Tensor* dX = nullptr;
    X = Out = dOut = nullptr;
    ExtractActivationGradTensor<Functor::FwdDeps()>(context, &X, &Out, &dOut,
                                                    &dX);
    dX->mutable_data<T>(context.GetPlace());
    auto dout = framework::EigenVector<T>::Flatten(detail::Ref(dOut));
    auto out = framework::EigenVector<T>::Flatten(detail::Ref(Out));
    auto dx = framework::EigenVector<T>::Flatten(detail::Ref(dX));
    auto x = framework::EigenVector<T>::Flatten(detail::Ref(X));
    auto* place =
        context.template device_context<DeviceContext>().eigen_device();
    Functor functor;
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = context.Attr<float>(attr.first);
    }
    // get FactorTensor
    auto* factor_tensor =
        context.HasInput("FactorTensor")
            ? context.Input<framework::LoDTensor>("FactorTensor")
            : nullptr;
    if (factor_tensor) {
      auto* factor_data = factor_tensor->data<float>();
      framework::Tensor cpu_factor_tensor;
      if (platform::is_gpu_place(factor_tensor->place())) {
        TensorCopySync(*factor_tensor, platform::CPUPlace(),
                       &cpu_factor_tensor);
        factor_data = cpu_factor_tensor.data<float>();
      }
      auto factor =
          std::vector<float>(factor_data, factor_data + factor_tensor->numel());
      PADDLE_ENFORCE_EQ(factor.size(), 1,
                        "The shape of factor(tensor) MUST BE [1].");
      for (auto& attr : attrs) {
        *attr.second = factor[0];
      }
    }
    functor(*place, x, out, dout, dx);
  }
};
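
// The backward kernel resolves FactorTensor the same way and then calls
// functor(place, x, out, dout, dx). Assuming the PowGradFunctor it is
// instantiated with implements the usual power rule, the value written is
//   dX = factor * X^(factor - 1) * dOut.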
}  // namespace operators
}  // namespace paddle

#define FOR_EACH_ACTIVATION_OP(__macro)                                       \
  __macro(sigmoid, Sigmoid, SigmoidFunctor, SigmoidGradFunctor);              \
  __macro(logsigmoid, LogSigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor);  \
  __macro(gelu, Gelu, GeluFunctor, GeluGradFunctor);                          \
  __macro(tanh, Tanh, TanhFunctor, TanhGradFunctor);                          \
  __macro(atan, Atan, AtanFunctor, AtanGradFunctor);                          \
  __macro(softshrink, SoftShrink, SoftShrinkFunctor, SoftShrinkGradFunctor);  \
  __macro(rsqrt, Rsqrt, RsqrtFunctor, RsqrtGradFunctor);                      \
  __macro(ceil, Ceil, CeilFunctor, ZeroGradFunctor);                          \
  __macro(floor, Floor, FloorFunctor, ZeroGradFunctor);                       \
  __macro(cos, Cos, CosFunctor, CosGradFunctor);                              \
  __macro(acos, Acos, AcosFunctor, AcosGradFunctor);                          \
  __macro(sin, Sin, SinFunctor, SinGradFunctor);                              \
  __macro(asin, Asin, AsinFunctor, AsinGradFunctor);                          \
  __macro(round, Round, RoundFunctor, ZeroGradFunctor);                       \
  __macro(reciprocal, Reciprocal, ReciprocalFunctor, ReciprocalGradFunctor);  \
  __macro(log, Log, LogFunctor, LogGradFunctor);                              \
  __macro(brelu, BRelu, BReluFunctor, BReluGradFunctor);                      \
  __macro(soft_relu, SoftRelu, SoftReluFunctor, SoftReluGradFunctor);         \
  __macro(stanh, STanh, STanhFunctor, STanhGradFunctor);                      \
  __macro(softplus, Softplus, SoftplusFunctor, SoftplusGradFunctor);          \
  __macro(softsign, Softsign, SoftsignFunctor, SoftsignGradFunctor);          \
  __macro(relu6, Relu6, Relu6Functor, Relu6GradFunctor);                      \
  __macro(tanh_shrink, TanhShrink, TanhShrinkFunctor, TanhShrinkGradFunctor); \
  __macro(hard_shrink, HardShrink, HardShrinkFunctor, HardShrinkGradFunctor); \
  __macro(hard_sigmoid, HardSigmoid, HardSigmoidFunctor,                      \
          HardSigmoidGradFunctor);                                            \
  __macro(swish, Swish, SwishFunctor, SwishGradFunctor);                      \
  __macro(thresholded_relu, ThresholdedRelu, ThresholdedReluFunctor,          \
          ThresholdedReluGradFunctor);                                        \
  __macro(hard_swish, HardSwish, HardSwishFunctor, HardSwishGradFunctor);
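
// A sketch of how FOR_EACH_ACTIVATION_OP is typically consumed. The actual
// registration lives in activation_op.cc / activation_op.cu rather than in
// this header, so the REGISTER_ACTIVATION_CPU_KERNEL macro and the ops/plat
// namespace aliases below are assumptions for illustration only (backslash
// line continuations omitted inside this comment):
//
//   namespace ops = paddle::operators;
//   namespace plat = paddle::platform;
//
//   #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, op_name, functor,
//                                          grad_functor)
//     REGISTER_OP_CPU_KERNEL(
//         act_type,
//         ops::ActivationKernel<plat::CPUDeviceContext, ops::functor<float>>,
//         ops::ActivationKernel<plat::CPUDeviceContext, ops::functor<double>>);
//     REGISTER_OP_CPU_KERNEL(
//         act_type##_grad,
//         ops::ActivationGradKernel<plat::CPUDeviceContext,
//                                   ops::grad_functor<float>>,
//         ops::ActivationGradKernel<plat::CPUDeviceContext,
//                                   ops::grad_functor<double>>);
//
//   FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CPU_KERNEL);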