/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"

#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h"
#include "paddle/fluid/platform/port.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace operators {

using paddle::framework::Tensor;

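// An activation can run in place (Out sharing X's buffer) only when its
// backward pass never reads X, i.e. the gradient functor depends solely on
// Out (kDepOut) or on nothing (kNoDeps).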
template <typename GradFunctor>
static constexpr bool CanInplaceAct() {
  return GradFunctor::FwdDeps() == kDepOut || GradFunctor::FwdDeps() == kNoDeps;
}

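// Generates a standard OpMaker for a unary activation: a single input X, a
// single output Out, the use_mkldnn/use_cudnn switches, and OP_COMMENT as the
// operator documentation.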
#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)                    \
  class OP_NAME##OpMaker                                                     \
      : public ::paddle::framework::OpProtoAndCheckerMaker {                 \
   public:                                                                   \
    void Make() override {                                                   \
      AddInput("X", "Input of " #OP_NAME                                     \
                    " operator, an N-D Tensor, with data type float32, "     \
                    "float64 or float16.");                                  \
      AddOutput("Out", "Output of " #OP_NAME                                 \
                       " operator, a Tensor with shape same as input.");     \
      AddAttr<bool>("use_mkldnn",                                            \
                    "(bool, default false) Only used in mkldnn kernel")      \
          .SetDefault(false);                                                \
      AddAttr<bool>("use_cudnn",                                             \
                    "(bool, default false) Only used in cudnn kernel, need " \
                    "install cudnn")                                         \
          .SetDefault(false);                                                \
      AddComment(OP_COMMENT);                                                \
    }                                                                        \
  }

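// Generic grad-op maker shared by the activations. Depending on the gradient
// functor's forward dependencies (kDepValue), only X, only Out, or neither is
// forwarded to the *_grad op; when MKL-DNN is requested, X is always forwarded.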
template <ActBwdOpFwdDeps kDepValue, typename T>
class ActivationGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetAttrMap(this->Attrs());

    if ((static_cast<int>(kDepValue) &
         static_cast<int>(ActBwdOpFwdDeps::kDepX)) ||
        FLAGS_use_mkldnn ||
        (op->HasAttr("use_mkldnn") &&
         BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn")))) {
      op->SetInput("X", this->Input("X"));
    }

    if (static_cast<int>(kDepValue) &
        static_cast<int>(ActBwdOpFwdDeps::kDepOut)) {
      op->SetInput("Out", this->Output("Out"));
    }
  }
};

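// Selects the kernel type for an activation op: start from the plain library,
// switch to MKL-DNN when the "use_mkldnn" attribute is present and MKL-DNN is
// usable, and take the data type from the variable named by `name`.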
framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
                                      const framework::OperatorWithKernel& oper,
                                      const std::string& name) {
  framework::LibraryType library{framework::LibraryType::kPlain};
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
// FIXME(liuwei1031) temporarily disable the code to unblock users
// TODO(liuwei1031) figure out the reason behind
// https://github.com/PaddlePaddle/Paddle/issues/16096
// and re-enable this in the future
// #ifdef PADDLE_WITH_CUDA
//   auto it1 = oper.Attrs().find("use_cudnn");
//   if (it1 != oper.Attrs().end() && platform::CanCUDNNBeUsed(ctx)) {
//     library = framework::LibraryType::kCUDNN;
//   }
// #endif
#ifdef PADDLE_WITH_MKLDNN
  auto it = oper.Attrs().find("use_mkldnn");
  if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif
  return framework::OpKernelType(oper.IndicateVarDataType(ctx, name),
                                 ctx.GetPlace(), layout, library);
}

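// Forward operator shared by the simple activations: Out keeps the shape and
// LoD of X, and the kernel type is chosen by GetKernelType() from input "X".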
class ActivationOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }
};

class ActivationOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ActivationOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }
};

UNUSED constexpr char SigmoidDoc[] = R"DOC(
Sigmoid Activation Operator

$$out = \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char LogSigmoidDoc[] = R"DOC(
Logsigmoid Activation Operator

$$out = \\log \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char ExpDoc[] = R"DOC(
Exp Operator. Computes exp of x element-wise with a natural number :math:`e` as the base.

$$out = e^x$$

)DOC";

UNUSED constexpr char ReluDoc[] = R"DOC(
Relu Activation Operator.

$$out = \max(x, 0)$$

)DOC";

UNUSED constexpr char TanhDoc[] = R"DOC(
Tanh Activation Operator.

$$out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char TanhShrinkDoc[] = R"DOC(
TanhShrink Activation Operator.

$$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator.

.. math:: out=\\sqrt{x}=x^{1/2}

**Note**:
  input value must be greater than or equal to zero.

)DOC";

UNUSED constexpr char RsqrtDoc[] = R"DOC(
Rsqrt Activation Operator.

Please make sure input is legal in case of numeric errors.

$$out = \\frac{1}{\\sqrt{x}}$$

)DOC";

UNUSED constexpr char AbsDoc[] = R"DOC(
Abs Operator.

$$out = |x|$$

)DOC";

UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.

$$out = \\left \\lceil x \\right \\rceil$$

)DOC";

UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.

$$out = \\left \\lfloor x \\right \\rfloor$$

)DOC";

UNUSED constexpr char CosDoc[] = R"DOC(
Cosine Operator. Computes cosine of x element-wise.

Input range is `(-inf, inf)` and output range is `[-1,1]`.

$$out = cos(x)$$

)DOC";

UNUSED constexpr char SinDoc[] = R"DOC(
Sine Activation Operator.

$$out = sin(x)$$

)DOC";

UNUSED constexpr char SinhDoc[] = R"DOC(
Sinh Activation Operator.

$$out = sinh(x)$$

)DOC";

UNUSED constexpr char CoshDoc[] = R"DOC(
Cosh Activation Operator.

$$out = cosh(x)$$

)DOC";

UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.

.. code-block:: python

  input:
    x.shape = [4]
    x.data = [1.2, -0.9, 3.4, 0.9]

  output:
    out.shape = [4]
    out.data = [1., -1., 3., 1.]

)DOC";

UNUSED constexpr char ReciprocalDoc[] = R"DOC(
Reciprocal Activation Operator.

$$out = \\frac{1}{x}$$

)DOC";

UNUSED constexpr char LogDoc[] = R"DOC(
Log Activation Operator.

$$out = \ln(x)$$

Natural logarithm of x.

)DOC";

UNUSED constexpr char Log2Doc[] = R"DOC(
Log2 Activation Operator.

$$out = \log_2x$$

Base-2 logarithm of x.

)DOC";

UNUSED constexpr char Log1pDoc[] = R"DOC(
Log1p Activation Operator.

$out = \ln(x+1)$

Natural logarithm of (x+1).

)DOC";

UNUSED constexpr char SquareDoc[] = R"DOC(
The OP squares each element of the input.

$$out = x^2$$

)DOC";

UNUSED constexpr char SoftsignDoc[] = R"DOC(
Softsign Activation Operator.

$$out = \\frac{x}{1 + \|x\|}$$

)DOC";

class AcosOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of acos operator");
    AddOutput("Out", "Output of acos operator");
    AddComment(R"DOC(
Arccosine Operator.

$$out = \cos^{-1}(x)$$

)DOC");
  }
};

class AsinOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of asin operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of asin operator");
    AddComment(R"DOC(
Arcsine Operator.

$$out = \sin^{-1}(x)$$

)DOC");
  }
};

class AtanOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of atan operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of atan operator");
    AddComment(R"DOC(
Arctangent Operator.

$$out = \tan^{-1}(x)$$

)DOC");
  }
};

class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "A LoDTensor or Tensor representing preactivation values. Must be "
             "one of the following types: float32, float64.");
    AddOutput(
        "Out",
        "A LoDTensor or Tensor with the same type and size as that of x.");
    AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
        .SetDefault(0.02f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
LeakyRelu Activation Operator.

$$out = \max(x, \alpha * x)$$

)DOC");
  }
};

class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of Softplus operator, an N-D Tensor, with data type "
             "float32, float64 or float16.");
    AddOutput(
        "Out",
        "Output of Softplus operator, a Tensor with shape same as input.");
    AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
    AddAttr<float>("threshold", "The value of threshold for Softplus.")
        .SetDefault(20.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel.")
        .SetDefault(false);
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in cudnn kernel, need install cudnn.")
        .SetDefault(false);
    AddComment(R"DOC(
:strong:`Softplus Activation Operator`

..  math::
    out = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
    \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.

)DOC");
  }
};

class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Softshrink operator");
    AddOutput("Out", "Output of Softshrink operator");
    AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`Softshrink Activation Operator`

..  math::
    out = \begin{cases}
         x - \lambda, \text{if } x > \lambda \\
         x + \lambda, \text{if } x < -\lambda \\
         0,  \text{otherwise}
         \end{cases}

)DOC");
  }
};

class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardShrink operator");
    AddOutput("Out", "Output of HardShrink operator");
    AddAttr<float>("threshold",
                   "The value of threshold for HardShrink. [default: 0.5]")
        .SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`HardShrink activation operator`

..  math::
    out = \begin{cases}
            x, \text{if } x > \lambda \\
            x, \text{if } x < -\lambda \\
            0,  \text{otherwise}
          \end{cases}

)DOC");
  }
};

class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32, float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``X``.");
    AddAttr<float>("t_min", "The min marginal value of BRelu")
        .SetDefault(static_cast<float>(0));
    AddAttr<float>("t_max", "The max marginal value of BRelu")
        .SetDefault(static_cast<float>(24));
    AddComment(R"DOC(
BRelu Activation Operator.

$$out = \min(\max(x, t_{min}), t_{max})$$

)DOC");
  }
};

class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of SoftRelu operator");
    AddOutput("Out", "Output of SoftRelu operator");
    AddAttr<float>("threshold", "The threshold value of SoftRelu")
        .SetDefault(40.0f);
    AddComment(R"DOC(
SoftRelu Activation Operator.

$$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$$

)DOC");
  }
};

class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32 or float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``x``.");
    AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
    AddComment(R"DOC(
ELU Activation Operator.

Applies the following element-wise computation on the input according to
https://arxiv.org/abs/1511.07289.

$$out = \max(0, x) + \min(0, \alpha * (e^x - 1))$$

)DOC");
  }
};

class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of relu6 operator, an N-D Tensor, "
             "with data type float32, float64.");
    AddOutput(
        "Out",
        "Output of relu6 operator, a Tensor with the same shape as input.");
    AddAttr<float>("threshold",
                   "The threshold value of Relu6. Default is 6.0. ")
        .SetDefault(6.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Relu6 Activation Operator.

$$out = \min(\max(0, x), threshold)$$

)DOC");
  }
};

class PowOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Pow operator");
    AddInput("FactorTensor",
             "(Tensor<float>, optional). If provided, pow will use this"
             "The shape of FactorTensor MUST BE [1]."
             "it has higher priority than attr(factor).")
        .AsDispensable();
    AddOutput("Out", "Output of Pow operator");
    AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
    AddComment(R"DOC(
Pow Activation Operator.

$$out = x^{factor}$$

)DOC");
  }
};

class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of STanh operator."
             " A LoDTensor or Tensor with type float32, float64.");
    AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
    AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
        .SetDefault(0.67f);
    AddAttr<float>("scale_b", "The scale parameter of b for the input")
        .SetDefault(1.7159f);
    AddComment(R"DOC(
STanh Activation Operator.

$$out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$

)DOC");
  }
};

class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of ThresholdedRelu operator");
    AddOutput("Out", "Output of ThresholdedRelu operator");
    AddAttr<float>("threshold",
                   "The threshold location of activation. [default 1.0].")
        .SetDefault(1.0f);
    AddComment(R"DOC(
:strong:`ThresholdedRelu activation operator`

..  math::

    out = \begin{cases}
             x,  \text{if } x > threshold \\
             0,  \text{otherwise}
          \end{cases}
)DOC");
  }
};

class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "An N-D Tensor with data type float32, float64. ");
    AddOutput("Out", "A Tensor with the same shape as input. ");
    AddAttr<float>("slope",
                   "The slope of the linear approximation of sigmoid. Its "
                   "value MUST BE positive. Default is 0.2. ")
        .SetDefault(0.2f);
    AddAttr<float>(
        "offset",
        "The offset of the linear approximation of sigmoid. Default is 0.5. ")
        .SetDefault(0.5f);
    AddComment(R"DOC(
HardSigmoid Activation Operator.

A 3-part piecewise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.

$$out = \max(0, \min(1, slope * x + offset))$$

)DOC");
  }
};

class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Swish operator");
    AddOutput("Out", "Output of Swish operator");
    AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Swish Activation Operator.

$$out = \\frac{x}{1 + e^{- \beta \ x}}$$

)DOC");
  }
};

class HardSwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardSwish operator");
    AddOutput("Out", "Output of HardSwish operator");
    AddAttr<float>("threshold", "The threshold parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("scale", "The scale parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("offset", "The offset parameter of HardSwish operator")
        .SetDefault(3.0f);
    AddComment(R"DOC(
HardSwish Activation Operator.

The hard version of swish(https://arxiv.org/pdf/1905.02244.pdf).

$$out = \frac{x * (min(max(0, x+offset), threshold))}{scale}$$

The threshold and scale should be positive. The offset can be either positive or negative.
The default parameters are set according to the above reference.
It is recommended to use the defaults for this activation.

)DOC");
  }
};

REGISTER_ACTIVATION_OP_MAKER(Sigmoid, SigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(LogSigmoid, LogSigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Exp, ExpDoc);
REGISTER_ACTIVATION_OP_MAKER(Relu, ReluDoc);
REGISTER_ACTIVATION_OP_MAKER(Tanh, TanhDoc);
REGISTER_ACTIVATION_OP_MAKER(TanhShrink, TanhShrinkDoc);
REGISTER_ACTIVATION_OP_MAKER(Sqrt, SqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Rsqrt, RsqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Abs, AbsDoc);
REGISTER_ACTIVATION_OP_MAKER(Ceil, CeilDoc);
REGISTER_ACTIVATION_OP_MAKER(Floor, FloorDoc);
REGISTER_ACTIVATION_OP_MAKER(Cos, CosDoc);
REGISTER_ACTIVATION_OP_MAKER(Sin, SinDoc);
REGISTER_ACTIVATION_OP_MAKER(Sinh, SinhDoc);
REGISTER_ACTIVATION_OP_MAKER(Cosh, CoshDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
REGISTER_ACTIVATION_OP_MAKER(Log2, Log2Doc);
REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);

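// Shape inference shared by the *_grad_grad ops: DX, DOut and DDOut follow the
// shape and LoD of X or Out, depending on which tensors the forward-dependency
// flag kDepValue marks as required.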
template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DX")) {
        ctx->ShareDim("X", "DX");
        ctx->ShareLoD("X", "DX");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DOut")) {
        ctx->ShareDim("Out", "DOut");
        ctx->ShareLoD("Out", "DOut");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad2 : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

// AbsGrad: dx=dy if x >=0 else -dy
// AbsDoubleGrad: ddy = ddx if x >=0 else -ddx
template <typename T>
class AbsDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("abs_grad_grad");
    // input1: x
    op->SetInput("X", this->Input("X"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// ReluGrad: dx = dy if y >= 0 else 0
// ReluGradGrad: ddy = ddx if y >= 0 else 0
template <typename T>
class ReluDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("relu_grad_grad");
    // input1: Out
    op->SetInput("Out", this->Input("Out"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// leaky_relu Grad: dx=dy if x>=0 else alpha * dy
// leaky_relu GradGrad: ddy=ddx if x>=0 else alpha * ddx
template <typename T>
class LeakyReluDoubleGradMaker
    : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("leaky_relu_grad_grad");
    // input1: X
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// elu grad: dx=dy if y>0 else alpha*dy*x.exp()
// elu gradgrad: ddx=ddy if y>0 else alpha*ddy*x.exp()
template <typename T>
class ELUDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("elu_grad_grad");

    op->SetInput("X", this->Input("X"));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());

    // Out@GRAD@GRAD: ddy
    op->SetOutput("DX", this->InputGrad("X"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// sqrt Grad: dx = 0.5 * dy / y
// sqrt GradGrad: ddy = 0.5 * ddx / y, dy = -1 * dx * ddx
template <typename T>
class SqrtDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("sqrt_grad_grad");
    op->SetInput("Out", this->Input("Out"));
    op->SetInput("DX", this->Output(framework::GradVarName("X")));
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    op->SetOutput("DOut", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// square Grad: dx=2x*dy
// square GradGrad: ddy=2x*ddx, dx=2dy*ddx
template <typename T>
class SquareDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("square_grad_grad");
    op->SetInput("X", this->Input("X"));
    // Out@GRAD: dy
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));

    op->SetAttrMap(this->Attrs());

    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// log Grad: dx = dout / x
// log Grad Grad: ddout = ddx / x; dx = -(dout / x) * (ddx / x)
template <typename T>
class LogDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("log_grad_grad");
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

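// In-place hints: the backward op may reuse dOut's buffer for dX, and the
// double-grad ops may reuse DDX's buffer for DDOut.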
DECLARE_INPLACE_OP_INFERER(ActivationGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ActivationDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

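// pow gets dedicated Op/Grad classes instead of ActivationOp because of its
// optional FactorTensor input; GetKernelTypeForVar below keeps FactorTensor on
// its original place and layout instead of transforming it.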
template <typename T>
class PowGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("pow_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetInput("FactorTensor", this->Input("FactorTensor"));
    op->SetAttrMap(this->Attrs());
  }
};
class PowOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class PowOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};
DECLARE_INPLACE_OP_INFERER(ActFwdInplaceInferer, {"X", "Out"});
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

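// REGISTER_ACTIVATION_OP(type, Name, functor, grad_functor) registers the
// forward op "type" with Name##OpMaker, hooks up the generic grad-op makers
// for both static and imperative modes, and enables in-place execution only
// when CanInplaceAct() allows it. A representative expansion (assuming tanh is
// part of the FOR_EACH_ACTIVATION_OP list in activation_op.h) would be:
//   REGISTER_ACTIVATION_OP(tanh, Tanh, TanhFunctor, TanhGradFunctor);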
#define REGISTER_ACTIVATION_OP(KERNEL_TYPE, OP_NAME, functor, grad_functor) \
  REGISTER_OPERATOR(                                                        \
      KERNEL_TYPE, ops::ActivationOp, ops::OP_NAME##OpMaker,                \
      ops::ActivationOpInferVarType,                                        \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::framework::OpDesc>,                \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::imperative::OpBase>,               \
      std::conditional<ops::CanInplaceAct<ops::grad_functor<float>>(),      \
                       ops::ActFwdInplaceInferer, void>::type);             \
  REGISTER_OPERATOR(KERNEL_TYPE##_grad, ops::ActivationOpGrad,              \
                    ops::ActivationGradOpInplaceInferer);

#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, op_name, functor,        \
                                       grad_functor)                      \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type, ops::ActivationKernel<paddle::platform::CPUDeviceContext, \
                                      ops::functor<float>>,               \
      ops::ActivationKernel<paddle::platform::CPUDeviceContext,           \
                            ops::functor<double>>);                       \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type##_grad,                                                    \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<float>>,                \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<double>>);

FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_OP);
FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CPU_KERNEL);

/* ==========================    relu register  ============================= */
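// relu is registered by hand rather than through FOR_EACH_ACTIVATION_OP so
// that its double-grad maker and the relu_grad_grad kernels can be attached.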
REGISTER_OPERATOR(
    relu, ops::ActivationOp, ops::ReluOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::ReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(relu, Relu, ReluFunctor, ReluGradFunctor);

REGISTER_OP_CPU_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ======================== leaky relu register  ============================ */
REGISTER_OPERATOR(
    leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LeakyReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LeakyReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    leaky_relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::LeakyReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(leaky_relu, LeakyRelu, LeakyReluFunctor,
                               LeakyReluGradFunctor);
REGISTER_OP_CPU_KERNEL(
    leaky_relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<
        plat::CPUDeviceContext, ops::LeakyReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ========================    elu  register     ============================ */
REGISTER_OPERATOR(
    elu, ops::ActivationOp, ops::ELUOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ELUDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ELUDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    elu_grad_grad,
    ops::ActivationOpDoubleGrad<ops::ELUGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(elu, ELU, ELUFunctor, ELUGradFunctor);
REGISTER_OP_CPU_KERNEL(
    elu_grad_grad, ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::ELUGradGradFunctor<float>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<double>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<plat::float16>>);

/* ========================================================================== */

/* ===========================   sqrt register  ============================= */
REGISTER_OPERATOR(
    sqrt, ops::ActivationOp, ops::SqrtOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(sqrt_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SqrtDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SqrtDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    sqrt_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SqrtGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(sqrt, Sqrt, SqrtFunctor, SqrtGradFunctor);
REGISTER_OP_CPU_KERNEL(
    sqrt_grad_grad, ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                                              ops::SqrtGradGradFunctor<float>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<double>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================   square register  ============================ */
REGISTER_OPERATOR(
    square, ops::ActivationOp, ops::SquareOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SquareDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SquareDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    square_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SquareGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(square,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    square_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                           ops::SquareGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int64_t>>);

REGISTER_OP_CPU_KERNEL(
    square_grad_grad,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<float>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<double>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<plat::float16>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   pow register  ============================ */

REGISTER_OPERATOR(
    pow, ops::PowOp, ops::PowOpMaker, ops::ActivationOpInferVarType,
    ops::PowGradOpMaker<paddle::framework::OpDesc>,
    ops::PowGradOpMaker<paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::PowGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(pow_grad, ops::PowOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(
    pow, ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<float>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<double>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    pow_grad,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<float>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<double>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   exp register  ============================ */
REGISTER_OPERATOR(
    exp, ops::ActivationOp, ops::ExpOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::ExpGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(exp,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    exp_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                        ops::ExpGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   abs register  ============================ */
REGISTER_OPERATOR(
    abs, ops::ActivationOp, ops::AbsOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::AbsGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::AbsGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::AbsGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(abs_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::AbsDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::AbsDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    abs_grad_grad,
    ops::ActivationOpDoubleGrad<ops::AbsGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(abs,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    abs_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                        ops::AbsGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::AbsGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::AbsGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::AbsGradFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    abs_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::AbsGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::AbsGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::AbsGradGradFunctor<plat::float16>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::AbsGradGradFunctor<int>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::AbsGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================  Log register ==================================*/
REGISTER_OPERATOR(
    log, ops::ActivationOp, ops::LogOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LogDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LogDoubleGradMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(
    log_grad_grad,
    ops::ActivationOpDoubleGrad<ops::LogGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(log, Log, LogFunctor, LogGradFunctor);

REGISTER_OP_CPU_KERNEL(
    log_grad_grad, ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================  register checkpoint ===========================*/
REGISTER_OP_VERSION(leaky_relu)
    .AddCheckpoint(
        R"ROC(fix leaky_relu, bahavior changed when alpha < 0 or alpha > 1)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "leaky_relu calculate formula before checkponit: out = max(x, "
                "alpha * x); after checkpoint: out = x if x > 0 else alpha * "
                "x"));

REGISTER_OP_VERSION(hard_shrink)
    .AddCheckpoint(
        R"ROC(fix hard_shrink, bahavior changed when threshold<0)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "hard_shrink calculate formula before checkponit: out = x * "
                "((x < -threshold) + (x > threshold)); after checkpoint: out = "
                "x * (((x < -threshold) + (x > threshold)) > 0)"));

REGISTER_OP_VERSION(softplus)
    .AddCheckpoint(
        R"ROC(add new attributes [beta] and [threshold], and the formula is changed to "
         " softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\ \\text{For numerical"
         " stability, the implementation reverts to the linear function when: beta * x > threshold.})ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("beta", "The beta value of the new formula", 1.0f)
            .NewAttr("threshold", "The threshold value of the new formula",
                     20.0f));

/* ========================================================================== */