/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"

#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h"
#include "paddle/fluid/platform/port.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace operators {

using paddle::framework::Tensor;

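// CanInplaceAct: an activation's forward output may reuse the input buffer
// only when its grad functor depends on Out alone (kDepOut) or on nothing
// (kNoDeps), i.e. the backward pass never needs to read the original X.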
template <typename GradFunctor>
static constexpr bool CanInplaceAct() {
  return GradFunctor::FwdDeps() == kDepOut || GradFunctor::FwdDeps() == kNoDeps;
}

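// REGISTER_ACTIVATION_OP_MAKER stamps out a minimal OpProtoAndCheckerMaker
// for a unary activation: a single input X, a single output Out, the
// use_mkldnn / use_cudnn switches, and the per-op comment passed in.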
#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)                    \
  class OP_NAME##OpMaker                                                     \
      : public ::paddle::framework::OpProtoAndCheckerMaker {                 \
   public:                                                                   \
    void Make() override {                                                   \
      AddInput("X", "Input of " #OP_NAME                                     \
                    " operator, an N-D Tensor, with data type float32, "     \
                    "float64 or float16.");                                  \
      AddOutput("Out", "Output of " #OP_NAME                                 \
                       " operator, a Tensor with shape same as input.");     \
      AddAttr<bool>("use_mkldnn",                                            \
                    "(bool, default false) Only used in mkldnn kernel")      \
          .SetDefault(false);                                                \
      AddAttr<bool>("use_cudnn",                                             \
                    "(bool, default false) Only used in cudnn kernel, need " \
                    "install cudnn")                                         \
          .SetDefault(false);                                                \
      AddComment(OP_COMMENT);                                                \
    }                                                                        \
  }

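// Default grad-op builder for activations. It creates "<forward_type>_grad",
// wires Out@GRAD -> X@GRAD, and forwards X and/or Out to the grad op
// according to kDepValue (X is also forwarded when MKL-DNN is in use).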
template <ActBwdOpFwdDeps kDepValue, typename T>
class ActivationGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetAttrMap(this->Attrs());

    if ((static_cast<int>(kDepValue) &
         static_cast<int>(ActBwdOpFwdDeps::kDepX)) ||
        FLAGS_use_mkldnn ||
        (op->HasAttr("use_mkldnn") &&
         BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn")))) {
      op->SetInput("X", this->Input("X"));
    }

    if (static_cast<int>(kDepValue) &
        static_cast<int>(ActBwdOpFwdDeps::kDepOut)) {
      op->SetInput("Out", this->Output("Out"));
    }
  }
};

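// GetKernelType picks the kernel key shared by the activation ops below:
// the data type is inferred from the named variable, and the MKL-DNN
// library/layout is selected when both the attribute and the runtime allow
// it (the cuDNN path is currently disabled, see the FIXME inside).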
framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
                                      const framework::OperatorWithKernel& oper,
                                      const std::string& name) {
  framework::LibraryType library{framework::LibraryType::kPlain};
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
// FIXME(liuwei1031) temporarily disable the code to unblock users
// TODO(liuwei1031) figure out the reason behind
// https://github.com/PaddlePaddle/Paddle/issues/16096
// and re-enable this in the future
// #ifdef PADDLE_WITH_CUDA
//   auto it1 = oper.Attrs().find("use_cudnn");
//   if (it1 != oper.Attrs().end() && platform::CanCUDNNBeUsed(ctx)) {
//     library = framework::LibraryType::kCUDNN;
//   }
// #endif
#ifdef PADDLE_WITH_MKLDNN
  auto it = oper.Attrs().find("use_mkldnn");
  if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif
  return framework::OpKernelType(oper.IndicateVarDataType(ctx, name),
                                 ctx.GetPlace(), layout, library);
}

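// Forward operator shared by the registered activations: Out simply takes
// the dims and LoD of X, and the kernel key comes from GetKernelType on "X".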
class ActivationOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }
};

class ActivationOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

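// Generic backward operator: X@GRAD mirrors the shape and LoD of Out@GRAD,
// and the kernel is chosen from the data type of Out@GRAD.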
class ActivationOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }
};

UNUSED constexpr char SigmoidDoc[] = R"DOC(
Sigmoid Activation Operator

$$out = \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char LogSigmoidDoc[] = R"DOC(
Logsigmoid Activation Operator

$$out = \\log \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char ExpDoc[] = R"DOC(
Exp Operator. Computes exp of x element-wise, with the natural constant :math:`e` as the base.

$$out = e^x$$

)DOC";

UNUSED constexpr char ReluDoc[] = R"DOC(
Relu Activation Operator.

$$out = \max(x, 0)$$

)DOC";

UNUSED constexpr char TanhDoc[] = R"DOC(
Tanh Activation Operator.

$$out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char TanhShrinkDoc[] = R"DOC(
TanhShrink Activation Operator.

$$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator.

.. math:: out=\\sqrt{x}=x^{1/2}

**Note**:
  Input value must be greater than or equal to zero.

)DOC";

UNUSED constexpr char RsqrtDoc[] = R"DOC(
Rsqrt Activation Operator.

Please make sure the input is valid to avoid numeric errors.

$$out = \\frac{1}{\\sqrt{x}}$$

)DOC";

UNUSED constexpr char AbsDoc[] = R"DOC(
Abs Operator.

$$out = |x|$$

)DOC";

UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.

$$out = \\left \\lceil x \\right \\rceil$$

)DOC";

UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.

$$out = \\left \\lfloor x \\right \\rfloor$$

)DOC";

UNUSED constexpr char CosDoc[] = R"DOC(
Cosine Operator. Computes cosine of x element-wise.

Input range is `(-inf, inf)` and output range is `[-1,1]`.

$$out = cos(x)$$

)DOC";

UNUSED constexpr char SinDoc[] = R"DOC(
Sine Activation Operator.

$$out = sin(x)$$

)DOC";

UNUSED constexpr char SinhDoc[] = R"DOC(
Sinh Activation Operator.

$$out = sinh(x)$$

)DOC";

UNUSED constexpr char CoshDoc[] = R"DOC(
Cosh Activation Operator.

$$out = cosh(x)$$

)DOC";

UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.

.. code-block:: python

  input:
    x.shape = [4]
    x.data = [1.2, -0.9, 3.4, 0.9]

  output:
    out.shape = [4]
    out.data = [1., -1., 3., 1.]

)DOC";

UNUSED constexpr char ReciprocalDoc[] = R"DOC(
Reciprocal Activation Operator.

$$out = \\frac{1}{x}$$

)DOC";

UNUSED constexpr char LogDoc[] = R"DOC(
Log Activation Operator.

$$out = \ln(x)$$

Natural logarithm of x.

)DOC";

UNUSED constexpr char Log1pDoc[] = R"DOC(
Log1p Activation Operator.

$$out = \ln(x+1)$$

Natural logarithm of (x + 1).

)DOC";

UNUSED constexpr char SquareDoc[] = R"DOC(
The OP squares each element of the input.

$$out = x^2$$

)DOC";

UNUSED constexpr char SoftsignDoc[] = R"DOC(
Softsign Activation Operator.

$$out = \\frac{x}{1 + \|x\|}$$

)DOC";

class AcosOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of acos operator");
    AddOutput("Out", "Output of acos operator");
    AddComment(R"DOC(
Arccosine Operator.

$$out = \cos^{-1}(x)$$

)DOC");
  }
};

class AsinOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of asin operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of asin operator");
    AddComment(R"DOC(
Arcsine Operator.

$$out = \sin^{-1}(x)$$

)DOC");
  }
};

class AtanOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of atan operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of atan operator");
    AddComment(R"DOC(
Arctangent Operator.

$$out = \tan^{-1}(x)$$

)DOC");
  }
};

class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "A LoDTensor or Tensor representing preactivation values. Must be "
             "one of the following types: float32, float64.");
    AddOutput(
        "Out",
        "A LoDTensor or Tensor with the same type and size as that of x.");
    AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
        .SetDefault(0.02f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
LeakyRelu Activation Operator.

$$out = \max(x, \alpha * x)$$

)DOC");
  }
};

class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of Softplus operator, an N-D Tensor, with data type "
             "float32, float64 or float16.");
    AddOutput(
        "Out",
        "Output of Softplus operator, a Tensor with shape same as input.");
    AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
    AddAttr<float>("threshold", "The value of threshold for Softplus.")
        .SetDefault(20.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel.")
        .SetDefault(false);
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in cudnn kernel, need install cudnn.")
        .SetDefault(false);
    AddComment(R"DOC(
:strong:`Softplus Activation Operator`

..  math::
    out = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
    \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.

)DOC");
  }
};

class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Softshrink operator");
    AddOutput("Out", "Output of Softshrink operator");
    AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`Softshrink Activation Operator`

..  math::
    out = \begin{cases}
         x - \lambda, \text{if } x > \lambda \\
         x + \lambda, \text{if } x < -\lambda \\
         0,  \text{otherwise}
         \end{cases}

)DOC");
  }
};

class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardShrink operator");
    AddOutput("Out", "Output of HardShrink operator");
    AddAttr<float>("threshold",
                   "The value of threshold for HardShrink. [default: 0.5]")
        .SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`HardShrink activation operator`

..  math::
    out = \begin{cases}
            x, \text{if } x > \lambda \\
            x, \text{if } x < -\lambda \\
            0,  \text{otherwise}
          \end{cases}

)DOC");
  }
};

class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32, float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``X``.");
    AddAttr<float>("t_min", "The min marginal value of BRelu")
        .SetDefault(static_cast<float>(0));
    AddAttr<float>("t_max", "The max marginal value of BRelu")
        .SetDefault(static_cast<float>(24));
    AddComment(R"DOC(
BRelu Activation Operator.

$$out = \min(\max(x, t_{min}), t_{max})$$

)DOC");
  }
};

class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of SoftRelu operator");
    AddOutput("Out", "Output of SoftRelu operator");
    AddAttr<float>("threshold", "The threshold value of SoftRelu")
        .SetDefault(40.0f);
    AddComment(R"DOC(
SoftRelu Activation Operator.

$$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$$

)DOC");
  }
};

class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32 or float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``x``.");
    AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
    AddComment(R"DOC(
ELU Activation Operator.

Applies the following element-wise computation on the input according to
https://arxiv.org/abs/1511.07289.

$$out = \max(0, x) + \min(0, \alpha * (e^x - 1))$$

)DOC");
  }
};

class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of relu6 operator, an N-D Tensor, "
             "with data type float32, float64.");
    AddOutput(
        "Out",
        "Output of relu6 operator, a Tensor with the same shape as input.");
    AddAttr<float>("threshold",
                   "The threshold value of Relu6. Default is 6.0. ")
        .SetDefault(6.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Relu6 Activation Operator.

$$out = \min(\max(0, x), threshold)$$

)DOC");
  }
};

class PowOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Pow operator");
    AddInput("FactorTensor",
             "(Tensor<float>, optional). If provided, pow will use this as "
             "the exponential factor. The shape of FactorTensor MUST BE [1]. "
             "It has higher priority than attr(factor).")
        .AsDispensable();
    AddOutput("Out", "Output of Pow operator");
    AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
    AddComment(R"DOC(
Pow Activation Operator.

$$out = x^{factor}$$

)DOC");
  }
};

class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of STanh operator."
             " A LoDTensor or Tensor with type float32, float64.");
    AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
    AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
        .SetDefault(0.67f);
    AddAttr<float>("scale_b", "The scale parameter of b for the input")
        .SetDefault(1.7159f);
    AddComment(R"DOC(
STanh Activation Operator.

$$out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$

)DOC");
  }
};

class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of ThresholdedRelu operator");
    AddOutput("Out", "Output of ThresholdedRelu operator");
    AddAttr<float>("threshold",
                   "The threshold location of activation. [default 1.0].")
        .SetDefault(1.0f);
    AddComment(R"DOC(
:strong:`ThresholdedRelu activation operator`

..  math::

    out = \begin{cases}
             x,  \text{if } x > threshold \\
             0,  \text{otherwise}
          \end{cases}
)DOC");
  }
};

class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "An N-D Tensor with data type float32, float64. ");
    AddOutput("Out", "A Tensor with the same shape as input. ");
    AddAttr<float>("slope",
                   "The slope of the linear approximation of sigmoid. Its "
                   "value MUST BE positive. Default is 0.2. ")
        .SetDefault(0.2f);
    AddAttr<float>(
        "offset",
        "The offset of the linear approximation of sigmoid. Default is 0.5. ")
        .SetDefault(0.5f);
    AddComment(R"DOC(
HardSigmoid Activation Operator.

A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.

$$out = \max(0, \min(1, slope * x + offset))$$

)DOC");
  }
};

class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Swish operator");
    AddOutput("Out", "Output of Swish operator");
    AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Swish Activation Operator.

$$out = \\frac{x}{1 + e^{- \beta \ x}}$$

)DOC");
  }
};

class HardSwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardSwish operator");
    AddOutput("Out", "Output of HardSwish operator");
    AddAttr<float>("threshold", "The threshold parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("scale", "The scale parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("offset", "The offset parameter of HardSwish operator")
        .SetDefault(3.0f);
    AddComment(R"DOC(
HardSwish Activation Operator.

The hard version of swish (https://arxiv.org/pdf/1905.02244.pdf).

$$out = \frac{x * (min(max(0, x+offset), threshold))}{scale}$$

The threshold and scale should be positive. The offset can be either positive or negative.
The default parameters are set according to the above reference.
It is recommended to use the defaults for this activation.

)DOC");
  }
};

REGISTER_ACTIVATION_OP_MAKER(Sigmoid, SigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(LogSigmoid, LogSigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Exp, ExpDoc);
REGISTER_ACTIVATION_OP_MAKER(Relu, ReluDoc);
REGISTER_ACTIVATION_OP_MAKER(Tanh, TanhDoc);
REGISTER_ACTIVATION_OP_MAKER(TanhShrink, TanhShrinkDoc);
REGISTER_ACTIVATION_OP_MAKER(Sqrt, SqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Rsqrt, RsqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Abs, AbsDoc);
REGISTER_ACTIVATION_OP_MAKER(Ceil, CeilDoc);
REGISTER_ACTIVATION_OP_MAKER(Floor, FloorDoc);
REGISTER_ACTIVATION_OP_MAKER(Cos, CosDoc);
REGISTER_ACTIVATION_OP_MAKER(Sin, SinDoc);
REGISTER_ACTIVATION_OP_MAKER(Sinh, SinhDoc);
REGISTER_ACTIVATION_OP_MAKER(Cosh, CoshDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);

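// Double-grad operator. Depending on which forward tensors the grad functor
// needs (kDepX / kDepOut), the optional outputs DX, DOut and DDOut share the
// shape and LoD of X or Out; the kernel type follows the DDX input.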
template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DX")) {
        ctx->ShareDim("X", "DX");
        ctx->ShareLoD("X", "DX");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DOut")) {
        ctx->ShareDim("Out", "DOut");
        ctx->ShareLoD("Out", "DOut");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

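// Variant of the double-grad op that only produces DDOut (no DX/DOut); it is
// used below for relu_grad_grad and leaky_relu_grad_grad.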
template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad2 : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

//
// ReluGrad: dx = dy if y >= 0 else 0
// ReluGradGrad: ddy = ddx if y >= 0 else 0
//
template <typename T>
class ReluDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("relu_grad_grad");
    // input1: Out
    op->SetInput("Out", this->Input("Out"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// leaky_relu Grad: dx=dy if x>=0 else alpha * dy
// leaky_relu GradGrad: ddy=ddx if x>=0 else alpha * ddx
template <typename T>
class LeakyReluDoubleGradMaker
    : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("leaky_relu_grad_grad");
    // input1: X
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// elu grad: dx=dy if y>0 else alpha*dy*x.exp()
// elu gradgrad: ddx=ddy if y>0 else alpha*ddy*x.exp()
template <typename T>
class ELUDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("elu_grad_grad");

    op->SetInput("X", this->Input("X"));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());

    // Out@GRAD@GRAD: ddy
    op->SetOutput("DX", this->InputGrad("X"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// sqrt Grad: dx = 0.5 * dy / y
// sqrt GradGrad: ddy = 0.5 * ddx / y, dy = -1 * dx * ddx
template <typename T>
class SqrtDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("sqrt_grad_grad");
    op->SetInput("Out", this->Input("Out"));
    op->SetInput("DX", this->Output(framework::GradVarName("X")));
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    op->SetOutput("DOut", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// square Grad: dx=2x*dy
// square GradGrad: ddy=2x*ddx, dx=2dy*ddx
template <typename T>
class SquareDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("square_grad_grad");
    op->SetInput("X", this->Input("X"));
    // Out@GRAD: dy
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));

    op->SetAttrMap(this->Attrs());

    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

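// Inplace hints: the grad op may reuse the Out@GRAD buffer for X@GRAD, and
// the double-grad ops may reuse DDX for DDOut, when the runtime decides the
// reuse is safe.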
DECLARE_INPLACE_OP_INFERER(ActivationGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ActivationDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

template <typename T>
class PowGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("pow_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetInput("FactorTensor", this->Input("FactorTensor"));
    op->SetAttrMap(this->Attrs());
  }
};
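// Pow gets its own operator classes (instead of ActivationOp/ActivationOpGrad)
// because of the optional FactorTensor input: GetKernelTypeForVar is
// overridden so FactorTensor is left as-is rather than being transformed to
// the kernel's expected type.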
class PowOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class PowOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};
DECLARE_INPLACE_OP_INFERER(ActFwdInplaceInferer, {"X", "Out"});
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

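// REGISTER_ACTIVATION_OP registers the forward op (with its maker, var-type
// inference, grad-op makers for both static and imperative modes, and the
// inplace inferer when CanInplaceAct allows it) together with the matching
// _grad op.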
#define REGISTER_ACTIVATION_OP(KERNEL_TYPE, OP_NAME, functor, grad_functor) \
  REGISTER_OPERATOR(                                                        \
      KERNEL_TYPE, ops::ActivationOp, ops::OP_NAME##OpMaker,                \
      ops::ActivationOpInferVarType,                                        \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::framework::OpDesc>,                \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::imperative::OpBase>,               \
      std::conditional<ops::CanInplaceAct<ops::grad_functor<float>>(),      \
                       ops::ActFwdInplaceInferer, void>::type);             \
  REGISTER_OPERATOR(KERNEL_TYPE##_grad, ops::ActivationOpGrad,              \
                    ops::ActivationGradOpInplaceInferer);

#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, op_name, functor,        \
                                       grad_functor)                      \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type, ops::ActivationKernel<paddle::platform::CPUDeviceContext, \
                                      ops::functor<float>>,               \
      ops::ActivationKernel<paddle::platform::CPUDeviceContext,           \
                            ops::functor<double>>);                       \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type##_grad,                                                    \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<float>>,                \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<double>>);

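// Instantiate the generic operator and CPU kernel registrations for every
// activation listed in FOR_EACH_ACTIVATION_OP (declared in activation_op.h);
// ops with extra inputs/attrs or double-grad support are registered
// individually below.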
FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_OP);
FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CPU_KERNEL);

/* ==========================    relu register  ============================= */
REGISTER_OPERATOR(
    relu, ops::ActivationOp, ops::ReluOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::ReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(relu, Relu, ReluFunctor, ReluGradFunctor);

REGISTER_OP_CPU_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ======================== leaky relu register  ============================ */
REGISTER_OPERATOR(
    leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LeakyReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LeakyReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    leaky_relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::LeakyReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(leaky_relu, LeakyRelu, LeakyReluFunctor,
                               LeakyReluGradFunctor);
REGISTER_OP_CPU_KERNEL(
    leaky_relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<
        plat::CPUDeviceContext, ops::LeakyReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ========================    elu  register     ============================ */
REGISTER_OPERATOR(
    elu, ops::ActivationOp, ops::ELUOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ELUDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ELUDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    elu_grad_grad,
    ops::ActivationOpDoubleGrad<ops::ELUGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(elu, ELU, ELUFunctor, ELUGradFunctor);
REGISTER_OP_CPU_KERNEL(
    elu_grad_grad, ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::ELUGradGradFunctor<float>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<double>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<plat::float16>>);

/* ========================================================================== */

/* ===========================   sqrt register  ============================= */
REGISTER_OPERATOR(
    sqrt, ops::ActivationOp, ops::SqrtOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(sqrt_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SqrtDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SqrtDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    sqrt_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SqrtGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(sqrt, Sqrt, SqrtFunctor, SqrtGradFunctor);
REGISTER_OP_CPU_KERNEL(
    sqrt_grad_grad, ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                                              ops::SqrtGradGradFunctor<float>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<double>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================   square register  ============================ */
REGISTER_OPERATOR(
    square, ops::ActivationOp, ops::SquareOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SquareDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SquareDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    square_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SquareGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(square,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    square_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                           ops::SquareGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int64_t>>);

REGISTER_OP_CPU_KERNEL(
    square_grad_grad,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<float>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<double>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<plat::float16>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   pow register  ============================ */

REGISTER_OPERATOR(
    pow, ops::PowOp, ops::PowOpMaker, ops::ActivationOpInferVarType,
    ops::PowGradOpMaker<paddle::framework::OpDesc>,
    ops::PowGradOpMaker<paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::PowGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(pow_grad, ops::PowOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(
    pow, ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<float>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<double>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    pow_grad,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<float>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<double>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   exp register  ============================ */
REGISTER_OPERATOR(
    exp, ops::ActivationOp, ops::ExpOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::ExpGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(exp,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    exp_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                        ops::ExpGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   abs register  ============================ */
REGISTER_OPERATOR(
    abs, ops::ActivationOp, ops::AbsOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::AbsGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::AbsGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::AbsGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(abs_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(abs,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::AbsFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    abs_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                        ops::AbsGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::AbsGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::AbsGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::AbsGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================  register checkpoint ===========================*/
REGISTER_OP_VERSION(leaky_relu)
    .AddCheckpoint(
        R"ROC(fix leaky_relu, bahavior changed when alpha < 0 or alpha > 1)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "leaky_relu calculate formula before checkponit: out = max(x, "
                "alpha * x); after checkpoint: out = x if x > 0 else alpha * "
                "x"));

REGISTER_OP_VERSION(hard_shrink)
    .AddCheckpoint(
        R"ROC(fix hard_shrink, bahavior changed when threshold<0)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "hard_shrink calculate formula before checkponit: out = x * "
                "((x < -threshold) + (x > threshold)); after checkpoint: out = "
                "x * (((x < -threshold) + (x > threshold)) > 0)"));

REGISTER_OP_VERSION(softplus)
    .AddCheckpoint(
        R"ROC(add new attributes [beta] and [threshold], and the formula is changed to "
         " softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\ \\text{For numerical"
         " stability, the implementation reverts to the linear function when: beta * x > threshold.})ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("beta", "The beta value of the new formula", 1.0f)
            .NewAttr("threshold", "The threshold value of the new formula",
                     20.0f));

/* ========================================================================== */