/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"

#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h"
#include "paddle/fluid/platform/port.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace operators {

using paddle::framework::Tensor;

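// Whether an activation may be computed in place (Out reusing X's buffer):
// this is only safe when the backward functor depends on Out alone (kDepOut)
// or on nothing (kNoDeps); a functor that reads X during the backward pass
// would otherwise see an overwritten buffer.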
template <typename GradFunctor>
static constexpr bool CanInplaceAct() {
  return GradFunctor::FwdDeps() == kDepOut || GradFunctor::FwdDeps() == kNoDeps;
}

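// Boilerplate proto maker for simple activations. For example,
// REGISTER_ACTIVATION_OP_MAKER(Foo, FooDoc) defines a FooOpMaker with a single
// input X, a single output Out, the use_mkldnn / use_cudnn attributes, and
// FooDoc as the op comment (Foo / FooDoc are placeholder names, not real ops).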
#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)                    \
  class OP_NAME##OpMaker                                                     \
      : public ::paddle::framework::OpProtoAndCheckerMaker {                 \
   public:                                                                   \
    void Make() override {                                                   \
      AddInput("X", "Input of " #OP_NAME                                     \
                    " operator, an N-D Tensor, with data type float32, "     \
                    "float64 or float16.");                                  \
      AddOutput("Out", "Output of " #OP_NAME                                 \
                       " operator, a Tensor with shape same as input.");     \
      AddAttr<bool>("use_mkldnn",                                            \
                    "(bool, default false) Only used in mkldnn kernel")      \
          .SetDefault(false);                                                \
      AddAttr<bool>("use_cudnn",                                             \
                    "(bool, default false) Only used in cudnn kernel, need " \
                    "install cudnn")                                         \
          .SetDefault(false);                                                \
      AddComment(OP_COMMENT);                                                \
    }                                                                        \
  }

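// Generic grad-op maker: it always forwards Out@GRAD and the attribute map to
// the "<forward_op>_grad" op, and forwards X and/or Out as well, depending on
// the functor's declared dependency (kDepX / kDepOut). Illustrative wiring
// (a sketch, not generated code) for an Out-dependent activation such as tanh:
//
//   tanh_grad(Out, Out@GRAD) -> X@GRAD
//
// An X-dependent functor receives X instead of Out.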
template <ActBwdOpFwdDeps kDepValue, typename T>
class ActivationGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetAttrMap(this->Attrs());
    if ((static_cast<int>(kDepValue) &
         static_cast<int>(ActBwdOpFwdDeps::kDepX)) ||
        FLAGS_use_mkldnn ||
        (op->HasAttr("use_mkldnn") &&
         BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn")))) {
      op->SetInput("X", this->Input("X"));
    }

    if (static_cast<int>(kDepValue) &
        static_cast<int>(ActBwdOpFwdDeps::kDepOut)) {
      op->SetInput("Out", this->Output("Out"));
    }
  }
};

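// Kernel dispatch shared by the activation ops: start from the plain library
// and default layout, then switch to MKLDNN (and its layout) when the
// attribute is set and the runtime allows it. The cuDNN branch stays commented
// out below until https://github.com/PaddlePaddle/Paddle/issues/16096 is
// resolved.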
framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
                                      const framework::OperatorWithKernel& oper,
                                      const std::string& name) {
  framework::LibraryType library{framework::LibraryType::kPlain};
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
// FIXME(liuwei1031) temporarily disable the code to unblock users
// TODO(liuwei1031) figure out the reason behind
// https://github.com/PaddlePaddle/Paddle/issues/16096
// and re-enable this in the future
// #ifdef PADDLE_WITH_CUDA
//   auto it1 = oper.Attrs().find("use_cudnn");
//   if (it1 != oper.Attrs().end() && platform::CanCUDNNBeUsed(ctx)) {
//     library = framework::LibraryType::kCUDNN;
//   }
// #endif
#ifdef PADDLE_WITH_MKLDNN
  auto it = oper.Attrs().find("use_mkldnn");
  if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
      oper.CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif
  return framework::OpKernelType(oper.IndicateVarDataType(ctx, name),
                                 ctx.GetPlace(), layout, library);
}

class ActivationOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }
};

class ActivationOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ActivationOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }
};

UNUSED constexpr char SigmoidDoc[] = R"DOC(
Sigmoid Activation Operator

$$out = \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char LogSigmoidDoc[] = R"DOC(
Logsigmoid Activation Operator

$$out = \\log \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char ExpDoc[] = R"DOC(
Exp Operator. Computes exp of x element-wise with a natural number :math:`e` as the base.

$$out = e^x$$

)DOC";

UNUSED constexpr char ReluDoc[] = R"DOC(
Relu Activation Operator.

$$out = \max(x, 0)$$

)DOC";

UNUSED constexpr char TanhDoc[] = R"DOC(
Tanh Activation Operator.

$$out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char TanhShrinkDoc[] = R"DOC(
TanhShrink Activation Operator.

$$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator.

$$out=\\sqrt{x}=x^{1/2}$$

**Note**:
  input value must be greater than or equal to zero.

)DOC";

UNUSED constexpr char RsqrtDoc[] = R"DOC(
Rsqrt Activation Operator.

Please make sure the input is positive to avoid numeric errors.

$$out = \\frac{1}{\\sqrt{x}}$$

)DOC";

UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.

$$out = \\lceil x \\rceil$$

)DOC";

UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.

$$out = \\lfloor x \\rfloor$$

)DOC";

UNUSED constexpr char CosDoc[] = R"DOC(
Cosine Operator. Computes cosine of x element-wise.

Input range is `(-inf, inf)` and output range is `[-1,1]`.

$$out = cos(x)$$

)DOC";

UNUSED constexpr char SinDoc[] = R"DOC(
Sine Activation Operator.

$$out = sin(x)$$

)DOC";

UNUSED constexpr char SinhDoc[] = R"DOC(
Sinh Activation Operator.

$$out = sinh(x)$$

)DOC";

UNUSED constexpr char CoshDoc[] = R"DOC(
Cosh Activation Operator.

$$out = cosh(x)$$

)DOC";

UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.

.. code-block:: text

  input:
    x.shape = [4]
    x.data = [1.2, -0.9, 3.4, 0.9]

  output:
    out.shape = [4]
    out.data = [1., -1., 3., 1.]

)DOC";

UNUSED constexpr char ReciprocalDoc[] = R"DOC(
Reciprocal Activation Operator.

$$out = \\frac{1}{x}$$

)DOC";

UNUSED constexpr char LogDoc[] = R"DOC(
Log Activation Operator.

$$out = \ln(x)$$

Natural logarithm of x.

)DOC";

UNUSED constexpr char Log2Doc[] = R"DOC(
Log2 Activation Operator.

$$out = \log_2 x$$

Base-2 logarithm of x.

)DOC";

UNUSED constexpr char Log10Doc[] = R"DOC(
Log10 Activation Operator.

$$out = \log_{10} x$$

Base-10 logarithm of x.

)DOC";

UNUSED constexpr char Log1pDoc[] = R"DOC(
Log1p Activation Operator.

$$out = \ln(x+1)$$

Natural logarithm of (x + 1).

)DOC";

UNUSED constexpr char SquareDoc[] = R"DOC(
The OP squares each element of the input.

$$out = x^2$$

)DOC";

UNUSED constexpr char SoftsignDoc[] = R"DOC(
Softsign Activation Operator.

$$out = \\frac{x}{1 + \|x\|}$$

)DOC";

class AcosOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of acos operator");
    AddOutput("Out", "Output of acos operator");
    AddComment(R"DOC(
Arccosine Operator.

$$out = \cos^{-1}(x)$$

)DOC");
  }
};
class AsinOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of asin operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of asin operator");
    AddComment(R"DOC(
Arcsine Operator.

$$out = \sin^{-1}(x)$$

)DOC");
  }
};
class AtanOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of atan operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of atan operator");
    AddComment(R"DOC(
Arctangent Operator.

$$out = \tan^{-1}(x)$$

)DOC");
  }
};
class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "A LoDTensor or Tensor representing preactivation values. Must be "
             "one of the following types: float32, float64.");
    AddOutput(
        "Out",
        "A LoDTensor or Tensor with the same type and size as that of x.");
    AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
        .SetDefault(0.02f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
LeakyRelu Activation Operator.
$$out = \max(x, \alpha * x)$$

)DOC");
  }
};

class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of Softplus operator, an N-D Tensor, with data type "
             "float32, float64 or float16.");
    AddOutput(
        "Out",
        "Output of Softplus operator, a Tensor with shape same as input.");
    AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
    AddAttr<float>("threshold", "The value of threshold for Softplus.")
        .SetDefault(20.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel.")
        .SetDefault(false);
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in cudnn kernel, need install cudnn.")
        .SetDefault(false);
    AddComment(R"DOC(
:strong:`Softplus Activation Operator`

..  math::
    out = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
    \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.

)DOC");
  }
};

class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Softshrink operator");
    AddOutput("Out", "Output of Softshrink operator");
    AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`Softshrink Activation Operator`

..  math::
    out = \begin{cases}
         x - \lambda, \text{if } x > \lambda \\
         x + \lambda, \text{if } x < -\lambda \\
         0,  \text{otherwise}
         \end{cases}

)DOC");
  }
};

class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardShrink operator");
    AddOutput("Out", "Output of HardShrink operator");
    AddAttr<float>("threshold",
                   "The value of threshold for HardShrink. [default: 0.5]")
        .SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`HardShrink activation operator`
..  math::
    out = \begin{cases}
            x, \text{if } x > \lambda \\
            x, \text{if } x < -\lambda \\
            0,  \text{otherwise}
          \end{cases}

)DOC");
  }
};

class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32, float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``X``.");
    AddAttr<float>("t_min", "The min marginal value of BRelu")
        .SetDefault(static_cast<float>(0));
    AddAttr<float>("t_max", "The max marginal value of BRelu")
        .SetDefault(static_cast<float>(24));
    AddComment(R"DOC(
BRelu Activation Operator.
$$out = \min(\max(x, t_{min}), t_{max})$$

)DOC");
  }
};

class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of SoftRelu operator");
    AddOutput("Out", "Output of SoftRelu operator");
    AddAttr<float>("threshold", "The threshold value of SoftRelu")
        .SetDefault(40.0f);
    AddComment(R"DOC(
SoftRelu Activation Operator.
$$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$$

)DOC");
  }
};

class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32 or float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``x``.");
    AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
    AddComment(R"DOC(
ELU Activation Operator.

Applies the following element-wise computation on the input according to
https://arxiv.org/abs/1511.07289.

$$out = \max(0, x) + \min(0, \alpha * (e^x - 1))$$

)DOC");
  }
};

class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of relu6 operator, an N-D Tensor, "
             "with data type float32, float64.");
    AddOutput(
        "Out",
        "Output of relu6 operator, a Tensor with the same shape as input.");
    AddAttr<float>("threshold",
                   "The threshold value of Relu6. Default is 6.0. ")
        .SetDefault(6.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Relu6 Activation Operator.
$$out = \min(\max(0, x), threshold)$$

)DOC");
  }
};

class PowOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Pow operator");
    AddInput("FactorTensor",
             "(Tensor<float>, optional). If provided, pow will use this "
             "as the factor. The shape of FactorTensor MUST BE [1]. "
             "It has higher priority than attr(factor).")
        .AsDispensable();
    AddOutput("Out", "Output of Pow operator");
    AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
    AddComment(R"DOC(
Pow Activation Operator.
$$out = x^{factor}$$

)DOC");
  }
};

class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of STanh operator."
             " A Tensor with type float32, float64.");
    AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
    AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
        .SetDefault(0.67f);
    AddAttr<float>("scale_b", "The scale parameter of b for the input")
        .SetDefault(1.7159f);
    AddComment(R"DOC(
STanh Activation Operator.
$$out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$

)DOC");
  }
};

class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of ThresholdedRelu operator");
    AddOutput("Out", "Output of ThresholdedRelu operator");
    AddAttr<float>("threshold",
                   "The threshold location of activation. [default 1.0].")
        .SetDefault(1.0f);
    AddComment(R"DOC(
:strong:`ThresholdedRelu activation operator`

..  math::

    out = \begin{cases}
             x,  \text{if } x > threshold \\
             0,  \text{otherwise}
          \end{cases}
)DOC");
  }
};

class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "An N-D Tensor with data type float32, float64. ");
    AddOutput("Out", "A Tensor with the same shape as input. ");
    AddAttr<float>("slope",
                   "The slope of the linear approximation of sigmoid. Its "
                   "value MUST BE positive. Default is 0.2. ")
        .SetDefault(0.2f);
    AddAttr<float>(
        "offset",
        "The offset of the linear approximation of sigmoid. Default is 0.5. ")
        .SetDefault(0.5f);
    AddComment(R"DOC(
HardSigmoid Activation Operator.
A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.
$$out = \max(0, \min(1, slope * x + offset))$$
)DOC");
  }
};

class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Swish operator");
    AddOutput("Out", "Output of Swish operator");
    AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Swish Activation Operator.

$$out = \\frac{x}{1 + e^{- \beta \ x}}$$

)DOC");
  }
};

class HardSwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardSwish operator");
    AddOutput("Out", "Output of HardSwish operator");
    AddAttr<float>("threshold", "The threshold parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("scale", "The scale parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("offset", "The offset parameter of HardSwish operator")
        .SetDefault(3.0f);
    AddComment(R"DOC(
HardSwish Activation Operator.

The hard version of swish(https://arxiv.org/pdf/1905.02244.pdf).

$$out = \frac{x * (min(max(0, x+offset), threshold))}{scale}$$

The threshold and scale should be positive. The offset can be either positive or negative.
The default parameters are set according to the above reference.
It is recommended to use the defaults for this activation.

)DOC");
  }
};

REGISTER_ACTIVATION_OP_MAKER(Sigmoid, SigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(LogSigmoid, LogSigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Exp, ExpDoc);
REGISTER_ACTIVATION_OP_MAKER(Relu, ReluDoc);
REGISTER_ACTIVATION_OP_MAKER(Tanh, TanhDoc);
REGISTER_ACTIVATION_OP_MAKER(TanhShrink, TanhShrinkDoc);
REGISTER_ACTIVATION_OP_MAKER(Sqrt, SqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Rsqrt, RsqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Ceil, CeilDoc);
REGISTER_ACTIVATION_OP_MAKER(Floor, FloorDoc);
REGISTER_ACTIVATION_OP_MAKER(Cos, CosDoc);
REGISTER_ACTIVATION_OP_MAKER(Sin, SinDoc);
REGISTER_ACTIVATION_OP_MAKER(Sinh, SinhDoc);
REGISTER_ACTIVATION_OP_MAKER(Cosh, CoshDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
REGISTER_ACTIVATION_OP_MAKER(Log2, Log2Doc);
REGISTER_ACTIVATION_OP_MAKER(Log10, Log10Doc);
REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);

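// The double-grad ops below share a naming convention: DDX is the incoming
// second-order gradient (the gradient of X@GRAD), DDOut is the produced
// second-order gradient of Out, and DX / DOut are the additional first-order
// gradients some functors emit. ActivationOpDoubleGrad also infers shapes for
// DX / DOut, while ActivationOpDoubleGrad2 only handles DDOut.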
template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DX")) {
        ctx->ShareDim("X", "DX");
        ctx->ShareLoD("X", "DX");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DOut")) {
        ctx->ShareDim("Out", "DOut");
        ctx->ShareLoD("Out", "DOut");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad2 : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

// ReluGrad: dx = dy if y >= 0 else 0
// ReluGradGrad: ddy = ddx if y >= 0 else 0
template <typename T>
class ReluDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("relu_grad_grad");
    // input1: Out
    op->SetInput("Out", this->Input("Out"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// leaky_relu Grad: dx=dy if x>=0 else alpha * dy
// leaky_relu GradGrad: ddy=ddx if x>=0 else alpha * ddx
template <typename T>
class LeakyReluDoubleGradMaker
    : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("leaky_relu_grad_grad");
    // input1: X
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// elu grad: dx=dy if y>0 else alpha*dy*x.exp()
// elu gradgrad: ddx=ddy if y>0 else alpha*ddy*x.exp()
template <typename T>
class ELUDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("elu_grad_grad");

    op->SetInput("X", this->Input("X"));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());

    // Out@GRAD@GRAD: ddy
    op->SetOutput("DX", this->InputGrad("X"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// sqrt Grad: dx = 0.5 * dy / y
// sqrt GradGrad: ddy = 0.5 * ddx / y, dy = -1 * dx * ddx
template <typename T>
class SqrtDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("sqrt_grad_grad");
    op->SetInput("Out", this->Input("Out"));
    op->SetInput("DX", this->Output(framework::GradVarName("X")));
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    op->SetOutput("DOut", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// rsqrt Grad: dx = -0.5 * dy * y * y * y
// rsqrt GradGrad: ddy = -0.5 * ddx * y * y * y, dy = (3/y) * ddx
template <typename T>
class RsqrtDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("rsqrt_grad_grad");
    op->SetInput("Out", this->Input("Out"));
    op->SetInput("DX", this->Output(framework::GradVarName("X")));
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    op->SetOutput("DOut", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// square Grad: dx=2x*dy
// square GradGrad: ddy=2x*ddx, dx=2dy*ddx
template <typename T>
class SquareDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("square_grad_grad");
    op->SetInput("X", this->Input("X"));
    // Out@GRAD: dy
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());

    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// log Grad: dx = dout / x
// log Grad Grad: ddout = ddx / x; dx = -(dout / x) * (ddx / x)
template <typename T>
class LogDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("log_grad_grad");
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

DECLARE_INPLACE_OP_INFERER(ActivationGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ActivationDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});
template <typename T>
class PowGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("pow_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetInput("FactorTensor", this->Input("FactorTensor"));
    op->SetAttrMap(this->Attrs());
  }
};
class PowOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class PowOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};
DECLARE_INPLACE_OP_INFERER(ActFwdInplaceInferer, {"X", "Out"});
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;
#define REGISTER_ACTIVATION_OP(KERNEL_TYPE, OP_NAME, functor, grad_functor) \
  REGISTER_OPERATOR(                                                        \
      KERNEL_TYPE, ops::ActivationOp, ops::OP_NAME##OpMaker,                \
      ops::ActivationOpInferVarType,                                        \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::framework::OpDesc>,                \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::imperative::OpBase>,               \
      std::conditional<ops::CanInplaceAct<ops::grad_functor<float>>(),      \
                       ops::ActFwdInplaceInferer, void>::type);             \
  REGISTER_OPERATOR(KERNEL_TYPE##_grad, ops::ActivationOpGrad,              \
                    ops::ActivationGradOpInplaceInferer);

#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, op_name, functor,        \
                                       grad_functor)                      \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type, ops::ActivationKernel<paddle::platform::CPUDeviceContext, \
                                      ops::functor<float>>,               \
      ops::ActivationKernel<paddle::platform::CPUDeviceContext,           \
                            ops::functor<double>>);                       \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type##_grad,                                                    \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<float>>,                \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<double>>);
FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_OP);
FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CPU_KERNEL);
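// The two invocations above stamp out the operator, its grad op, and the CPU
// kernels for every activation listed in FOR_EACH_ACTIVATION_OP (declared in
// the included activation_op.h). Illustrative expansion for one entry (a
// sketch, not literal preprocessor output):
//
//   REGISTER_ACTIVATION_OP(tanh, Tanh, TanhFunctor, TanhGradFunctor);
//   REGISTER_ACTIVATION_CPU_KERNEL(tanh, Tanh, TanhFunctor, TanhGradFunctor);
//
// Activations that need extra double-grad or integer-kernel plumbing (relu,
// leaky_relu, elu, sqrt, rsqrt, square, pow, exp, log) are registered
// individually below.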
/* ==========================    relu register  ============================= */
REGISTER_OPERATOR(
    relu, ops::ActivationOp, ops::ReluOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::ReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(relu, Relu, ReluFunctor, ReluGradFunctor);

REGISTER_OP_CPU_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ======================== leaky relu register  ============================ */
REGISTER_OPERATOR(
    leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LeakyReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LeakyReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    leaky_relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::LeakyReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);
REGISTER_ACTIVATION_CPU_KERNEL(leaky_relu, LeakyRelu, LeakyReluFunctor,
                               LeakyReluGradFunctor);
REGISTER_OP_CPU_KERNEL(
    leaky_relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<
        plat::CPUDeviceContext, ops::LeakyReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ========================    elu  register     ============================ */
REGISTER_OPERATOR(
    elu, ops::ActivationOp, ops::ELUOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ELUDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ELUDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    elu_grad_grad,
    ops::ActivationOpDoubleGrad<ops::ELUGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(elu, ELU, ELUFunctor, ELUGradFunctor);
REGISTER_OP_CPU_KERNEL(
    elu_grad_grad, ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::ELUGradGradFunctor<float>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<double>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<plat::float16>>);

/* ========================================================================== */

/* ===========================   sqrt register  ============================= */
REGISTER_OPERATOR(
    sqrt, ops::ActivationOp, ops::SqrtOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(sqrt_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SqrtDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SqrtDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    sqrt_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SqrtGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);
REGISTER_ACTIVATION_CPU_KERNEL(sqrt, Sqrt, SqrtFunctor, SqrtGradFunctor);
REGISTER_OP_CPU_KERNEL(
    sqrt_grad_grad, ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                                              ops::SqrtGradGradFunctor<float>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<double>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================   rsqrt register  =============================
 */
REGISTER_OPERATOR(
    rsqrt, ops::ActivationOp, ops::RsqrtOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::RsqrtGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::RsqrtGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(rsqrt_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::RsqrtDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::RsqrtDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    rsqrt_grad_grad,
    ops::ActivationOpDoubleGrad<ops::RsqrtGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(rsqrt, Rsqrt, RsqrtFunctor, RsqrtGradFunctor);
REGISTER_OP_CPU_KERNEL(
    rsqrt_grad_grad,
    ops::RsqrtDoubleGradKernel<plat::CPUDeviceContext,
                               ops::RsqrtGradGradFunctor<float>>,
    ops::RsqrtDoubleGradKernel<plat::CPUDeviceContext,
                               ops::RsqrtGradGradFunctor<double>>,
    ops::RsqrtDoubleGradKernel<plat::CPUDeviceContext,
                               ops::RsqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================   square register  ============================ */
REGISTER_OPERATOR(
    square, ops::ActivationOp, ops::SquareOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SquareDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SquareDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    square_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SquareGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);
REGISTER_OP_CPU_KERNEL(square,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    square_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                           ops::SquareGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int64_t>>);

REGISTER_OP_CPU_KERNEL(
    square_grad_grad,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<float>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<double>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
1224 1225 1226 1227 1228
                                ops::SquareGradGradFunctor<plat::float16>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int64_t>>);
1229
/* ========================================================================== */
1230 1231 1232 1233 1234

/* ==========================   pow register  ============================ */

REGISTER_OPERATOR(
    pow, ops::PowOp, ops::PowOpMaker, ops::ActivationOpInferVarType,
H
hong 已提交
1235 1236
    ops::PowGradOpMaker<paddle::framework::OpDesc>,
    ops::PowGradOpMaker<paddle::imperative::OpBase>,
1237
    std::conditional<ops::CanInplaceAct<ops::PowGradFunctor<float>>(),
1238
                     ops::ActFwdInplaceInferer, void>::type);
1239
REGISTER_OPERATOR(pow_grad, ops::PowOpGrad,
1240
                  ops::ActivationGradOpInplaceInferer);
1241 1242 1243

REGISTER_OP_CPU_KERNEL(
    pow, ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<float>>,
1244 1245 1246
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<double>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int64_t>>);
1247 1248 1249
REGISTER_OP_CPU_KERNEL(
    pow_grad,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<float>>,
1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<double>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   exp register  ============================ */
REGISTER_OPERATOR(
    exp, ops::ActivationOp, ops::ExpOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::ExpGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad,
1265
                  ops::ActivationGradOpInplaceInferer);
1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285

REGISTER_OP_CPU_KERNEL(exp,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    exp_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                        ops::ExpGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int64_t>>);
/* ========================================================================== */
/* ==========================  Log register ==================================*/
REGISTER_OPERATOR(
    log, ops::ActivationOp, ops::LogOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LogDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LogDoubleGradMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(
    log_grad_grad,
    ops::ActivationOpDoubleGrad<ops::LogGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(log, Log, LogFunctor, LogGradFunctor);

REGISTER_OP_CPU_KERNEL(
    log_grad_grad, ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================  register checkpoint ===========================*/
REGISTER_OP_VERSION(leaky_relu)
    .AddCheckpoint(
        R"ROC(fix leaky_relu, behavior changed when alpha < 0 or alpha > 1)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "leaky_relu calculation formula before checkpoint: out = max(x, "
                "alpha * x); after checkpoint: out = x if x > 0 else alpha * "
                "x"));

REGISTER_OP_VERSION(hard_shrink)
    .AddCheckpoint(
        R"ROC(fix hard_shrink, behavior changed when threshold < 0)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "hard_shrink calculation formula before checkpoint: out = x * "
                "((x < -threshold) + (x > threshold)); after checkpoint: out = "
                "x * (((x < -threshold) + (x > threshold)) > 0)"));

REGISTER_OP_VERSION(softplus)
    .AddCheckpoint(
        R"ROC(add new attributes [beta] and [threshold], and the formula is changed to "
         " softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\ \\text{For numerical"
         " stability, the implementation reverts to the linear function when: beta * x > threshold.})ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("beta", "The beta value of the new formula", 1.0f)
            .NewAttr("threshold", "The threshold value of the new formula",
                     20.0f));

/* ========================================================================== */