/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"

#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h"
#include "paddle/fluid/platform/port.h"

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace operators {

using paddle::framework::Tensor;

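// An activation op can run in place (Out sharing the buffer of X) only when
// its gradient functor does not need the original input, i.e. it depends on
// Out alone or on nothing; CanInplaceAct below encodes exactly that check.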
template <typename GradFunctor>
static constexpr bool CanInplaceAct() {
  return GradFunctor::FwdDeps() == kDepOut || GradFunctor::FwdDeps() == kNoDeps;
}

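// REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) stamps out a minimal
// OpProtoAndCheckerMaker for a unary activation: one input X, one output Out,
// the extra use_mkldnn / use_cudnn flags, and OP_COMMENT as the op document.
// It is instantiated further below, e.g. REGISTER_ACTIVATION_OP_MAKER(Relu,
// ReluDoc) defines ReluOpMaker.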
#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)                    \
  class OP_NAME##OpMaker                                                     \
      : public ::paddle::framework::OpProtoAndCheckerMaker {                 \
   public:                                                                   \
    void Make() override {                                                   \
      AddInput("X", "Input of " #OP_NAME                                     \
                    " operator, an N-D Tensor, with data type float32, "     \
                    "float64 or float16.");                                  \
      AddOutput("Out", "Output of " #OP_NAME                                 \
                       " operator, a Tensor with shape same as input.");     \
      AddAttr<bool>("use_mkldnn",                                            \
                    "(bool, default false) Only used in mkldnn kernel")      \
          .SetDefault(false)                                                 \
          .AsExtra();                                                        \
      AddAttr<bool>("use_cudnn",                                             \
                    "(bool, default false) Only used in cudnn kernel, need " \
                    "install cudnn")                                         \
          .SetDefault(false)                                                 \
          .AsExtra();                                                        \
      AddComment(OP_COMMENT);                                                \
    }                                                                        \
  }

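// ActivationGradOpMaker describes the backward op of an activation.
// kDepValue declares which forward tensors the gradient needs: X is forwarded
// only when kDepX is set (or when MKL-DNN may be used), and Out only when
// kDepOut is set, so the backward op keeps no unnecessary tensors alive.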
template <ActBwdOpFwdDeps kDepValue, typename T>
class ActivationGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetAttrMap(this->Attrs());

    if ((static_cast<int>(kDepValue) &
         static_cast<int>(ActBwdOpFwdDeps::kDepX)) ||
        FLAGS_use_mkldnn ||
        (op->HasAttr("use_mkldnn") &&
         BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn")))) {
      op->SetInput("X", this->Input("X"));
    }

    if (static_cast<int>(kDepValue) &
        static_cast<int>(ActBwdOpFwdDeps::kDepOut)) {
      op->SetInput("Out", this->Output("Out"));
    }
  }
};

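// GetKernelType picks the kernel key shared by the forward and backward
// activation ops: the data type is taken from the named input/output, and the
// MKL-DNN kernel (with kMKLDNN layout) is selected when the build has MKL-DNN
// support and the op requests it; the cuDNN path is currently disabled (see
// the FIXME below).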
framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
                                      const framework::OperatorWithKernel& oper,
                                      const std::string& name) {
  framework::LibraryType library{framework::LibraryType::kPlain};
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
  auto data_type = oper.IndicateVarDataType(ctx, name);
// FIXME(liuwei1031) temporarily disable the code to unblock users
// TODO(liuwei1031) figure out the reason behind
// https://github.com/PaddlePaddle/Paddle/issues/16096
// and re-enable this in the future
// #ifdef PADDLE_WITH_CUDA
//   auto it1 = oper.Attrs().find("use_cudnn");
//   if (it1 != oper.Attrs().end() && platform::CanCUDNNBeUsed(ctx)) {
//     library = framework::LibraryType::kCUDNN;
//   }
// #endif
#ifdef PADDLE_WITH_MKLDNN
  auto it = oper.Attrs().find("use_mkldnn");
  if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
      oper.CanMKLDNNBeUsed(ctx, data_type)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif
  return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
}

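// The forward and backward activation ops only need shape inference that
// mirrors their input: Out (or X@GRAD) receives the same dims and LoD as X
// (or Out@GRAD), so ShareDim/ShareLoD is all InferShape has to do.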
class ActivationOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }
};

class ActivationOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ActivationOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }
};

UNUSED constexpr char SigmoidDoc[] = R"DOC(
Sigmoid Activation Operator

$$out = \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char SiluDoc[] = R"DOC(
Silu Activation Operator

$$out = x * \\frac{1}{1 + e^{-x}}$$
)DOC";

UNUSED constexpr char LogSigmoidDoc[] = R"DOC(
Logsigmoid Activation Operator

$$out = \\log \\frac{1}{1 + e^{-x}}$$

)DOC";

UNUSED constexpr char ExpDoc[] = R"DOC(
Exp Operator. Computes exp of x element-wise with a natural number :math:`e` as the base.

$$out = e^x$$

)DOC";

UNUSED constexpr char Expm1Doc[] = R"DOC(
Expm1 Operator. Computes expm1 of x element-wise with a natural number :math:`e` as the base.

$$out = e^x - 1$$

)DOC";

UNUSED constexpr char ReluDoc[] = R"DOC(
Relu Activation Operator.

$$out = \max(x, 0)$$

)DOC";

UNUSED constexpr char TanhDoc[] = R"DOC(
Tanh Activation Operator.

$$out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char TanhShrinkDoc[] = R"DOC(
TanhShrink Activation Operator.

$$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

)DOC";

UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator.

$$out=\\sqrt{x}=x^{1/2}$$

**Note**:
  input value must be greater than or equal to zero.

)DOC";

UNUSED constexpr char RsqrtDoc[] = R"DOC(
Rsqrt Activation Operator.

Please make sure the input is valid, otherwise numeric errors may occur.

$$out = \\frac{1}{\\sqrt{x}}$$

)DOC";

UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.

$$out = \\lceil x \\rceil$$

)DOC";

UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.

$$out = \\lfloor x \\rfloor$$

)DOC";

UNUSED constexpr char CosDoc[] = R"DOC(
Cosine Operator. Computes cosine of x element-wise.

Input range is `(-inf, inf)` and output range is `[-1,1]`.

$$out = cos(x)$$

)DOC";

UNUSED constexpr char TanDoc[] = R"DOC(
Tangent Operator. Computes tangent of x element-wise.

Input range is `(k*pi-pi/2, k*pi+pi/2)` and output range is `(-inf, inf)`.

$$out = tan(x)$$

)DOC";

UNUSED constexpr char SinDoc[] = R"DOC(
Sine Activation Operator.

$$out = sin(x)$$

)DOC";

UNUSED constexpr char SinhDoc[] = R"DOC(
Sinh Activation Operator.

$$out = sinh(x)$$

)DOC";

UNUSED constexpr char CoshDoc[] = R"DOC(
Cosh Activation Operator.

$$out = cosh(x)$$

)DOC";

UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.

.. code-block:: text

  input:
    x.shape = [4]
    x.data = [1.2, -0.9, 3.4, 0.9]

  output:
    out.shape = [4]
    out.data = [1., -1., 3., 1.]

)DOC";

UNUSED constexpr char ReciprocalDoc[] = R"DOC(
Reciprocal Activation Operator.

$$out = \\frac{1}{x}$$

)DOC";

UNUSED constexpr char LogDoc[] = R"DOC(
Log Activation Operator.

$$out = \ln(x)$$

Natural logarithm of x.

)DOC";

UNUSED constexpr char Log2Doc[] = R"DOC(
Log2 Activation Operator.

$$out = \log_2 x$$

Base-2 logarithm of x.

)DOC";

UNUSED constexpr char Log10Doc[] = R"DOC(
Log10 Activation Operator.

$$out = \log_{10} x$$

Base-10 logarithm of x.

)DOC";

UNUSED constexpr char Log1pDoc[] = R"DOC(
Log1p Activation Operator.

$$out = \ln(x+1)$$

Natural logarithm of (x+1).

)DOC";

UNUSED constexpr char SquareDoc[] = R"DOC(
The OP squares each element of the input.

$$out = x^2$$

)DOC";

UNUSED constexpr char SoftsignDoc[] = R"DOC(
Softsign Activation Operator.

$$out = \\frac{x}{1 + \|x\|}$$

)DOC";

class AcosOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of acos operator");
    AddOutput("Out", "Output of acos operator");
    AddComment(R"DOC(
Arccosine Operator.

$$out = \cos^{-1}(x)$$

)DOC");
  }
};

class AsinOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of asin operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of asin operator");
    AddComment(R"DOC(
Arcsine Operator.

$$out = \sin^{-1}(x)$$

)DOC");
  }
};

class AtanOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of atan operator, an N-D Tensor, with data type float32, "
             "float64 or float16.");
    AddOutput("Out", "Output of atan operator");
    AddComment(R"DOC(
Arctangent Operator.

$$out = \tan^{-1}(x)$$

)DOC");
  }
};

class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "A LoDTensor or Tensor representing preactivation values. Must be "
             "one of the following types: float32, float64.");
    AddOutput(
        "Out",
        "A LoDTensor or Tensor with the same type and size as that of x.");
    AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
        .SetDefault(0.02f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
LeakyRelu Activation Operator.

$$out = \max(x, \alpha * x)$$

)DOC");
  }
};

class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of Softplus operator, an N-D Tensor, with data type "
             "float32, float64 or float16.");
    AddOutput(
        "Out",
        "Output of Softplus operator, a Tensor with shape same as input.");
    AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
    AddAttr<float>("threshold", "The value of threshold for Softplus.")
        .SetDefault(20.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel.")
        .SetDefault(false);
    AddAttr<bool>(
        "use_cudnn",
        "(bool, default false) Only used in cudnn kernel, need install cudnn.")
        .SetDefault(false);
    AddComment(R"DOC(
:strong:`Softplus Activation Operator`

..  math::
    out = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
    \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.

)DOC");
  }
};

class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Softshrink operator");
    AddOutput("Out", "Output of Softshrink operator");
    AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`Softshrink Activation Operator`

..  math::
    out = \begin{cases}
         x - \lambda, \text{if } x > \lambda \\
         x + \lambda, \text{if } x < -\lambda \\
         0,  \text{otherwise}
         \end{cases}

)DOC");
  }
};

class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardShrink operator");
    AddOutput("Out", "Output of HardShrink operator");
    AddAttr<float>("threshold",
                   "The value of threshold for HardShrink. [default: 0.5]")
        .SetDefault(0.5f);
    AddComment(R"DOC(
:strong:`HardShrink activation operator`

..  math::
    out = \begin{cases}
            x, \text{if } x > \lambda \\
            x, \text{if } x < -\lambda \\
            0,  \text{otherwise}
          \end{cases}

)DOC");
  }
};

class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32, float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``X``.");
    AddAttr<float>("t_min", "The min marginal value of BRelu")
        .SetDefault(static_cast<float>(0));
    AddAttr<float>("t_max", "The max marginal value of BRelu")
        .SetDefault(static_cast<float>(24));
    AddComment(R"DOC(
BRelu Activation Operator.

$$out = \min(\max(x, t_{min}), t_{max})$$

)DOC");
  }
};

class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of SoftRelu operator");
    AddOutput("Out", "Output of SoftRelu operator");
    AddAttr<float>("threshold", "The threshold value of SoftRelu")
        .SetDefault(40.0f);
    AddComment(R"DOC(
SoftRelu Activation Operator.

$$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$$

)DOC");
  }
};

class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input is a multi-dimensional Tensor. The data type is "
             "float32 or float64.");
    AddOutput("Out",
              "The output is a multi-dimensional Tensor which has same "
              "dimension and data type as the ``x``.");
    AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
    AddComment(R"DOC(
ELU Activation Operator.

Applies the following element-wise computation on the input according to
https://arxiv.org/abs/1511.07289.

$$out = \max(0, x) + \min(0, \alpha * (e^x - 1))$$

)DOC");
  }
};

class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of relu6 operator, an N-D Tensor, "
             "with data type float32, float64.");
    AddOutput(
        "Out",
        "Output of relu6 operator, a Tensor with the same shape as input.");
    AddAttr<float>("threshold",
                   "The threshold value of Relu6. Default is 6.0. ")
        .SetDefault(6.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Relu6 Activation Operator.

$$out = \min(\max(0, x), threshold)$$

)DOC");
  }
};

class PowOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Pow operator");
    AddInput("FactorTensor",
             "(Tensor<float>, optional). If provided, pow will use this as "
             "the exponential factor; it has higher priority than "
             "attr(factor). The shape of FactorTensor MUST BE [1].")
        .AsDispensable();
    AddOutput("Out", "Output of Pow operator");
    AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
    AddComment(R"DOC(
Pow Activation Operator.

$$out = x^{factor}$$

)DOC");
  }
};

class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "Input of STanh operator."
             " A Tensor with type float32, float64.");
    AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
    AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
        .SetDefault(0.67f);
    AddAttr<float>("scale_b", "The scale parameter of b for the input")
        .SetDefault(1.7159f);
    AddComment(R"DOC(
STanh Activation Operator.

$$out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$

)DOC");
  }
};

class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of ThresholdedRelu operator");
    AddOutput("Out", "Output of ThresholdedRelu operator");
    AddAttr<float>("threshold",
                   "The threshold location of activation. [default 1.0].")
        .SetDefault(1.0f);
    AddComment(R"DOC(
:strong:`ThresholdedRelu activation operator`

..  math::

    out = \begin{cases}
             x,  \text{if } x > threshold \\
             0,  \text{otherwise}
          \end{cases}
)DOC");
  }
};

class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "An N-D Tensor with data type float32, float64. ");
    AddOutput("Out", "A Tensor with the same shape as input. ");
    AddAttr<float>("slope",
                   "The slope of the linear approximation of sigmoid. Its "
                   "value MUST BE positive. Default is 0.2. ")
        .SetDefault(0.2f);
    AddAttr<float>(
        "offset",
        "The offset of the linear approximation of sigmoid. Default is 0.5. ")
        .SetDefault(0.5f);
    AddComment(R"DOC(
HardSigmoid Activation Operator.

A 3-part piecewise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.

$$out = \max(0, \min(1, slope * x + offset))$$

)DOC");
  }
};

class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of Swish operator");
    AddOutput("Out", "Output of Swish operator");
    AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Swish Activation Operator.

$$out = \\frac{x}{1 + e^{- \beta \ x}}$$

)DOC");
  }
};

class HardSwishOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of HardSwish operator");
    AddOutput("Out", "Output of HardSwish operator");
    AddAttr<float>("threshold", "The threshold parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("scale", "The scale parameter of HardSwish operator")
        .SetDefault(6.0f);
    AddAttr<float>("offset", "The offset parameter of HardSwish operator")
        .SetDefault(3.0f);
    AddComment(R"DOC(
HardSwish Activation Operator.

The hard version of swish(https://arxiv.org/pdf/1905.02244.pdf).

704
$$out = \frac{x * (min(max(0, x+offset), threshold))}{scale}$$
H
huangjun12 已提交
705 706 707 708 709 710 711 712 713

The threshold and scale should be positive. The offset can be either positive or negative.
The default parameters are set according to the above reference.
It is recommended to use the defaults for this activation.

)DOC");
  }
};

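// The doc-string constants above become full op makers here; activations with
// extra attributes (leaky_relu, softplus, elu, relu6, pow, ...) use the
// hand-written makers defined earlier in this file instead.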
REGISTER_ACTIVATION_OP_MAKER(Sigmoid, SigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Silu, SiluDoc);
REGISTER_ACTIVATION_OP_MAKER(LogSigmoid, LogSigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Exp, ExpDoc);
REGISTER_ACTIVATION_OP_MAKER(Expm1, Expm1Doc);
REGISTER_ACTIVATION_OP_MAKER(Relu, ReluDoc);
REGISTER_ACTIVATION_OP_MAKER(Tanh, TanhDoc);
REGISTER_ACTIVATION_OP_MAKER(TanhShrink, TanhShrinkDoc);
REGISTER_ACTIVATION_OP_MAKER(Sqrt, SqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Rsqrt, RsqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Ceil, CeilDoc);
REGISTER_ACTIVATION_OP_MAKER(Floor, FloorDoc);
REGISTER_ACTIVATION_OP_MAKER(Cos, CosDoc);
REGISTER_ACTIVATION_OP_MAKER(Tan, TanDoc);
REGISTER_ACTIVATION_OP_MAKER(Sin, SinDoc);
REGISTER_ACTIVATION_OP_MAKER(Sinh, SinhDoc);
REGISTER_ACTIVATION_OP_MAKER(Cosh, CoshDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
REGISTER_ACTIVATION_OP_MAKER(Log2, Log2Doc);
REGISTER_ACTIVATION_OP_MAKER(Log10, Log10Doc);
REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);

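// Shape inference for the *_grad_grad ops: ActivationOpDoubleGrad propagates
// dims and LoD from X or Out (depending on kDepValue) to DX, DOut and DDOut,
// while ActivationOpDoubleGrad2 only produces DDOut; both take their kernel
// type from the DDX input.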
template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DX")) {
        ctx->ShareDim("X", "DX");
        ctx->ShareLoD("X", "DX");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DOut")) {
        ctx->ShareDim("Out", "DOut");
        ctx->ShareLoD("Out", "DOut");
      }
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

template <ActBwdOpFwdDeps kDepValue>
class ActivationOpDoubleGrad2 : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("X", "DDOut");
        ctx->ShareLoD("X", "DDOut");
      }
    }
    if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
      if (ctx->HasOutput("DDOut")) {
        ctx->ShareDim("Out", "DDOut");
        ctx->ShareLoD("Out", "DDOut");
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "DDX");
  }
};

template <typename T>
class SigmoidDoubleGradMaker
    : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("sigmoid_grad_grad");
    // input1: Out
    op->SetInput("Out", this->Input("Out"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DOutNew", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

template <typename T>
class TanhDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("tanh_grad_grad");
    // input1: Out
    op->SetInput("Out", this->Input("Out"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DOutNew", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// ReluGrad: dx = dy if y >= 0 else 0
// ReluGradGrad: ddy = ddx if y >= 0 else 0
template <typename T>
class ReluDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("relu_grad_grad");
    // input1: Out
    op->SetInput("Out", this->Input("Out"));
    // input2: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // output: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// leaky_relu Grad: dx=dy if x>=0 else alpha * dy
// leaky_relu GradGrad: ddy=ddx if x>=0 else alpha * ddx
template <typename T>
class LeakyReluDoubleGradMaker
    : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("leaky_relu_grad_grad");
    // input1: X
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// elu grad: dx=dy if y>0 else alpha*dy*x.exp()
// elu gradgrad: ddx=ddy if y>0 else alpha*ddy*x.exp()
template <typename T>
class ELUDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("elu_grad_grad");

    op->SetInput("X", this->Input("X"));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());

    // Out@GRAD@GRAD: ddy
    op->SetOutput("DX", this->InputGrad("X"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// sqrt Grad: dx = 0.5 * dy / y
// sqrt GradGrad: ddy = 0.5 * ddx / y, dy = -1 * dx * ddx
template <typename T>
class SqrtDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("sqrt_grad_grad");
    op->SetInput("Out", this->Input("Out"));
    op->SetInput("DX", this->Output(framework::GradVarName("X")));
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    op->SetOutput("DOut", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// rsqrt Grad: dx = -0.5 * dy * y * y * y
// rsqrt GradGrad: ddy = -0.5 * ddx * y * y * y, dy = (3/y) * ddx
template <typename T>
class RsqrtDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("rsqrt_grad_grad");
    op->SetInput("Out", this->Input("Out"));
    op->SetInput("DX", this->Output(framework::GradVarName("X")));
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetAttrMap(this->Attrs());
    op->SetOutput("DOut", this->InputGrad("Out"));
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// square Grad: dx=2x*dy
// square GradGrad: ddy=2x*ddx, dx=2dy*ddx
template <typename T>
class SquareDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("square_grad_grad");
    op->SetInput("X", this->Input("X"));
    // Out@GRAD: dy
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));

    op->SetAttrMap(this->Attrs());

    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

// log Grad: dx = dout / x
// log Grad Grad: ddout = ddx / x; dx = -(dout / x) * (ddx / x)
template <typename T>
class LogDoubleGradMaker : public ::paddle::framework::SingleGradOpMaker<T> {
 public:
  using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("log_grad_grad");
    op->SetInput("X", this->Input("X"));
    // X@GRAD@GRAD: ddx
    op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    // X@GRAD: dx
    op->SetOutput("DX", this->InputGrad("X"));
    // Out@GRAD@GRAD: ddy
    op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
  }
};

DECLARE_INPLACE_OP_INFERER(ActivationGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ActivationDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

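// pow gets its own grad maker and op classes because of the optional
// FactorTensor input: the grad op must also receive FactorTensor, and
// GetKernelTypeForVar below returns the expected kernel type for it so that
// no data transform is applied to the factor.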
template <typename T>
class PowGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("pow_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetInput("FactorTensor", this->Input("FactorTensor"));
    op->SetAttrMap(this->Attrs());
  }
};
class PowOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, "X");
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class PowOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
    ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return GetKernelType(ctx, *this, framework::GradVarName("Out"));
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "FactorTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};
DECLARE_INPLACE_OP_INFERER(ActFwdInplaceInferer, {"X", "Out"});
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

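// REGISTER_ACTIVATION_OP registers the forward op, its grad op and the
// in-place inferers for one activation; REGISTER_ACTIVATION_CPU_KERNEL adds
// the float/double CPU kernels. FOR_EACH_ACTIVATION_OP (from the included
// activation_op.h) applies them to every simple activation, and the ops that
// additionally need double-grad kernels (sigmoid, tanh, relu, ...) are
// registered one by one below.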
#define REGISTER_ACTIVATION_OP(KERNEL_TYPE, OP_NAME, functor, grad_functor) \
  REGISTER_OPERATOR(                                                        \
      KERNEL_TYPE, ops::ActivationOp, ops::OP_NAME##OpMaker,                \
      ops::ActivationOpInferVarType,                                        \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::framework::OpDesc>,                \
      ops::ActivationGradOpMaker<ops::grad_functor<float>::FwdDeps(),       \
                                 paddle::imperative::OpBase>,               \
      std::conditional<ops::CanInplaceAct<ops::grad_functor<float>>(),      \
                       ops::ActFwdInplaceInferer, void>::type);             \
  REGISTER_OPERATOR(KERNEL_TYPE##_grad, ops::ActivationOpGrad,              \
                    ops::ActivationGradOpInplaceInferer);

#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, op_name, functor,        \
                                       grad_functor)                      \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type, ops::ActivationKernel<paddle::platform::CPUDeviceContext, \
                                      ops::functor<float>>,               \
      ops::ActivationKernel<paddle::platform::CPUDeviceContext,           \
                            ops::functor<double>>);                       \
  REGISTER_OP_CPU_KERNEL(                                                 \
      act_type##_grad,                                                    \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<float>>,                \
      ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,       \
                                ops::grad_functor<double>>);

FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_OP);
FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_CPU_KERNEL);

/* ==========================    sigmoid register  =============================
 */
// 1. Register Sigmoid Operator
REGISTER_OPERATOR(
    sigmoid, ops::ActivationOp, ops::SigmoidOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SigmoidGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SigmoidGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::SigmoidGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);

// 2. Register Sigmoid Grad Operator
REGISTER_OPERATOR(sigmoid_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SigmoidDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SigmoidDoubleGradMaker<paddle::imperative::OpBase>)

// 3. Register Sigmoid DoubleGrad Operator
REGISTER_OPERATOR(
    sigmoid_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SigmoidGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

// Register Sigmoid/GradSigmoid Kernels
REGISTER_ACTIVATION_CPU_KERNEL(sigmoid, Sigmoid, SigmoidFunctor,
                               SigmoidGradFunctor);

// Register DoubleGrad Kernel
REGISTER_OP_CPU_KERNEL(
    sigmoid_grad_grad,
    ops::SigmoidDoubleGradKernel<plat::CPUDeviceContext,
                                 ops::SigmoidGradGradFunctor<float>>,
    ops::SigmoidDoubleGradKernel<plat::CPUDeviceContext,
                                 ops::SigmoidGradGradFunctor<double>>,
    ops::SigmoidDoubleGradKernel<plat::CPUDeviceContext,
                                 ops::SigmoidGradGradFunctor<plat::float16>>);

/* ========================================================================== */

/* ==========================    tanh register  ============================= */
REGISTER_OPERATOR(
    tanh, ops::ActivationOp, ops::TanhOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::TanhGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::TanhGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::TanhGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(tanh_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::TanhDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::TanhDoubleGradMaker<paddle::imperative::OpBase>)
REGISTER_OPERATOR(
    tanh_grad_grad,
    ops::ActivationOpDoubleGrad<ops::TanhGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(tanh, Tanh, TanhFunctor, TanhGradFunctor);
REGISTER_OP_CPU_KERNEL(
    tanh_grad_grad, ops::TanhDoubleGradKernel<plat::CPUDeviceContext,
                                              ops::TanhGradGradFunctor<float>>,
    ops::TanhDoubleGradKernel<plat::CPUDeviceContext,
                              ops::TanhGradGradFunctor<double>>,
    ops::TanhDoubleGradKernel<plat::CPUDeviceContext,
                              ops::TanhGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================    relu register  ============================= */
REGISTER_OPERATOR(
    relu, ops::ActivationOp, ops::ReluOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::ReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(relu, Relu, ReluCPUFunctor, ReluGradFunctor);

REGISTER_OP_CPU_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ======================== leaky relu register  ============================ */
REGISTER_OPERATOR(
    leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LeakyReluGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LeakyReluDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LeakyReluDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    leaky_relu_grad_grad,
    ops::ActivationOpDoubleGrad2<ops::LeakyReluGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(leaky_relu, LeakyRelu, LeakyReluFunctor,
                               LeakyReluGradFunctor);
REGISTER_OP_CPU_KERNEL(
    leaky_relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CPUDeviceContext,
                                    ops::LeakyReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<
        plat::CPUDeviceContext, ops::LeakyReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ========================    elu  register     ============================ */
REGISTER_OPERATOR(
    elu, ops::ActivationOp, ops::ELUOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ELUGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::ELUDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ELUDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    elu_grad_grad,
    ops::ActivationOpDoubleGrad<ops::ELUGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(elu, ELU, ELUFunctor, ELUGradFunctor);
REGISTER_OP_CPU_KERNEL(
    elu_grad_grad, ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::ELUGradGradFunctor<float>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<double>>,
    ops::ELUDoubleGradKernel<plat::CPUDeviceContext,
                             ops::ELUGradGradFunctor<plat::float16>>);

/* ========================================================================== */

/* ===========================   sqrt register  ============================= */
REGISTER_OPERATOR(
    sqrt, ops::ActivationOp, ops::SqrtOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SqrtGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(sqrt_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SqrtDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SqrtDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    sqrt_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SqrtGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(sqrt, Sqrt, SqrtFunctor, SqrtGradFunctor);
REGISTER_OP_CPU_KERNEL(
    sqrt_grad_grad, ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                                              ops::SqrtGradGradFunctor<float>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<double>>,
    ops::SqrtDoubleGradKernel<plat::CPUDeviceContext,
                              ops::SqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================   rsqrt register  =============================
 */
REGISTER_OPERATOR(
    rsqrt, ops::ActivationOp, ops::RsqrtOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::RsqrtGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::RsqrtGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(rsqrt_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::RsqrtDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::RsqrtDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    rsqrt_grad_grad,
    ops::ActivationOpDoubleGrad<ops::RsqrtGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(rsqrt, Rsqrt, RsqrtFunctor, RsqrtGradFunctor);
REGISTER_OP_CPU_KERNEL(
    rsqrt_grad_grad,
    ops::RsqrtDoubleGradKernel<plat::CPUDeviceContext,
                               ops::RsqrtGradGradFunctor<float>>,
    ops::RsqrtDoubleGradKernel<plat::CPUDeviceContext,
                               ops::RsqrtGradGradFunctor<double>>,
    ops::RsqrtDoubleGradKernel<plat::CPUDeviceContext,
                               ops::RsqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================   square register  ============================ */
REGISTER_OPERATOR(
    square, ops::ActivationOp, ops::SquareOpMaker,
    ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::SquareGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::SquareDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::SquareDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    square_grad_grad,
    ops::ActivationOpDoubleGrad<ops::SquareGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(square,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::SquareFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    square_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                           ops::SquareGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::SquareGradFunctor<int64_t>>);

REGISTER_OP_CPU_KERNEL(
    square_grad_grad,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<float>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<double>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<plat::float16>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int>>,
    ops::SquareDoubleGradKernel<plat::CPUDeviceContext,
                                ops::SquareGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   pow register  ============================ */
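// Note: pow (like exp and expm1 below) registers ActFwdInplaceInferer only
// when CanInplaceAct<PowGradFunctor>() holds, i.e. the backward pass depends
// solely on Out or on nothing; otherwise std::conditional selects void and no
// forward inplace inference is wired up.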

REGISTER_OPERATOR(
    pow, ops::PowOp, ops::PowOpMaker, ops::ActivationOpInferVarType,
    ops::PowGradOpMaker<paddle::framework::OpDesc>,
    ops::PowGradOpMaker<paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::PowGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(pow_grad, ops::PowOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(
    pow, ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<float>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<double>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int>>,
    ops::PowKernel<plat::CPUDeviceContext, ops::PowFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    pow_grad,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<float>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<double>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int>>,
    ops::PowGradKernel<plat::CPUDeviceContext, ops::PowGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   exp register  ============================ */
REGISTER_OPERATOR(
    exp, ops::ActivationOp, ops::ExpOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::ExpGradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(exp,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::ExpFunctor<int64_t>>);
REGISTER_OP_CPU_KERNEL(
    exp_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                        ops::ExpGradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::ExpGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   expm1 register  ============================ */
REGISTER_OPERATOR(
    expm1, ops::ActivationOp, ops::Expm1OpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::Expm1GradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::Expm1GradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    std::conditional<ops::CanInplaceAct<ops::Expm1GradFunctor<float>>(),
                     ops::ActFwdInplaceInferer, void>::type);
REGISTER_OPERATOR(expm1_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer);

REGISTER_OP_CPU_KERNEL(expm1,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::Expm1Functor<float>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::Expm1Functor<double>>,
                       ops::ActivationKernel<paddle::platform::CPUDeviceContext,
                                             ops::Expm1Functor<plat::float16>>);
REGISTER_OP_CPU_KERNEL(
    expm1_grad, ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                                          ops::Expm1GradFunctor<float>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::Expm1GradFunctor<double>>,
    ops::ActivationGradKernel<paddle::platform::CPUDeviceContext,
                              ops::Expm1GradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================  Log register ==================================*/
REGISTER_OPERATOR(
    log, ops::ActivationOp, ops::LogOpMaker, ops::ActivationOpInferVarType,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::framework::OpDesc>,
    ops::ActivationGradOpMaker<ops::LogGradFunctor<float>::FwdDeps(),
                               paddle::imperative::OpBase>,
    ops::ActFwdInplaceInferer);
REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad,
                  ops::ActivationGradOpInplaceInferer,
                  ops::LogDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::LogDoubleGradMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(
    log_grad_grad,
    ops::ActivationOpDoubleGrad<ops::LogGradGradFunctor<float>::FwdDeps()>,
    ops::ActivationDoubleGradOpInplaceInferer);

REGISTER_ACTIVATION_CPU_KERNEL(log, Log, LogFunctor, LogGradFunctor);

REGISTER_OP_CPU_KERNEL(
    log_grad_grad, ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                                            ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CPUDeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================  register checkpoint ===========================*/
REGISTER_OP_VERSION(leaky_relu)
    .AddCheckpoint(
        R"ROC(fix leaky_relu, bahavior changed when alpha < 0 or alpha > 1)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "leaky_relu calculate formula before checkponit: out = max(x, "
                "alpha * x); after checkpoint: out = x if x > 0 else alpha * "
                "x"));

REGISTER_OP_VERSION(hard_shrink)
    .AddCheckpoint(
        R"ROC(fix hard_shrink, bahavior changed when threshold<0)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .BugfixWithBehaviorChanged(
                "hard_shrink calculate formula before checkponit: out = x * "
                "((x < -threshold) + (x > threshold)); after checkpoint: out = "
                "x * (((x < -threshold) + (x > threshold)) > 0)"));

REGISTER_OP_VERSION(softplus)
    .AddCheckpoint(
        R"ROC(add new attributes [beta] and [threshold], and the formula is changed to "
         " softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\ \\text{For numerical"
         " stability, the implementation reverts to the linear function when: beta * x > threshold.})ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("beta", "The beta value of the new formula", 1.0f)
            .NewAttr("threshold", "The threshold value of the new formula",
                     20.0f));
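
// Illustrative sketch only (hypothetical helper) of the new softplus formula
// described above, using the defaults beta = 1.0f and threshold = 20.0f added
// by this checkpoint; assumes std::exp/std::log1p from <cmath> are reachable
// through the existing includes.
inline float SoftplusReference(float x, float beta = 1.0f,
                               float threshold = 20.0f) {
  const float bx = beta * x;
  // Revert to the linear function once beta * x exceeds the threshold.
  return bx > threshold ? x : std::log1p(std::exp(bx)) / beta;
}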

/* ========================================================================== */