Unverified commit c7b373f2 authored by zyfncg, committed by GitHub

Clear extra attributes of activation op in OpMaker (#45772)

* clear extra attr of activation op in opmaker

* fix syntax bug

* fix mkldnn kernel

* fix merge conflict

* fix bug
Parent 01888482
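Note on the overall pattern: this commit stops declaring backend-only "extra" attributes (use_mkldnn, use_cudnn, and the softplus fuse_activation_* group) inside each activation op's OpMaker, and instead records them once per op in the extra-attrs YAML table edited at the end of this diff; the framework then attaches them to ops at runtime (see the SetRuntimeAttrMap hunk below). A minimal sketch of that pattern, using hypothetical names (ExtraAttrDefaults, MakeRuntimeAttrs) rather than Paddle's real API:

// Minimal sketch, not Paddle's implementation: backend-only "extra"
// attributes live in one table keyed by op type instead of being declared
// by every OpMaker. ExtraAttrDefaults/MakeRuntimeAttrs are hypothetical.
#include <iostream>
#include <map>
#include <string>
#include <variant>

using Attribute = std::variant<bool, float, std::string>;
using AttributeMap = std::map<std::string, Attribute>;

// One table replaces the per-op AddAttr(...).AsExtra() declarations and
// mirrors the YAML entries in this diff (e.g. leaky_relu, softplus).
const std::map<std::string, AttributeMap>& ExtraAttrDefaults() {
  static const std::map<std::string, AttributeMap> table = {
      {"leaky_relu", {{"use_mkldnn", false}}},
      {"softplus",
       {{"use_mkldnn", false},
        {"use_cudnn", false},
        {"fuse_activation_type", std::string("")},
        {"fuse_activation_alpha", 0.0f},
        {"fuse_activation_beta", 0.0f},
        {"fuse_activation_scale", 1.0f}}},
  };
  return table;
}

// At op-creation time the framework can seed a runtime attribute map from
// the table, so kernels still see the old defaults even though the op
// proto no longer declares these attributes.
AttributeMap MakeRuntimeAttrs(const std::string& op_type) {
  auto it = ExtraAttrDefaults().find(op_type);
  return it == ExtraAttrDefaults().end() ? AttributeMap{} : it->second;
}

int main() {
  std::cout << MakeRuntimeAttrs("softplus").size() << " extra attrs\n";  // 6
}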
@@ -213,8 +213,8 @@ class SingleGradOpMaker<OpDesc> : public GradOpDescMakerBase {
std::vector<std::unique_ptr<OpDesc>> retv;
retv.emplace_back(new OpDesc());
try {
this->Apply(retv.front().get());
retv.front()->SetRuntimeAttrMap(this->RuntimeAttrs());
this->Apply(retv.front().get());
} catch (platform::EnforceNotMet& exception) {
framework::AppendErrorOpHint(retv.front().get()->Type(), &exception);
throw std::move(exception);
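The hunk above reorders grad-op construction so the runtime (extra) attributes are attached to the freshly created OpDesc before Apply runs. A plausible reading, sketched below with stand-in types rather than Paddle code: the injected defaults have to land first, so that anything Apply writes takes precedence over them instead of being clobbered afterwards.

// Illustration with hypothetical types, not Paddle code: defaults are
// injected first, then Apply()-style writes override them.
#include <cassert>
#include <map>
#include <string>

using AttributeMap = std::map<std::string, bool>;

struct OpDesc {
  AttributeMap attrs;
  void SetRuntimeAttrMap(const AttributeMap& m) {
    attrs.insert(m.begin(), m.end());  // insert() never replaces a key
  }
};

int main() {
  OpDesc grad;
  grad.SetRuntimeAttrMap({{"use_mkldnn", false}});  // runtime defaults first
  grad.attrs["use_mkldnn"] = true;                  // what Apply() sets wins
  assert(grad.attrs.at("use_mkldnn") == true);
}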
@@ -50,15 +50,6 @@ static constexpr bool CanInplaceAct() {
AddOutput("Out", \
"Output of " #OP_NAME \
" operator, a Tensor with shape same as input."); \
AddAttr<bool>("use_mkldnn", \
"(bool, default false) Only used in mkldnn kernel") \
.SetDefault(false) \
.AsExtra(); \
AddAttr<bool>("use_cudnn", \
"(bool, default false) Only used in cudnn kernel, need " \
"install cudnn") \
.SetDefault(false) \
.AsExtra(); \
AddComment(OP_COMMENT); \
} \
}
@@ -107,8 +98,7 @@ framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
// }
// #endif
#ifdef PADDLE_WITH_MKLDNN
auto it = oper.Attrs().find("use_mkldnn");
if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
if (library == framework::LibraryType::kPlain &&
oper.CanMKLDNNBeUsed(ctx, data_type)) {
library = framework::LibraryType::kMKLDNN;
layout = framework::DataLayout::kMKLDNN;
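Since use_mkldnn is no longer a declared proto attribute, oper.Attrs() is no longer guaranteed to contain it, so the hunk above drops the explicit probe and lets CanMKLDNNBeUsed(ctx, data_type) make the decision by itself. Roughly, with deliberately simplified stand-in types (not Paddle's signatures):

// Simplified sketch of the dispatch after the change: no attribute probe,
// just one capability check that consults the execution context.
enum class Library { kPlain, kMKLDNN };

struct ExecutionContext {
  bool use_mkldnn = false;  // stands in for the runtime attr lookup
};

bool CanMKLDNNBeUsed(const ExecutionContext& ctx) {
  return ctx.use_mkldnn;  // the real check also inspects place and dtype
}

Library ChooseLibrary(const ExecutionContext& ctx) {
  Library lib = Library::kPlain;
#ifdef PADDLE_WITH_MKLDNN
  if (lib == Library::kPlain && CanMKLDNNBeUsed(ctx)) lib = Library::kMKLDNN;
#endif
  return lib;
}

int main() {
  ExecutionContext ctx{true};
  // Without PADDLE_WITH_MKLDNN defined the guard compiles away, mirroring
  // the real code, and the plain library is always chosen.
  return ChooseLibrary(ctx) == Library::kPlain ? 0 : 1;
}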
@@ -458,10 +448,6 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
"A LoDTensor or Tensor with the same type and size as that of x.");
AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
.SetDefault(0.02f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
LeakyRelu Activation Operator.
@@ -483,35 +469,6 @@ class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
AddAttr<float>("threshold", "The value of threshold for Softplus.")
.SetDefault(20.0f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel.")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"fuse_activation_type",
"Fused activation type used in softplus OneDNN kernel.")
.SetDefault("")
.AsExtra();
AddAttr<float>(
"fuse_activation_alpha",
"Fused activation alpha parameter type used in softplus OneDNN kernel.")
.SetDefault(0.0f)
.AsExtra();
AddAttr<float>(
"fuse_activation_beta",
"Fused activation beta parameter type used in softplus OneDNN kernel.")
.SetDefault(0.0f)
.AsExtra();
AddAttr<float>(
"fuse_activation_scale",
"Fused activation scale parameter type used in softplus OneDNN kernel.")
.SetDefault(1.0f)
.AsExtra();
AddComment(R"DOC(
:strong:`Softplus Activation Operator`
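All six softplus extras deleted above reappear as a single attrs line in the YAML (see the "- api : softplus" entry near the end of this diff), so any kernel reading them has to treat them as optional. A hedged sketch of that lookup; GetOptionalAttr is a hypothetical helper, not a Paddle API:

// With fuse_activation_* no longer declared by SoftplusOpMaker, a kernel
// falls back to the defaults recorded in the YAML entry (type "", alpha
// 0.0f, beta 0.0f, scale 1.0f) whenever the attribute was not injected.
#include <map>
#include <string>
#include <variant>

using Attribute = std::variant<bool, float, std::string>;
using AttributeMap = std::map<std::string, Attribute>;

template <typename T>
T GetOptionalAttr(const AttributeMap& attrs, const std::string& name,
                  T fallback) {
  auto it = attrs.find(name);
  return it == attrs.end() ? fallback : std::get<T>(it->second);
}

float FusedScale(const AttributeMap& runtime_attrs) {
  // Same default the YAML declares: float fuse_activation_scale = 1.0f
  return GetOptionalAttr(runtime_attrs, "fuse_activation_scale", 1.0f);
}

int main() {
  AttributeMap attrs;  // empty map simulates "attribute never injected"
  return FusedScale(attrs) == 1.0f ? 0 : 1;
}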
@@ -613,10 +570,6 @@ class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
"The output is a multi-dimensional Tensor which has same "
"dimension and data type as the ``x``.");
AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
ELU Activation Operator.
@@ -712,10 +665,6 @@ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<float>("threshold",
"The threshold value of Relu6. Default is 6.0. ")
.SetDefault(6.0f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Relu6 Activation Operator.
@@ -817,10 +766,6 @@ class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "Input of Swish operator");
AddOutput("Out", "Output of Swish operator");
AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Swish Activation Operator.
@@ -841,10 +786,6 @@ class MishOpMaker : public framework::OpProtoAndCheckerMaker {
"of softplus will be used if absolute value of input is greater than "
":attr:`threshold`")
.SetDefault(20.f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Mish Activation Operator.
@@ -871,10 +812,6 @@ class HardSwishOpMaker : public framework::OpProtoAndCheckerMaker {
.SetDefault(6.0f);
AddAttr<float>("offset", "The offset parameter of HardSwish operator")
.SetDefault(3.0f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
HardSwish Activation Operator.
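The remaining hunks all edit the extra-attrs YAML table. Each record names an api, usually its backward op, and an attrs list of "type name = default" triples whose defaults match the AddAttr(...).AsExtra() calls deleted above. Modeled loosely as data below (illustrative only; this struct is an assumption, not how Paddle actually consumes the file):

// Rough shape of one YAML record from the hunks that follow.
#include <string>
#include <vector>

struct ExtraAttrSpec {
  std::string type_and_name;  // e.g. "bool use_mkldnn"
  std::string default_value;  // e.g. "false"
};

struct ApiExtraEntry {
  std::string api;                   // - api : acosh
  std::string backward;              // backward : acosh_grad
  std::vector<ExtraAttrSpec> attrs;  // attrs : [bool use_mkldnn = false, ...]
};

int main() {
  ApiExtraEntry acosh{
      "acosh", "acosh_grad", {{"bool use_mkldnn", "false"},
                              {"bool use_cudnn", "false"}}};
  return acosh.attrs.size() == 2 ? 0 : 1;
}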
@@ -3,6 +3,11 @@
extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false]
- api : acosh
backward : acosh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : addmm
backward : addmm_grad
extra :
@@ -18,12 +23,22 @@
extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false]
- api : asinh
backward : asinh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : atan2
inputs :
{x : X1, y : X2}
outputs :
out : Out
- api : atanh
backward : atanh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : batch_norm
backward : batch_norm_grad
extra :
@@ -45,6 +60,11 @@
extra :
attrs : [bool use_mkldnn = false]
- api : ceil
backward : ceil_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : cholesky
inputs :
x : X
@@ -107,6 +127,16 @@
extra :
attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
- api : cos
backward : cos_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : cosh
backward : cosh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : cross
inputs :
{x : X, y : Y}
@@ -179,6 +209,11 @@
extra :
attrs : [bool fix_seed = false, int seed = 0]
- api : elu
backward : elu_grad
extra :
attrs : [bool use_mkldnn = false]
- api : erf
inputs :
x : X
@@ -191,6 +226,16 @@
outputs :
out : Out
- api : exp
backward : exp_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : expm1
backward : expm1_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : fft_c2c
inputs: {x: X}
outputs: {out: Out}
@@ -203,6 +248,11 @@
inputs: {x: X}
outputs: {out: Out}
- api : floor
backward : floor_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : frobenius_norm
backward : frobenius_norm_grad
extra :
@@ -223,6 +273,11 @@
extra :
attrs : [bool is_test = false]
- api : hard_swish
backward : hard_swish_grad
extra :
attrs : [bool use_mkldnn = false]
- api : inplace_abn
backward : inplace_abn_grad
extra :
@@ -233,6 +288,11 @@
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- api : leaky_relu
backward : leaky_relu_grad
extra :
attrs : [bool use_mkldnn = false]
- api : lgamma
inputs :
x : X
@@ -244,11 +304,36 @@
extra :
attrs : [bool use_mkldnn = false]
- api : log
backward : log_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : log10
backward : log10_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : log1p
backward : log1p_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : log2
backward : log2_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : log_softmax
backward : log_softmax_grad
extra :
attrs : [bool use_mkldnn = false]
- api : logsigmoid
backward : logsigmoid_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : lrn
backward : lrn_grad
extra :
@@ -261,6 +346,11 @@
str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
- api : mish
backward : mish_grad
extra :
attrs : [bool use_mkldnn = false]
- api : mv
inputs :
{x : X, vec : Vec}
@@ -293,6 +383,21 @@
outputs :
out : Out
- api : prelu
backward : prelu_grad
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- api : prelu
backward : prelu_grad
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- api : reciprocal
backward : reciprocal_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : reduce_all
extra :
attrs : [bool use_mkldnn = false]
@@ -336,15 +441,30 @@
extra :
attrs : [bool use_mkldnn = false]
- api : relu
backward : relu_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : relu6
backward : relu6_grad
extra :
attrs : [bool use_mkldnn = false]
- api : renorm
backward : renorm_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : rnn
backward : rnn_grad
- api : round
backward : round_grad
extra :
attrs : [bool is_test = false]
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : rsqrt
backward : rsqrt_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : seed
extra :
@@ -359,6 +479,26 @@
extra :
attrs : [bool use_mkldnn = false]
- api : sigmoid
backward : sigmoid_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : silu
backward : silu_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : sin
backward : sin_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : sinh
backward : sinh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : slice
backward : slice_grad
extra :
@@ -368,10 +508,21 @@
backward : softmax_grad
extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- api : prelu
backward : prelu_grad
- api : softplus
backward : softplus_grad
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]
- api : softsign
backward : softsign_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : rnn
backward : rnn_grad
extra :
attrs : [bool is_test = false]
- api : solve
inputs :
@@ -379,6 +530,16 @@
outputs :
out : Out
- api : sqrt
backward : sqrt_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : square
backward : square_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : squeeze (squeeze2)
backward : squeeze_grad (squeeze2_grad)
extra :
@@ -389,11 +550,31 @@
extra :
attrs : [bool use_mkldnn = false]
- api : swish
backward : swish_grad
extra :
attrs : [bool use_mkldnn = false]
- api : sync_batch_norm
backward : sync_batch_norm_grad
extra :
attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
- api : tan
backward : tan_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : tanh
backward : tanh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : tanh_shrink
backward : tanh_shrink_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : trace
inputs :
x : Input