Unverified commit 3879f6b8, authored by zyfncg, committed by GitHub

Clear extra attributes of some Op in OpMaker (Part2) (#45684)

* remove extra attr of opmaker

* remove extra attr of gru

* revert scale

* fix bug of create_op

* add extra attr checker in infer_shape

* fix gelu
Parent 462cf0ef
@@ -1018,6 +1018,13 @@ void OpDesc::CheckAttrs() {
   }
   VLOG(10) << "begin to check attribute of " << Type();
   checker->Check(&attrs_);
+  const auto &extra_attr_checkers =
+      operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(Type());
+  if (!extra_attr_checkers.empty()) {
+    for (const auto &extra_checker : extra_attr_checkers) {
+      extra_checker(&runtime_attrs_, false);
+    }
+  }
 }
 
 void OpDesc::InferShape(const BlockDesc &block) {
......
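Note: the hunk above relies on a per-op registry of extra-attribute checkers. Below is a minimal, self-contained sketch of that pattern, assuming the checker signature `void(AttributeMap*, bool)` implied by the call `extra_checker(&runtime_attrs_, false)`; the class layout and member names are illustrative, not the actual Paddle source.

```cpp
#include <functional>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>

// Stand-in for paddle::framework::Attribute, which is a much larger variant.
using Attribute = std::variant<bool, int, float, std::string>;
using AttributeMap = std::unordered_map<std::string, Attribute>;
// Signature inferred from the calls above; we assume the bool selects a
// "only check, do not fill defaults" mode, since the diff always passes false.
using ExtraAttrChecker = std::function<void(AttributeMap*, bool only_check)>;

class ExtraInfoUtils {
 public:
  static ExtraInfoUtils& Instance() {
    static ExtraInfoUtils instance;  // Meyers singleton, as Instance() implies
    return instance;
  }

  void AddChecker(const std::string& op_type, ExtraAttrChecker checker) {
    checkers_[op_type].push_back(std::move(checker));
  }

  // An empty result means the op declares no extra attributes.
  const std::vector<ExtraAttrChecker>& GetExtraAttrsChecker(
      const std::string& op_type) const {
    static const std::vector<ExtraAttrChecker> kEmpty;
    auto it = checkers_.find(op_type);
    return it == checkers_.end() ? kEmpty : it->second;
  }

 private:
  std::unordered_map<std::string, std::vector<ExtraAttrChecker>> checkers_;
};
```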
@@ -38,9 +38,18 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(
     }
   }
   auto& info = OpInfoMap::Instance().Get(type);
-  if (attr_check && info.Checker() != nullptr) {
-    info.Checker()->Check(&standard_attrs);
-  }
+  if (attr_check) {
+    if (info.Checker() != nullptr) {
+      info.Checker()->Check(&standard_attrs);
+    }
+    const auto& extra_attr_checkers =
+        operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(type);
+    if (!extra_attr_checkers.empty()) {
+      for (const auto& checker : extra_attr_checkers) {
+        checker(&runtime_attrs, false);
+      }
+    }
+  }
   auto op_base = std::unique_ptr<OperatorBase>(
       info.Creator()(type, inputs, outputs, standard_attrs));
   op_base->SetRuntimeAttributeMap(runtime_attrs);
......
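The same checkers now also run in OpRegistry::CreateOp, so every created operator carries defaulted extra attributes before SetRuntimeAttributeMap is called. A hedged sketch of what one generated checker might do, reusing the AttributeMap alias from the sketch above; the body is an assumption based on the fill-in-defaults behavior, not code from this PR:

```cpp
// Hypothetical checker for a single extra attribute. The (AttributeMap*, bool)
// signature matches the calls in the diff.
auto use_mkldnn_checker = [](AttributeMap* attrs, bool only_check) {
  if (!only_check && attrs->count("use_mkldnn") == 0) {
    // Inject the default declared for this op in the YAML file further down.
    (*attrs)["use_mkldnn"] = false;
  }
};
```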
@@ -251,10 +251,6 @@ class DataNormOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::string>("data_layout", "").SetDefault("NCHW");
     AddAttr<bool>("sync_stats", "(bool, default false) only used in multi-GPU")
         .SetDefault(false);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddInput("X", "The input tensor");
     AddInput("BatchSize",
              "BatchSize is a 1-dimensional tensor of size C "
......
@@ -77,15 +77,6 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
                   "(bool, default false) Set to true for inference only, false "
                   "for training. Some layers may run faster when this is true.")
         .SetDefault(false);
-    AddAttr<bool>("fix_seed",
-                  "A flag indicating whether to use a fixed seed to generate "
-                  "random mask. NOTE: DO NOT set this flag to true in "
-                  "training. Setting this flag to true is only useful in "
-                  "unittest or for debug that always the same output units "
-                  "will be dropped.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<int>("seed", "Dropout random seed.").SetDefault(0).AsExtra();
     AddAttr<std::string>(
         "dropout_implementation",
         "[\"downgrade_in_infer\"|\"upscale_in_train\"]"
......
@@ -39,9 +39,8 @@ class GeluOp : public framework::OperatorWithKernel {
     framework::DataLayout layout = framework::DataLayout::kAnyLayout;
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
 #ifdef PADDLE_WITH_MKLDNN
-    auto it = this->Attrs().find("use_mkldnn");
     if (library == framework::LibraryType::kPlain &&
-        it != this->Attrs().end() && this->CanMKLDNNBeUsed(ctx, data_type)) {
+        this->CanMKLDNNBeUsed(ctx, data_type)) {
       library = framework::LibraryType::kMKLDNN;
       layout = framework::DataLayout::kMKLDNN;
     }
@@ -100,21 +99,6 @@ class GeluOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("approximate",
                   "(bool, default false) use approximation of gelu")
         .SetDefault(false);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "int8", "bfloat16"})
-        .AsExtra();
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Gelu Activation Operator.
......
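With `use_mkldnn` no longer declared in GeluOpMaker, it can never appear in `this->Attrs()`, which is why the `find("use_mkldnn")` guard in GetExpectedKernelType is dropped: the flag now lives in the runtime attribute map that the extra-attr checkers populate. Presumably `CanMKLDNNBeUsed` consults that map, roughly as in this sketch (an assumption reusing the Attribute/AttributeMap aliases from the first sketch, not the actual helper):

```cpp
// Hypothetical lookup: read the flag from the runtime attributes, where the
// extra-attr checker guarantees a defaulted entry exists.
bool UseMKLDNNFromRuntimeAttrs(const AttributeMap& runtime_attrs) {
  auto it = runtime_attrs.find("use_mkldnn");
  return it != runtime_attrs.end() && std::get<bool>(it->second);
}
```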
@@ -67,12 +67,6 @@ class GridSampleOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Output",
               "(Tensor) Output tensor with shape [N, C, H, W] or shape [N,C, "
               "D, H ,W]");
-    AddAttr<bool>(
-        "use_cudnn",
-        "(bool, default true) Only used in cudnn kernel, need install cudnn")
-        .SetDefault(true)
-        .AsExtra();
     AddAttr<bool>(
         "align_corners",
         "(bool, default true) If align_corners is true, it will project"
......
@@ -180,9 +180,6 @@ class GRUOpMaker : public framework::OpProtoAndCheckerMaker {
                   "(bool, default: False) "
                   "whether to compute reversed GRU.")
         .SetDefault(false);
-    AddAttr<bool>("is_test", "True if in test phase.")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<bool>("origin_mode",
                   "bool"
                   "use origin mode in article https://arxiv.org/abs/1412.3555")
......
@@ -550,10 +550,6 @@ class InterpolateV2OpMaker : public framework::OpProtoAndCheckerMaker {
                   "can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 , "
                   "can be \'1\' for src_idx = scale*dst_index .")
         .SetDefault(1);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 This operator samples input X to given output shape by using specified
 interpolation method, the interpolation methods can be \"nearest\"
......
@@ -171,22 +171,6 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
                             "greater than zero. But received [%d].",
                             begin_norm_axis));
         });
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
-    AddAttr<bool>("is_test",
-                  "(bool, default false) Set to true for inference only, false "
-                  "for training. Some layers may run faster when this is true.")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Assume feature vectors exist on dimensions
 :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
......
@@ -57,10 +57,6 @@ class LogSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
                  "The dimension index of Input(x) to perform log_softmax,"
                  "default -1 for last dimension")
         .SetDefault(-1);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 LogSoftmax Operator.
......
@@ -302,10 +302,6 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker {
                   "beta is the power number.")
         .SetDefault(0.75)
         .GreaterThan(0.0);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<std::string>(
         "data_format",
         "(string, default NCHW) Only used in "
@@ -313,12 +309,6 @@
         "Defaults to \"NHWC\". Specify the data format of the output data, "
         "the input will be transformed automatically. ")
         .SetDefault("AnyLayout");
-    AddAttr<bool>("is_test",
-                  "(bool, default false) Set to true for inference only, false "
-                  "for training. Some layers may run faster when this is true.")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Local Response Normalization Operator.
......
@@ -773,10 +773,6 @@ class Pad2dOpMaker : public framework::OpProtoAndCheckerMaker {
              "An optional string from: \"NHWC\", \"NCHW\". "
              "Defaults to \"NHWC\". Specify the data format of the input data.")
         .SetDefault("NCHW");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Pad2d Operator.
 Pad 2-d images according to 'paddings' and 'mode'.
......
@@ -111,10 +111,6 @@ class Pad3dOpMaker : public framework::OpProtoAndCheckerMaker {
              "An optional string from: \"NDHWC\", \"NCDHW\". "
              "Defaults to \"NDHWC\". Specify the data format of the input data.")
         .SetDefault("NCDHW");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Pad3d Operator.
 Pad 3-d images according to 'paddings' and 'mode'.
......
@@ -155,11 +155,6 @@ class PartialSumOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input tensors of partial_sum operator.").AsDuplicable();
     AddOutput("Out", "Output tensor of partial_sum operator.");
-    AddAttr<bool>(
-        "use_mkldnn",
-        "(bool, default false) Indicates if MKL-DNN kernel will be used")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<int>("start_index", "The start index of tensor wanted to be added.")
         .SetDefault(0);
     AddAttr<int>("length", "The length of tensor wanted to be added.")
......
@@ -104,21 +104,6 @@ There are modes:
     AddAttr<std::string>("data_format",
                          "Data format that specifies the layout of input")
         .SetDefault("NCHW");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
-    AddAttr<bool>("is_test",
-                  "(bool, default false) Set to true for inference only, false "
-                  "for training. Some layers may run faster when this is true.")
-        .SetDefault(false)
-        .AsExtra();
   }
 };
......
@@ -39,15 +39,6 @@ class RenormOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("axis",
                  "int,the dimension to slice over to get the sub-tensors");
     AddAttr<float>("max_norm", "(float, the norm upper-bound");
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Renorm Operator.
......
@@ -35,6 +35,16 @@
   outputs :
     out : Out
 
+- api : bicubic_interp (bicubic_interp_v2)
+  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : bilinear_interp (bilinear_interp_v2)
+  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : cholesky
   inputs :
     x : X
@@ -105,6 +115,11 @@
   outputs :
     out : Out
 
+- api : data_norm
+  backward : data_norm_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : depthwise_conv2d
   backward : depthwise_conv2d_grad
   extra :
@@ -154,6 +169,16 @@
   outputs :
     out : Out
 
+- api : dropout
+  backward : dropout_grad
+  extra :
+    attrs : [bool fix_seed = false, int seed = 0]
+
+- api : dropout_nd
+  backward : dropout_nd_grad
+  extra :
+    attrs : [bool fix_seed = false, int seed = 0]
+
 - api : erf
   inputs :
     x : X
@@ -166,35 +191,117 @@
   outputs :
     out : Out
 
+- api : fft_c2c
+  inputs: {x: X}
+  outputs: {out: Out}
+
+- api : fft_c2r
+  inputs: {x: X}
+  outputs: {out: Out}
+
+- api : fft_r2c
+  inputs: {x: X}
+  outputs: {out: Out}
+
+- api : gelu
+  backward : gelu_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
+
+- api : grid_sampler
+  backward : grid_sampler_grad
+  extra :
+    attrs : [bool use_cudnn = true]
+
+- api : gru
+  backward : gru_grad
+  extra :
+    attrs : [bool is_test = false]
+
 - api : inplace_abn
   backward : inplace_abn_grad
   extra :
     attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
 
+- api : layer_norm
+  backward : layer_norm_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
+
 - api : lgamma
   inputs :
     x : X
   outputs :
     out : Out
 
+- api : linear_interp (linear_interp_v2)
+  backward : linear_interp_grad (linear_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : log_softmax
+  backward : log_softmax_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : lrn
+  backward : lrn_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool is_test = false]
+
 - api : mv
   inputs :
     {x : X, vec : Vec}
   outputs :
     out : Out
 
+- api : nearest_interp (nearest_interp_v2)
+  backward : nearest_interp_grad (nearest_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : pad2d
+  backward : pad2d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : pad3d
+  backward : pad3d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : partial_sum
+  backward : partial_sum_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : poisson
   inputs :
     x : X
   outputs :
     out : Out
 
+- api : prelu
+  backward : prelu_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
+
+- api : renorm
+  backward : renorm_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
 - api : solve
   inputs :
     {x : X, y : Y}
   outputs :
     out : Out
 
+- api : stack
+  backward : stack_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : sync_batch_norm
   backward : sync_batch_norm_grad
   extra :
@@ -206,20 +313,13 @@
   outputs :
     out : Out
 
+- api : trilinear_interp (trilinear_interp_v2)
+  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : trunc
   inputs :
     x : X
   outputs :
     out : Out
-
-- api: fft_c2c
-  inputs: {x: X}
-  outputs: {out: Out}
-
-- api: fft_c2r
-  inputs: {x: X}
-  outputs: {out: Out}
-
-- api: fft_r2c
-  inputs: {x: X}
-  outputs: {out: Out}
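Each `extra : attrs : [...]` entry above declares, in one place, the attributes that were previously hard-coded with `.AsExtra()` in the individual OpMakers. A plausible (hypothetical) way such an entry becomes registered checkers is sketched below; `RegisterExtraAttr` is an illustrative name reusing the `ExtraInfoUtils` sketch from the top of this page, and the real code generation driven by this YAML is not part of the excerpt.

```cpp
// Sketch: translate one parsed YAML attribute spec, e.g.
//   attrs : [bool fix_seed = false, int seed = 0]   (the dropout entry)
// into a checker that injects the default when the attribute is absent.
template <typename T>
void RegisterExtraAttr(const std::string& op_type,
                       const std::string& attr_name,
                       T default_value) {
  ExtraInfoUtils::Instance().AddChecker(
      op_type, [=](AttributeMap* attrs, bool only_check) {
        if (!only_check && attrs->count(attr_name) == 0) {
          (*attrs)[attr_name] = Attribute(default_value);
        }
      });
}

// Usage matching the dropout entry above (hypothetical):
//   RegisterExtraAttr<bool>("dropout", "fix_seed", false);
//   RegisterExtraAttr<int>("dropout", "seed", 0);
```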