Unverified commit 3879f6b8, authored by zyfncg, committed by GitHub

Clear extra attributes of some Op in OpMaker (Part2) (#45684)

* remove extra attr of opmaker

* remove extra attr of gru

* revert scale

* fix bug of create_op

* add extra attr checker in infer_shape

* fix gelu
Parent 462cf0ef
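The first two hunks below hook the new extra-attribute checkers into `OpDesc::CheckAttrs()` and `OpRegistry::CreateOp()`. As a minimal sketch of the pattern (the `Attribute`/`AttributeMap` aliases, `MakeDefaultChecker`, and the meaning of the boolean flag are illustrative assumptions, not Paddle's actual definitions), each checker can be thought of as a callable that fills a default into the op's runtime-attribute map when the attribute is missing:

```cpp
// Minimal sketch of the extra-attribute checker pattern (assumed types,
// not Paddle's implementation). Compile with -std=c++17.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>

using Attribute = std::variant<bool, int, float, std::string>;
using AttributeMap = std::unordered_map<std::string, Attribute>;
// Matches the call shape used in the diff: checker(&runtime_attrs, false);
using ExtraAttrChecker = std::function<void(AttributeMap*, bool)>;

// Build a checker that inserts `def` under `name` when the attribute is absent.
template <typename T>
ExtraAttrChecker MakeDefaultChecker(std::string name, T def) {
  return [name = std::move(name), def](AttributeMap* attrs,
                                       bool only_check_existing) {
    if (!only_check_existing && attrs->count(name) == 0) {
      attrs->emplace(name, def);
    }
  };
}

int main() {
  // e.g. the extra attrs that op_compat.yaml declares for dropout below:
  //   attrs : [bool fix_seed = false, int seed = 0]
  std::vector<ExtraAttrChecker> checkers = {
      MakeDefaultChecker("fix_seed", false),
      MakeDefaultChecker("seed", 0),
  };
  AttributeMap runtime_attrs;
  for (const auto& checker : checkers) {
    checker(&runtime_attrs, /*only_check_existing=*/false);
  }
  std::cout << "filled " << runtime_attrs.size() << " runtime attrs\n";  // 2
}
```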
@@ -1018,6 +1018,13 @@ void OpDesc::CheckAttrs() {
   }
   VLOG(10) << "begin to check attribute of " << Type();
   checker->Check(&attrs_);
+  const auto &extra_attr_checkers =
+      operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(Type());
+  if (!extra_attr_checkers.empty()) {
+    for (const auto &extra_checker : extra_attr_checkers) {
+      extra_checker(&runtime_attrs_, false);
+    }
+  }
 }

 void OpDesc::InferShape(const BlockDesc &block) {
......
@@ -38,8 +38,17 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(
     }
   }
   auto& info = OpInfoMap::Instance().Get(type);
-  if (attr_check && info.Checker() != nullptr) {
-    info.Checker()->Check(&standard_attrs);
+  if (attr_check) {
+    if (info.Checker() != nullptr) {
+      info.Checker()->Check(&standard_attrs);
+    }
+    const auto& extra_attr_checkers =
+        operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(type);
+    if (!extra_attr_checkers.empty()) {
+      for (const auto& checker : extra_attr_checkers) {
+        checker(&runtime_attrs, false);
+      }
+    }
   }
   auto op_base = std::unique_ptr<OperatorBase>(
       info.Creator()(type, inputs, outputs, standard_attrs));
......
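Both call sites follow the same two-step shape: run the op's registered attribute checker over the declared attributes, then apply every extra-attribute checker to the separate runtime-attribute map. The second argument is `false` at both sites, which presumably lets the checkers fill in defaults rather than only validating attributes already present (see the sketch above).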
@@ -251,10 +251,6 @@ class DataNormOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::string>("data_layout", "").SetDefault("NCHW");
     AddAttr<bool>("sync_stats", "(bool, default false) only used in multi-GPU")
         .SetDefault(false);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddInput("X", "The input tensor");
     AddInput("BatchSize",
              "BatchSize is a 1-dimensional tensor of size C "
......
@@ -77,15 +77,6 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
              "(bool, default false) Set to true for inference only, false "
              "for training. Some layers may run faster when this is true.")
         .SetDefault(false);
-    AddAttr<bool>("fix_seed",
-                  "A flag indicating whether to use a fixed seed to generate "
-                  "random mask. NOTE: DO NOT set this flag to true in "
-                  "training. Setting this flag to true is only useful in "
-                  "unittest or for debug that always the same output units "
-                  "will be dropped.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<int>("seed", "Dropout random seed.").SetDefault(0).AsExtra();
     AddAttr<std::string>(
         "dropout_implementation",
         "[\"downgrade_in_infer\"|\"upscale_in_train\"]"
......
@@ -39,9 +39,8 @@ class GeluOp : public framework::OperatorWithKernel {
     framework::DataLayout layout = framework::DataLayout::kAnyLayout;
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
 #ifdef PADDLE_WITH_MKLDNN
-    auto it = this->Attrs().find("use_mkldnn");
     if (library == framework::LibraryType::kPlain &&
-        it != this->Attrs().end() && this->CanMKLDNNBeUsed(ctx, data_type)) {
+        this->CanMKLDNNBeUsed(ctx, data_type)) {
       library = framework::LibraryType::kMKLDNN;
       layout = framework::DataLayout::kMKLDNN;
     }
@@ -100,21 +99,6 @@ class GeluOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("approximate",
                   "(bool, default false) use approximation of gelu")
         .SetDefault(false);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "int8", "bfloat16"})
-        .AsExtra();
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Gelu Activation Operator.
......
@@ -67,12 +67,6 @@ class GridSampleOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Output",
               "(Tensor) Output tensor with shape [N, C, H, W] or shape [N,C, "
               "D, H ,W]");
-    AddAttr<bool>(
-        "use_cudnn",
-        "(bool, default true) Only used in cudnn kernel, need install cudnn")
-        .SetDefault(true)
-        .AsExtra();
     AddAttr<bool>(
         "align_corners",
         "(bool, default true) If align_corners is true, it will project"
......
@@ -180,9 +180,6 @@ class GRUOpMaker : public framework::OpProtoAndCheckerMaker {
              "(bool, default: False) "
              "whether to compute reversed GRU.")
         .SetDefault(false);
-    AddAttr<bool>("is_test", "True if in test phase.")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<bool>("origin_mode",
                   "bool"
                   "use origin mode in article https://arxiv.org/abs/1412.3555")
......
@@ -550,10 +550,6 @@ class InterpolateV2OpMaker : public framework::OpProtoAndCheckerMaker {
              "can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 , "
              "can be \'1\' for src_idx = scale*dst_index .")
         .SetDefault(1);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 This operator samples input X to given output shape by using specified
 interpolation method, the interpolation methods can be \"nearest\"
......
@@ -171,22 +171,6 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
                             "greater than zero. But received [%d].",
                             begin_norm_axis));
         });
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
-    AddAttr<bool>("is_test",
-                  "(bool, default false) Set to true for inference only, false "
-                  "for training. Some layers may run faster when this is true.")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Assume feature vectors exist on dimensions
 :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
......
@@ -57,10 +57,6 @@ class LogSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
              "The dimension index of Input(x) to perform log_softmax,"
              "default -1 for last dimension")
         .SetDefault(-1);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 LogSoftmax Operator.
......
@@ -302,10 +302,6 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker {
              "beta is the power number.")
         .SetDefault(0.75)
         .GreaterThan(0.0);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<std::string>(
         "data_format",
         "(string, default NCHW) Only used in "
@@ -313,12 +309,6 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker {
         "Defaults to \"NHWC\". Specify the data format of the output data, "
         "the input will be transformed automatically. ")
         .SetDefault("AnyLayout");
-    AddAttr<bool>("is_test",
-                  "(bool, default false) Set to true for inference only, false "
-                  "for training. Some layers may run faster when this is true.")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Local Response Normalization Operator.
Local Response Normalization Operator.
......
@@ -773,10 +773,6 @@ class Pad2dOpMaker : public framework::OpProtoAndCheckerMaker {
         "An optional string from: \"NHWC\", \"NCHW\". "
         "Defaults to \"NHWC\". Specify the data format of the input data.")
         .SetDefault("NCHW");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Pad2d Operator.
 Pad 2-d images according to 'paddings' and 'mode'.
......
@@ -111,10 +111,6 @@ class Pad3dOpMaker : public framework::OpProtoAndCheckerMaker {
         "An optional string from: \"NDHWC\", \"NCDHW\". "
         "Defaults to \"NDHWC\". Specify the data format of the input data.")
         .SetDefault("NCDHW");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Pad3d Operator.
 Pad 3-d images according to 'paddings' and 'mode'.
......
@@ -155,11 +155,6 @@ class PartialSumOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input tensors of partial_sum operator.").AsDuplicable();
     AddOutput("Out", "Output tensor of partial_sum operator.");
-    AddAttr<bool>(
-        "use_mkldnn",
-        "(bool, default false) Indicates if MKL-DNN kernel will be used")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<int>("start_index", "The start index of tensor wanted to be added.")
         .SetDefault(0);
     AddAttr<int>("length", "The length of tensor wanted to be added.")
......
@@ -104,21 +104,6 @@ There are modes:
     AddAttr<std::string>("data_format",
                          "Data format that specifies the layout of input")
         .SetDefault("NCHW");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
-    AddAttr<bool>("is_test",
-                  "(bool, default false) Set to true for inference only, false "
-                  "for training. Some layers may run faster when this is true.")
-        .SetDefault(false)
-        .AsExtra();
   }
 };
......
@@ -39,15 +39,6 @@ class RenormOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("axis",
                  "int,the dimension to slice over to get the sub-tensors");
     AddAttr<float>("max_norm", "(float, the norm upper-bound");
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Renorm Operator.
......
@@ -35,6 +35,16 @@
   outputs :
     out : Out

+- api : bicubic_interp (bicubic_interp_v2)
+  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : bilinear_interp (bilinear_interp_v2)
+  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : cholesky
   inputs :
     x : X
@@ -105,6 +115,11 @@
   outputs :
     out : Out

+- api : data_norm
+  backward : data_norm_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : depthwise_conv2d
   backward : depthwise_conv2d_grad
   extra :
@@ -154,6 +169,16 @@
   outputs :
     out : Out

+- api : dropout
+  backward : dropout_grad
+  extra :
+    attrs : [bool fix_seed = false, int seed = 0]
+
+- api : dropout_nd
+  backward : dropout_nd_grad
+  extra :
+    attrs : [bool fix_seed = false, int seed = 0]
+
 - api : erf
   inputs :
     x : X
@@ -166,35 +191,117 @@
   outputs :
     out : Out

+- api : fft_c2c
+  inputs: {x: X}
+  outputs: {out: Out}
+
+- api : fft_c2r
+  inputs: {x: X}
+  outputs: {out: Out}
+
+- api : fft_r2c
+  inputs: {x: X}
+  outputs: {out: Out}
+
+- api : gelu
+  backward : gelu_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
+
+- api : grid_sampler
+  backward : grid_sampler_grad
+  extra :
+    attrs : [bool use_cudnn = true]
+
+- api : gru
+  backward : gru_grad
+  extra :
+    attrs : [bool is_test = false]
+
+- api : inplace_abn
+  backward : inplace_abn_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
+
+- api : layer_norm
+  backward : layer_norm_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
+
 - api : lgamma
   inputs :
     x : X
   outputs :
     out : Out

+- api : linear_interp (linear_interp_v2)
+  backward : linear_interp_grad (linear_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : log_softmax
+  backward : log_softmax_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : lrn
+  backward : lrn_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool is_test = false]
+
 - api : mv
   inputs :
     {x : X, vec : Vec}
   outputs :
     out : Out

+- api : nearest_interp (nearest_interp_v2)
+  backward : nearest_interp_grad (nearest_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : pad2d
+  backward : pad2d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : pad3d
+  backward : pad3d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- api : partial_sum
+  backward : partial_sum_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : poisson
   inputs :
     x : X
   outputs :
     out : Out

+- api : prelu
+  backward : prelu_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
+
+- api : renorm
+  backward : renorm_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
 - api : solve
   inputs :
     {x : X, y : Y}
   outputs :
     out : Out

+- api : stack
+  backward : stack_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : sync_batch_norm
   backward : sync_batch_norm_grad
   extra :
@@ -206,20 +313,13 @@
   outputs :
     out : Out

+- api : trilinear_interp (trilinear_interp_v2)
+  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - api : trunc
   inputs :
     x : X
   outputs :
     out : Out
-
-- api: fft_c2c
-  inputs: {x: X}
-  outputs: {out: Out}
-
-- api: fft_c2r
-  inputs: {x: X}
-  outputs: {out: Out}
-
-- api: fft_r2c
-  inputs: {x: X}
-  outputs: {out: Out}
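Each op_compat.yaml entry added above gives the op name (with the legacy operator name in parentheses where they differ), the matching backward op, and, under `extra : attrs :`, the typed extra attributes and defaults that were removed from the corresponding OpMaker. The `fft_*` entries carry no extra attrs; they appear to have simply been moved into alphabetical order, which is why identical entries are removed from the end of the file.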