Unverified commit 2a149741, authored by zyfncg, committed by GitHub

Clear extra attributes of some Op in OpMaker (#45613)

* remove extra attr of abs in opmaker

* remove extra attrs of some op in opmaker

* remove is_test of conv

* fix attr getting of interpretercore

* fix inplace_abn

* fix bug

* fix bug of create_op

* refine code format
Parent 067d3aa0
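
Editor's note (not part of the diff): this commit strips backend-specific "extra" attributes (use_mkldnn, use_cudnn, the fuse_* family, workspace_size_MB, ...) out of the OpMaker definitions and tracks them in a separate runtime-attribute map, so attribute lookup now consults two maps: the op's regular attributes first, then the runtime extras. Below is a minimal C++ sketch of that fallback lookup; the types and names are simplified illustrations of what AttrReader(op_.Attrs(), op_.RuntimeAttrs()) presumably does, not the exact Paddle API.

#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <variant>

using Attribute = std::variant<bool, int, float, std::string>;
using AttributeMap = std::unordered_map<std::string, Attribute>;

class AttrReader {
 public:
  AttrReader(const AttributeMap& attrs, const AttributeMap& runtime_attrs)
      : attrs_(attrs), runtime_attrs_(runtime_attrs) {}

  template <typename T>
  const T& Get(const std::string& name) const {
    // Regular (proto-declared) attributes win; extras that were stripped
    // from the OpMaker are found in the runtime map instead.
    auto it = attrs_.find(name);
    if (it == attrs_.end()) {
      it = runtime_attrs_.find(name);
      if (it == runtime_attrs_.end())
        throw std::out_of_range("attribute not found: " + name);
    }
    return std::get<T>(it->second);
  }

 private:
  const AttributeMap& attrs_;
  const AttributeMap& runtime_attrs_;
};

int main() {
  AttributeMap attrs{{"axis", 1}};
  AttributeMap runtime_attrs{{"use_mkldnn", false}};
  AttrReader reader(attrs, runtime_attrs);
  std::cout << reader.Get<int>("axis") << " "
            << reader.Get<bool>("use_mkldnn") << "\n";  // prints: 1 0
}
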
@@ -117,7 +117,7 @@ bool InterpretercoreInferShapeContext::HasOutputs(const std::string& name,
}
AttrReader InterpretercoreInferShapeContext::Attrs() const {
return AttrReader(op_.Attrs());
return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
}
std::vector<std::string> InterpretercoreInferShapeContext::Inputs(
......
@@ -65,7 +65,15 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(
op_base = std::unique_ptr<OperatorBase>(
info.Creator()(type, inputs, outputs, attrs));
}
op_base->SetRuntimeAttributeMap(runtime_attrs);
const auto& extra_attr_checkers =
operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(type);
if (!extra_attr_checkers.empty()) {
auto op_runtime_attr_map = runtime_attrs;
for (const auto& checker : extra_attr_checkers) {
checker(&op_runtime_attr_map, false);
}
op_base->SetRuntimeAttributeMap(op_runtime_attr_map);
}
return op_base;
}
......
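
Editor's note (not part of the diff): CreateOp now runs every extra-attribute checker registered for the op type over a copy of the incoming runtime attrs before attaching them to the op. A hedged sketch of what one such checker presumably looks like follows; the signature is inferred from the call site checker(&op_runtime_attr_map, false), and the function name and the meaning of the bool flag are assumptions.

#include <string>
#include <unordered_map>
#include <variant>

using Attribute = std::variant<bool, int, float, std::string>;
using AttributeMap = std::unordered_map<std::string, Attribute>;

// Fills in defaults for extra attrs that the op proto no longer declares.
void AbsExtraAttrChecker(AttributeMap* attr_map, bool only_check_exist) {
  if (only_check_exist) return;  // assumed: verification-only mode adds nothing
  // emplace keeps any user-supplied value and inserts the default otherwise.
  attr_map->emplace("use_mkldnn", false);
  attr_map->emplace("use_cudnn", false);
}
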
@@ -767,7 +767,9 @@ class RuntimeInferShapeContext : public InferShapeContext {
}
}
AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }
AttrReader Attrs() const override {
return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
}
std::vector<std::string> Inputs(const std::string& name) const override {
return op_.Inputs(name);
......
@@ -54,15 +54,6 @@ class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("X", "(Tensor), The input tensor of abs op.");
AddOutput("Out", "(Tensor), The output tensor of abs op.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_cudnn",
"(bool, default false) Only used in cudnn kernel, need "
"install cudnn")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Abs Operator.
......
@@ -72,10 +72,6 @@ class AddMMOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "(Tensor), The first input tensor for mul.");
AddInput("Y", "(Tensor), The second input tensor for mul.");
AddOutput("Out", "(Tensor), The output tensor of addmm op.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<float>("Alpha", "coefficient of x*y.").SetDefault(1.0f);
AddAttr<float>("Beta", "coefficient of input.").SetDefault(1.0f);
AddComment(R"DOC(
......
@@ -158,11 +158,6 @@ class AffineGridOpMaker : public framework::OpProtoAndCheckerMaker {
"(Tensor) The shape of target image with format [N, C, H, W].")
.AsDispensable();
AddOutput("Output", "(Tensor) Output Tensor with shape [N, H, W, 2].");
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(true)
.AsExtra();
AddAttr<bool>("align_corners",
"(bool, default false) Whether to align the corners of input"
"and output.")
......
@@ -39,15 +39,6 @@ class AngleOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("X", "(Tensor), The input tensor of angle op.");
AddOutput("Out", "(Tensor), The output tensor of angle op.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_cudnn",
"(bool, default false) Only used in cudnn kernel, need "
"install cudnn")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Angle Operator.
......
@@ -293,14 +293,6 @@ void BatchNormOpMaker::Make() {
"NHWC kernel")
.AsDispensable()
.AsExtra();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("fuse_with_relu",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_global_stats",
"(bool, default false) Whether to use global mean and "
"variance. In inference or test mode, set use_global_stats "
......
@@ -64,16 +64,6 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker {
"input(x)");
AddAttr<AttrType>("min", "float number, the minimum value to clip by.");
AddAttr<AttrType>("max", "float number, the maximum value to clip by.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "bfloat16"})
.AsExtra();
AddComment(R"DOC(
Clip Operator.
......
@@ -81,11 +81,6 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("X", "Input tensors of concat operator.").AsDuplicable();
AddOutput("Out", "Output tensor of concat operator.");
AddAttr<bool>(
"use_mkldnn",
"(bool, default false) Indicates if MKL-DNN kernel will be used")
.SetDefault(false)
.AsExtra();
AddAttr<int>("axis",
"The axis along which the input tensors will be concatenated."
"The axis could also be negative numbers. Negative axis is "
@@ -99,18 +94,6 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
"It has higher priority than Attr(axis). "
"The shape of AxisTensor must be [1].")
.AsDispensable();
AddAttr<bool>(
"use_quantizer",
"(bool, default false) "
"This parameter is no longer used. Use 'mkldnn_data_type' instead.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
AddComment(R"DOC(
Concat Operator.
......
@@ -285,11 +285,6 @@ framework::OpKernelType ConvOp::GetKernelTypeForVar(
}
void Conv2DOpMaker::Make() {
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false)
.AsExtra();
AddInput("Input",
"(Tensor) The input tensor of convolution operator. "
"The format of input tensor is NCHW or NHWC, where N is batch size, "
@@ -356,22 +351,6 @@ void Conv2DOpMaker::Make() {
"the input will be transformed automatically. ")
.SetDefault("NCHW");
// TODO(dzhwinter): need to registered layout transform function
AddAttr<int>("workspace_size_MB",
"Only used in cudnn kernel. Need set use_cudnn to true."
"workspace size for cudnn, in MB, "
"workspace is a section of GPU memory which will be "
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardware. This size should be chosen carefully.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
.AsExtra();
AddAttr<bool>("exhaustive_search",
"(bool, default false) cuDNN has many algorithm to calculation "
"convolution, whether enable exhaustive search "
"for cuDNN convolution or not, default is False.")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Convolution Operator.
@@ -403,12 +382,18 @@ $$
Apply();
}
class DepthwiseConv2DOpMaker : public Conv2DOpMaker {
protected:
void Apply() override {
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false)
.AsExtra();
}
};
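
Editor's note (not part of the diff): depthwise_conv2d gets a dedicated maker here so that use_cudnn stays declared on the op itself (defaulting to false, unlike plain conv2d); the registration further below therefore switches from ops::Conv2DOpMaker to ops::DepthwiseConv2DOpMaker, and use_cudnn correspondingly disappears from the depthwise_conv2d entry in the extra-attrs YAML at the end of this commit.
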
void Conv3DOpMaker::Make() {
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false)
.AsExtra();
AddInput(
"Input",
"(Tensor) The input tensor of convolution operator. "
@@ -465,47 +450,6 @@ void Conv3DOpMaker::Make() {
"dilations(d_dilation, h_dilation, w_dilation) of "
"convolution operator.")
.SetDefault({1, 1, 1});
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>("fuse_activation",
"(string, default \"\") Only used in mkldnn kernel")
.SetDefault("")
.AsExtra();
AddAttr<float>("fuse_alpha",
"(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f)
.AsExtra();
AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f)
.AsExtra();
AddAttr<bool>(
"use_addto",
"(bool, default false) If use addto strategy or not, only used in "
"cudnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("fuse_residual_connection",
"(bool, default false) Only used in mkldnn kernel. Used "
"whenever convolution output is as an input to residual "
"connection.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCDHW) Only used in "
@@ -513,25 +457,6 @@ void Conv3DOpMaker::Make() {
"Defaults to \"NDHWC\". Specify the data format of the output data, "
"the input will be transformed automatically. ")
.SetDefault("NCDHW");
AddAttr<bool>("force_fp32_output",
"(bool, default false) Only used in mkldnn INT8 kernel")
.SetDefault(false)
.AsExtra();
// TODO(dzhwinter): need to registered layout transform function
AddAttr<int>("workspace_size_MB",
"Only used in cudnn kernel. workspace size for cudnn, in MB, "
"workspace is a section of GPU memory which will be "
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardware. This size should be chosen carefully.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
.AsExtra();
AddAttr<bool>("exhaustive_search",
"(bool, default false) cuDNN has many algorithm to calculation "
"convolution, whether enable exhaustive search "
"for cuDNN convolution or not, default is False.")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Convolution3D Operator.
@@ -811,7 +736,7 @@ REGISTER_OPERATOR(conv2d_grad_grad, ops::ConvOpDoubleGrad);
// depthwise convolution op
REGISTER_OPERATOR(depthwise_conv2d,
ops::ConvOp,
ops::Conv2DOpMaker,
ops::DepthwiseConv2DOpMaker,
ops::ConvOpInferVarType,
ops::Conv2DGradMaker<paddle::framework::OpDesc>,
ops::Conv2DGradMaker<paddle::imperative::OpBase>);
......
@@ -91,11 +91,6 @@ framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
}
void Conv2DTransposeOpMaker::Make() {
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false)
.AsExtra();
AddInput("Input",
"(Tensor) The input tensor of convolution transpose operator. "
"The format of input tensor is NCHW or NHWC. Where N is batch size, "
@@ -146,40 +141,6 @@ void Conv2DTransposeOpMaker::Make() {
"(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
"transpose operator.")
.SetDefault({0, 0});
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("force_fp32_output",
"(bool, default false) Force BF16 kernel output FP32, only "
"used in MKL-DNN BF16")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "bfloat16"})
.AsExtra();
AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>("fuse_activation",
"(string, default \"\") Only used in mkldnn kernel")
.SetDefault("")
.AsExtra();
AddAttr<float>("fuse_alpha",
"(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f)
.AsExtra();
AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCHW) Only used in "
@@ -193,14 +154,6 @@ void Conv2DTransposeOpMaker::Make() {
"\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
"Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
.SetDefault("EXPLICIT");
AddAttr<int>("workspace_size_MB",
"Used in cudnn kernel only. workspace size for cudnn, in MB, "
"workspace is a section of GPU memory which will be "
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardward. This size should be carefully set.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
.AsExtra();
AddComment(R"DOC(
Convolution2D Transpose Operator.
@@ -280,15 +233,6 @@ void Conv3DTransposeOpMaker::Make() {
"(int default:1), the groups number of the convolution3d "
"transpose operator. ")
.SetDefault(1);
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCHW) Only used in "
@@ -302,14 +246,6 @@ void Conv3DTransposeOpMaker::Make() {
"\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
"Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
.SetDefault("EXPLICIT");
AddAttr<int>("workspace_size_MB",
"Used in cudnn kernel only. workspace size for cudnn, in MB, "
"workspace is a section of GPU memory which will be "
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardward. This size should be carefully set.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
.AsExtra();
AddComment(R"DOC(
Convolution3D Transpose Operator.
......
# - api : conv3d_transpose
# backward : conv3d_transpose_grad
# extra :
# attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
- api : abs
backward : abs_grad
extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false]
- api : addmm
backward : addmm_grad
extra :
attrs : [bool use_mkldnn = false]
- api : affine_grid
backward : affine_grid_grad
extra :
attrs : [bool use_cudnn = true]
- api : angle
backward : angle_grad
extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false]
- api : atan2
inputs :
@@ -9,6 +24,11 @@
outputs :
out : Out
- api : batch_norm
backward : batch_norm_grad
extra :
attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
- api : bernoulli
inputs :
x : X
@@ -27,24 +47,55 @@
outputs :
out : Out
- api : clip
backward : clip_grad
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- api : concat
backward : concat_grad
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]
- api : conv2d
backward : conv2d_grad
extra :
attrs : [bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
int workspace_size_MB = 512, bool exhaustive_search = false]
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
- api : conv2d_fusion
extra :
attrs : [bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
int workspace_size_MB = 512, bool exhaustive_search = false]
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
- api : conv2d_transpose
backward : conv2d_transpose_grad
extra :
attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
- api : conv3d
backward : conv3d_grad
extra :
attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
- api : conv3d_transpose
backward : conv3d_transpose_grad
extra :
attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
- api : cross
inputs :
@@ -57,12 +108,20 @@
- api : depthwise_conv2d
backward : depthwise_conv2d_grad
extra :
attrs : [bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
int workspace_size_MB = 512, bool exhaustive_search = false]
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
- api : depthwise_conv2d_transpose
backward : depthwise_conv2d_transpose_grad
extra :
attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
- api : diag
op_name : diag_v2
@@ -108,6 +167,11 @@
outputs :
out : Out
- api : inplace_abn
backward : inplace_abn_grad
extra :
attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
- api : lgamma
inputs :
x : X
@@ -132,6 +196,11 @@
outputs :
out : Out
- api : sync_batch_norm
backward : sync_batch_norm_grad
extra :
attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
- api : trace
inputs :
x : Input
......
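
Editor's note (not part of the diff): the YAML entries above become the registry of extra attributes, one `api` entry per op listing typed extras with defaults (note that the hard-coded workspace_size_MB = 512 is replaced by platform::GetDefaultConvWorkspaceSizeLimitMB()). These entries are presumably code-generated into the per-op default-filling checkers that OpRegistry::CreateOp fetches through ExtraInfoUtils. A sketch of how that registry might be shaped follows; only Instance() and GetExtraAttrsChecker() appear in the diff, and the rest (including AddExtraAttrsChecker) is an assumption for illustration.

#include <functional>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>

using Attribute = std::variant<bool, int, float, std::string>;
using AttributeMap = std::unordered_map<std::string, Attribute>;
using ExtraAttrChecker = std::function<void(AttributeMap*, bool)>;

class ExtraInfoUtils {
 public:
  static ExtraInfoUtils& Instance() {
    static ExtraInfoUtils instance;
    return instance;
  }

  // Hypothetical registration hook, e.g. called by generated code per op.
  void AddExtraAttrsChecker(const std::string& op_type,
                            ExtraAttrChecker checker) {
    checkers_[op_type].push_back(std::move(checker));
  }

  // Returns all checkers for an op type; empty vector if none registered.
  const std::vector<ExtraAttrChecker>& GetExtraAttrsChecker(
      const std::string& op_type) const {
    static const std::vector<ExtraAttrChecker> kEmpty;
    auto it = checkers_.find(op_type);
    return it == checkers_.end() ? kEmpty : it->second;
  }

 private:
  std::unordered_map<std::string, std::vector<ExtraAttrChecker>> checkers_;
};
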