Unverified commit cb28753c, authored by 王明冬, committed by GitHub

add the extra and quantization for op def, test=develop (#35076)

Parent: b0a1d122
@@ -322,8 +322,19 @@ class TypedAttrChecker {
   typedef std::function<void(const T&)> ValueChecker;
 
  public:
-  explicit TypedAttrChecker(const std::string& attr_name)
-      : attr_name_(attr_name) {}
+  explicit TypedAttrChecker(const std::string& attr_name,
+                            proto::OpProto_Attr* attr)
+      : attr_name_(attr_name), attr_(attr) {}
+
+  TypedAttrChecker& AsExtra() {
+    attr_->set_extra(true);
+    return *this;
+  }
+
+  TypedAttrChecker& AsQuant() {
+    attr_->set_quant(true);
+    return *this;
+  }
 
   TypedAttrChecker& InEnum(const std::unordered_set<T>& range) {
     value_checkers_.push_back(EnumInContainer<T>(range));
@@ -398,6 +409,7 @@ class TypedAttrChecker {
 
  private:
   std::string attr_name_;
+  proto::OpProto_Attr* attr_;
   std::vector<ValueChecker> value_checkers_;
   std::vector<DefaultValueChecker> default_value_setter_;
 };
@@ -408,8 +420,9 @@ class OpAttrChecker {
 
  public:
   template <typename T>
-  TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name) {
-    attr_checkers_.push_back(TypedAttrChecker<T>(attr_name));
+  TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name,
+                                      proto::OpProto_Attr* attr) {
+    attr_checkers_.push_back(TypedAttrChecker<T>(attr_name, attr));
     AttrChecker& checker = attr_checkers_.back();
     return *(checker.target<TypedAttrChecker<T>>());
   }
......
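With this change each `TypedAttrChecker` carries a pointer to its `proto::OpProto_Attr`, so marking an attribute as extra or quant is a one-call chain on the checker. A minimal sketch of the new API, assuming the usual Paddle include paths and protobuf-generated types (the attribute here is illustrative, not tied to any real op):

```cpp
#include "paddle/fluid/framework/attribute.h"     // OpAttrChecker, TypedAttrChecker
#include "paddle/fluid/framework/framework.pb.h"  // proto::OpProto

int main() {
  paddle::framework::proto::OpProto op_proto;
  auto* attr = op_proto.add_attrs();
  attr->set_name("use_mkldnn");
  attr->set_type(paddle::framework::proto::BOOLEAN);
  attr->set_comment("illustrative attribute");

  paddle::framework::OpAttrChecker checker;
  // AddAttrChecker now receives the proto attr, so AsExtra()/AsQuant()
  // can record the flags directly on the op definition.
  checker.AddAttrChecker<bool>("use_mkldnn", attr)
      .SetDefault(false)
      .AsExtra();

  return attr->extra() ? 0 : 1;  // extra() is now true
}
```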
@@ -90,6 +90,8 @@ message OpProto {
     optional bool duplicable = 3 [ default = false ];
     optional bool intermediate = 4 [ default = false ];
     optional bool dispensable = 5 [ default = false ];
+    optional bool extra = 6 [ default = false ];
+    optional bool quant = 7 [ default = false ];
   }
 
   // AttrProto describes the C++ type Attribute.
@@ -101,6 +103,8 @@ message OpProto {
     // language binding has responsibility to fill that
     // attribute. End-User should not set that attribute.
     optional bool generated = 4 [ default = false ];
+    optional bool extra = 5 [ default = false ];
+    optional bool quant = 6 [ default = false ];
   }
 
   required string type = 1;
......
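The new proto fields let downstream tooling separate an op's stable signature from its extra and quantization-related members. A sketch of such a scan over a populated `OpProto` (the helper name is hypothetical, not part of this commit):

```cpp
#include <iostream>
#include "paddle/fluid/framework/framework.pb.h"

namespace proto = paddle::framework::proto;

// Hypothetical helper: prints every input and attribute that an op
// definition flags as extra or quant via the new proto fields.
void PrintExtraAndQuant(const proto::OpProto& op) {
  for (const auto& var : op.inputs()) {
    if (var.extra()) std::cout << "extra input: " << var.name() << "\n";
    if (var.quant()) std::cout << "quant input: " << var.name() << "\n";
  }
  for (const auto& attr : op.attrs()) {
    if (attr.extra()) std::cout << "extra attr: " << attr.name() << "\n";
    if (attr.quant()) std::cout << "quant attr: " << attr.name() << "\n";
  }
}
```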
@@ -102,7 +102,7 @@ TEST(OpCompatSensiblePass, compatOpAttribute) {
   EXPECT_FALSE(compat.Judge(fc_op, "test_pass"));
 
   OpCompat compat_1("fc_test");
-  info.checker_->AddAttrChecker<int>("in_num_col_dims").SetDefault(1);
+  info.checker_->AddAttrChecker<int>("in_num_col_dims", nullptr).SetDefault(1);
   EXPECT_TRUE(compat_1.Judge(fc_op, "test_pass"));
   delete info.checker_;
   delete info.proto_;
......
@@ -80,19 +80,24 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
                 static_cast<int>(OpRole::kOptimize) |
                     static_cast<int>(OpRole::kLRSched),
                 static_cast<int>(OpRole::kNotSpecified)})
-      .SetDefault(static_cast<int>(OpRole::kNotSpecified));
+      .SetDefault(static_cast<int>(OpRole::kNotSpecified))
+      .AsExtra();
   AddAttr<std::vector<std::string>>(OpRoleVarAttrName(),
                                     "Optimized for variable")
-      .SetDefault({});
+      .SetDefault({})
+      .AsExtra();
   AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
                                     "Callstack for Op Creatation.")
-      .SetDefault({});
+      .SetDefault({})
+      .AsExtra();
   AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
 
   Validate();
 }
......
@@ -75,6 +75,16 @@ class OpProtoAndCheckerMaker {
       var_->set_dispensable(true);
       return *this;
     }
+
+    VariableBuilder &AsExtra() {
+      var_->set_extra(true);
+      return *this;
+    }
+
+    VariableBuilder &AsQuant() {
+      var_->set_quant(true);
+      return *this;
+    }
   };
 
   VariableBuilder AddInput(const std::string &name, const std::string &comment);
@@ -91,7 +101,7 @@ class OpProtoAndCheckerMaker {
     attr->set_comment(comment);
     attr->set_generated(generated);
     attr->set_type(AttrTypeID<T>());
-    return op_checker_->AddAttrChecker<T>(name);
+    return op_checker_->AddAttrChecker<T>(name, attr);
   }
 
   void AddComment(const std::string &comment) { proto_->set_comment(comment); }
......
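Taken together, the maker-side API now flags inputs and outputs through `VariableBuilder` and attributes through the checker returned by `AddAttr`. A hypothetical maker showing the flags end to end (this op and its members are illustrative; the commit itself only retrofits existing ops):

```cpp
#include "paddle/fluid/framework/op_proto_maker.h"

// Hypothetical maker, not an op added by this commit.
class FuseDemoOpMaker : public paddle::framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) Input tensor.");
    // Optional fusion input: dispensable and outside the core signature.
    AddInput("ResidualData", "(Tensor) Residual data for fusion.")
        .AsDispensable()
        .AsExtra();
    AddOutput("Out", "(Tensor) Output tensor.");
    // Quantization-related attribute.
    AddAttr<float>("Scale_in", "Scale to be used for int8 input data.")
        .SetDefault(1.0f)
        .AsQuant();
    // Kernel-selection knob: extra, not part of the op's math.
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();
    AddComment("Illustrative op definition for the extra/quant flags.");
  }
};
```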
@@ -246,7 +246,8 @@ void Conv2DOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution operator. "
            "The format of input tensor is NCHW or NHWC, where N is batch size, "
@@ -264,12 +265,14 @@ void Conv2DOpMaker::Make() {
            "(Tensor) Bias to be added to each output of filter application."
            "The format of output tensor is X (one-dimensional) of size equal"
            "to the number of output channels. Only used with MKL-DNN.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddInput("ResidualData",
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
            "Used with fuse_residual_connection fusion.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator. "
             "It has same data fromat and data type as the Input.");
@@ -306,69 +309,87 @@ void Conv2DOpMaker::Make() {
   AddAttr<bool>(
       "use_cudnn",
       "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_relu_before_depthwise_conv",
                 "(bool, default false) Only used in cuda depthwise kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>(
       "use_quantizer",
       "(bool, default false) "
       "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"});
+      .InEnum({"float32", "int8", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_brelu",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<float>("fuse_brelu_threshold",
                  "(float, default false 6.0) Only used in mkldnn kernel")
-      .SetDefault(6.0f);
+      .SetDefault(6.0f)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<bool>(
       "use_addto",
       "(bool, default false) If use addto strategy or not, only used in "
       "cudnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
                 "whenever convolution output is as an input to residual "
                 "connection.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<float>("Scale_in",
                  "Scale_in to be used for int8 input data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<float>("Scale_out",
                  "Scale_out to be used for int8 output data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<float>("Scale_in_eltwise",
                  "Scale_in_eltwise to be used for int8 eltwise input data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<std::vector<float>>("Scale_weights",
                               "Scale_weights to be used for int8 weights data."
                               "Only used with MKL-DNN INT8.")
-      .SetDefault({1.0f});
+      .SetDefault({1.0f})
+      .AsExtra();
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Force INT8 kernel output FP32, only "
                 "used in MKL-DNN INT8")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
@@ -384,12 +405,14 @@ void Conv2DOpMaker::Make() {
                "allocated/freed each time the operator runs, larger "
                "workspace size can increase performance but also requires "
                "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
                 "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, default is False.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
 Convolution Operator.
@@ -426,7 +449,8 @@ void Conv3DOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput(
       "Input",
       "(Tensor) The input tensor of convolution operator. "
@@ -447,7 +471,8 @@ void Conv3DOpMaker::Make() {
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
            "Used with fuse_residual_connection fusion.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator."
             "It has same data fromat and data type as the Input.");
@@ -485,35 +510,44 @@ void Conv3DOpMaker::Make() {
   AddAttr<bool>(
       "use_cudnn",
       "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"});
+      .InEnum({"float32", "int8", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<bool>(
       "use_addto",
       "(bool, default false) If use addto strategy or not, only used in "
       "cudnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
                 "whenever convolution output is as an input to residual "
                 "connection.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCDHW) Only used in "
@@ -523,7 +557,8 @@ void Conv3DOpMaker::Make() {
       .SetDefault("NCDHW");
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Only used in mkldnn INT8 kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   // TODO(dzhwinter): need to registered layout transform function
   AddAttr<int>("workspace_size_MB",
                "Only used in cudnn kernel. workspace size for cudnn, in MB, "
@@ -531,12 +566,14 @@ void Conv3DOpMaker::Make() {
                "allocated/freed each time the operator runs, larger "
                "workspace size can increase performance but also requires "
                "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
                 "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, default is False.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
 Convolution3D Operator.
......
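Once an op built with these makers is registered, its `OpProto` records the flags, so passes and exporters can query them at runtime. A sketch under the assumption that the op is already registered and the framework's `OpInfoMap` is populated:

```cpp
#include <iostream>
#include <string>
#include "paddle/fluid/framework/op_info.h"

// Lists the attributes of a registered op that are marked extra.
// For example, after this commit ListExtraAttrs("conv2d") should
// include use_cudnn, use_mkldnn, fuse_relu, and the other knobs above.
void ListExtraAttrs(const std::string& op_type) {
  const auto& info = paddle::framework::OpInfoMap::Instance().Get(op_type);
  for (const auto& attr : info.Proto().attrs()) {
    if (attr.extra()) std::cout << attr.name() << "\n";
  }
}
```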