Unverified commit cb28753c authored by 王明冬, committed by GitHub

add the extra and quantization for op def, test=develop (#35076)

Parent b0a1d122
@@ -322,8 +322,19 @@ class TypedAttrChecker {
typedef std::function<void(const T&)> ValueChecker;
public:
explicit TypedAttrChecker(const std::string& attr_name)
: attr_name_(attr_name) {}
explicit TypedAttrChecker(const std::string& attr_name,
proto::OpProto_Attr* attr)
: attr_name_(attr_name), attr_(attr) {}
TypedAttrChecker& AsExtra() {
attr_->set_extra(true);
return *this;
}
TypedAttrChecker& AsQuant() {
attr_->set_quant(true);
return *this;
}
TypedAttrChecker& InEnum(const std::unordered_set<T>& range) {
value_checkers_.push_back(EnumInContainer<T>(range));
@@ -398,6 +409,7 @@ class TypedAttrChecker {
private:
std::string attr_name_;
proto::OpProto_Attr* attr_;
std::vector<ValueChecker> value_checkers_;
std::vector<DefaultValueChecker> default_value_setter_;
};
@@ -408,8 +420,9 @@ class OpAttrChecker {
public:
template <typename T>
TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name) {
attr_checkers_.push_back(TypedAttrChecker<T>(attr_name));
TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name,
proto::OpProto_Attr* attr) {
attr_checkers_.push_back(TypedAttrChecker<T>(attr_name, attr));
AttrChecker& checker = attr_checkers_.back();
return *(checker.target<TypedAttrChecker<T>>());
}
......
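The new `AsExtra()`/`AsQuant()` methods follow the same fluent-builder pattern the checker already uses for `SetDefault()` and `InEnum()`: each setter flips a flag on the attached `proto::OpProto_Attr` and returns `*this` so calls can chain. A minimal standalone sketch of that pattern (mock types, not Paddle's real classes):

```cpp
#include <iostream>
#include <string>

// Mock attribute record; stands in for proto::OpProto_Attr.
struct AttrRecord {
  std::string name;
  bool extra = false;
  bool quant = false;
};

// Mock of TypedAttrChecker's new chaining setters.
class AttrChecker {
 public:
  explicit AttrChecker(AttrRecord* attr) : attr_(attr) {}
  AttrChecker& AsExtra() { attr_->extra = true; return *this; }
  AttrChecker& AsQuant() { attr_->quant = true; return *this; }
 private:
  AttrRecord* attr_;  // borrowed pointer; must outlive the checker
};

int main() {
  AttrRecord scale{"Scale_in"};
  AttrChecker(&scale).AsExtra().AsQuant();  // chained, as in the op makers below
  std::cout << scale.name << ": extra=" << scale.extra
            << " quant=" << scale.quant << "\n";  // Scale_in: extra=1 quant=1
}
```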
@@ -90,6 +90,8 @@ message OpProto {
optional bool duplicable = 3 [ default = false ];
optional bool intermediate = 4 [ default = false ];
optional bool dispensable = 5 [ default = false ];
optional bool extra = 6 [ default = false ];
optional bool quant = 7 [ default = false ];
}
// AttrProto describes the C++ type Attribute.
@@ -101,6 +103,8 @@ message OpProto {
// language binding has responsibility to fill that
// attribute. End-User should not set that attribute.
optional bool generated = 4 [ default = false ];
optional bool extra = 5 [ default = false ];
optional bool quant = 6 [ default = false ];
}
required string type = 1;
......
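Because `extra` and `quant` are now persisted in `OpProto`, downstream tooling can separate an op's core definition from its extra/quantization-only members. A hedged sketch, assuming the standard protoc-generated accessors for these messages (not part of this commit):

```cpp
// List the members of a populated proto::OpProto that carry the new flags.
for (const auto& attr : proto.attrs()) {
  if (attr.extra()) std::cout << attr.name() << " [extra attr]\n";
  if (attr.quant()) std::cout << attr.name() << " [quant attr]\n";
}
for (const auto& var : proto.inputs()) {
  if (var.extra() || var.quant()) std::cout << var.name() << " [extra/quant input]\n";
}
```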
@@ -102,7 +102,7 @@ TEST(OpCompatSensiblePass, compatOpAttribute) {
EXPECT_FALSE(compat.Judge(fc_op, "test_pass"));
OpCompat compat_1("fc_test");
info.checker_->AddAttrChecker<int>("in_num_col_dims").SetDefault(1);
info.checker_->AddAttrChecker<int>("in_num_col_dims", nullptr).SetDefault(1);
EXPECT_TRUE(compat_1.Judge(fc_op, "test_pass"));
delete info.checker_;
delete info.proto_;
......
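Existing call sites of `AddAttrChecker` must now supply the `proto::OpProto_Attr*` second argument; the tester passes `nullptr` because it builds a checker with no proto attached. A cautionary sketch of the implication (it follows from the checker code above, not from the commit text):

```cpp
// Fine: the checker never touches attr_ unless AsExtra()/AsQuant() is called.
info.checker_->AddAttrChecker<int>("in_num_col_dims", nullptr).SetDefault(1);

// Would dereference a null pointer: AsExtra() calls attr_->set_extra(true)
// with no null check, so it is only safe when a real proto attr is attached.
// info.checker_->AddAttrChecker<int>("in_num_col_dims", nullptr).AsExtra();
```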
@@ -80,19 +80,24 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
static_cast<int>(OpRole::kOptimize) |
static_cast<int>(OpRole::kLRSched),
static_cast<int>(OpRole::kNotSpecified)})
.SetDefault(static_cast<int>(OpRole::kNotSpecified));
.SetDefault(static_cast<int>(OpRole::kNotSpecified))
.AsExtra();
AddAttr<std::vector<std::string>>(OpRoleVarAttrName(),
"Optimized for variable")
.SetDefault({});
.SetDefault({})
.AsExtra();
AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namescope.")
.SetDefault("");
.SetDefault("")
.AsExtra();
AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
"Callstack for Op Creatation.")
.SetDefault({});
.SetDefault({})
.AsExtra();
AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
.SetDefault("");
.SetDefault("")
.AsExtra();
Validate();
}
......
@@ -75,6 +75,16 @@ class OpProtoAndCheckerMaker {
var_->set_dispensable(true);
return *this;
}
VariableBuilder &AsExtra() {
var_->set_extra(true);
return *this;
}
VariableBuilder &AsQuant() {
var_->set_quant(true);
return *this;
}
};
VariableBuilder AddInput(const std::string &name, const std::string &comment);
@@ -91,7 +101,7 @@ class OpProtoAndCheckerMaker {
attr->set_comment(comment);
attr->set_generated(generated);
attr->set_type(AttrTypeID<T>());
return op_checker_->AddAttrChecker<T>(name);
return op_checker_->AddAttrChecker<T>(name, attr);
}
void AddComment(const std::string &comment) { proto_->set_comment(comment); }
......
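With `VariableBuilder` gaining the same two markers, inputs and outputs can be tagged alongside attributes. An illustrative fragment of an op maker's `Make()` (hypothetical variable names; the first mirrors the conv2d usage below, the second is invented to show `AsQuant()`):

```cpp
// An input consumed only by a fused/MKL-DNN code path is both optional
// and outside the op's core definition, so it chains both markers.
AddInput("ResidualData",
         "(Tensor) Residual data added to the convolution output.")
    .AsDispensable()
    .AsExtra();

// A hypothetical quantization-only input would use AsQuant() instead.
AddInput("InScale", "(Tensor) Per-tensor input scale, int8 path only.")
    .AsDispensable()
    .AsQuant();
```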
@@ -246,7 +246,8 @@ void Conv2DOpMaker::Make() {
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddInput("Input",
"(Tensor) The input tensor of convolution operator. "
"The format of input tensor is NCHW or NHWC, where N is batch size, "
@@ -264,12 +265,14 @@ void Conv2DOpMaker::Make() {
"(Tensor) Bias to be added to each output of filter application."
"The format of output tensor is X (one-dimensional) of size equal"
"to the number of output channels. Only used with MKL-DNN.")
.AsDispensable();
.AsDispensable()
.AsExtra();
AddInput("ResidualData",
"(Tensor) Tensor with residual data "
"to which convolution output will be added."
"Used with fuse_residual_connection fusion.")
.AsDispensable();
.AsDispensable()
.AsExtra();
AddOutput("Output",
"(Tensor) The output tensor of convolution operator. "
"It has same data fromat and data type as the Input.");
@@ -306,69 +309,87 @@ void Conv2DOpMaker::Make() {
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>("fuse_relu_before_depthwise_conv",
"(bool, default false) Only used in cuda depthwise kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"use_quantizer",
"(bool, default false) "
"This parameter is no longer used. Use 'mkldnn_data_type' instead.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"});
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>("fuse_brelu",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<float>("fuse_brelu_threshold",
"(float, default false 6.0) Only used in mkldnn kernel")
.SetDefault(6.0f);
.SetDefault(6.0f)
.AsExtra();
AddAttr<std::string>("fuse_activation",
"(string, default \"\") Only used in mkldnn kernel")
.SetDefault("");
.SetDefault("")
.AsExtra();
AddAttr<float>("fuse_alpha",
"(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f);
.SetDefault(0.0f)
.AsExtra();
AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f);
.SetDefault(0.0f)
.AsExtra();
AddAttr<bool>(
"use_addto",
"(bool, default false) If use addto strategy or not, only used in "
"cudnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>("fuse_residual_connection",
"(bool, default false) Only used in mkldnn kernel. Used "
"whenever convolution output is as an input to residual "
"connection.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<float>("Scale_in",
"Scale_in to be used for int8 input data."
"Only used with MKL-DNN INT8.")
.SetDefault(1.0f);
.SetDefault(1.0f)
.AsExtra();
AddAttr<float>("Scale_out",
"Scale_out to be used for int8 output data."
"Only used with MKL-DNN INT8.")
.SetDefault(1.0f);
.SetDefault(1.0f)
.AsExtra();
AddAttr<float>("Scale_in_eltwise",
"Scale_in_eltwise to be used for int8 eltwise input data."
"Only used with MKL-DNN INT8.")
.SetDefault(1.0f);
.SetDefault(1.0f)
.AsExtra();
AddAttr<std::vector<float>>("Scale_weights",
"Scale_weights to be used for int8 weights data."
"Only used with MKL-DNN INT8.")
.SetDefault({1.0f});
.SetDefault({1.0f})
.AsExtra();
AddAttr<bool>("force_fp32_output",
"(bool, default false) Force INT8 kernel output FP32, only "
"used in MKL-DNN INT8")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCHW) Only used in "
@@ -384,12 +405,14 @@ void Conv2DOpMaker::Make() {
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardware. This size should be chosen carefully.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
.AsExtra();
AddAttr<bool>("exhaustive_search",
"(bool, default false) cuDNN has many algorithm to calculation "
"convolution, whether enable exhaustive search "
"for cuDNN convolution or not, default is False.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Convolution Operator.
@@ -426,7 +449,8 @@ void Conv3DOpMaker::Make() {
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddInput(
"Input",
"(Tensor) The input tensor of convolution operator. "
@@ -447,7 +471,8 @@ void Conv3DOpMaker::Make() {
"(Tensor) Tensor with residual data "
"to which convolution output will be added."
"Used with fuse_residual_connection fusion.")
.AsDispensable();
.AsDispensable()
.AsExtra();
AddOutput("Output",
"(Tensor) The output tensor of convolution operator."
"It has same data fromat and data type as the Input.");
@@ -485,35 +510,44 @@ void Conv3DOpMaker::Make() {
AddAttr<bool>(
"use_cudnn",
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"});
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<std::string>("fuse_activation",
"(string, default \"\") Only used in mkldnn kernel")
.SetDefault("");
.SetDefault("")
.AsExtra();
AddAttr<float>("fuse_alpha",
"(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f);
.SetDefault(0.0f)
.AsExtra();
AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
.SetDefault(0.0f);
.SetDefault(0.0f)
.AsExtra();
AddAttr<bool>(
"use_addto",
"(bool, default false) If use addto strategy or not, only used in "
"cudnn kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<bool>("fuse_residual_connection",
"(bool, default false) Only used in mkldnn kernel. Used "
"whenever convolution output is as an input to residual "
"connection.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCDHW) Only used in "
@@ -523,7 +557,8 @@ void Conv3DOpMaker::Make() {
.SetDefault("NCDHW");
AddAttr<bool>("force_fp32_output",
"(bool, default false) Only used in mkldnn INT8 kernel")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
// TODO(dzhwinter): need to registered layout transform function
AddAttr<int>("workspace_size_MB",
"Only used in cudnn kernel. workspace size for cudnn, in MB, "
@@ -531,12 +566,14 @@ void Conv3DOpMaker::Make() {
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardware. This size should be chosen carefully.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
.AsExtra();
AddAttr<bool>("exhaustive_search",
"(bool, default false) cuDNN has many algorithm to calculation "
"convolution, whether enable exhaustive search "
"for cuDNN convolution or not, default is False.")
.SetDefault(false);
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Convolution3D Operator.
......
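Note that the conv makers in this commit tag everything, including the INT8 scale attributes, with `AsExtra()`; `AsQuant()` is introduced but not yet applied here. A hedged sketch of how a quantization-specific attribute could be tagged once it is (hypothetical follow-up, not in this diff):

```cpp
// Hypothetical: mark an int8 scale attribute as quantization-only in OpProto.
AddAttr<float>("Scale_in",
               "Scale_in to be used for int8 input data. "
               "Only used with MKL-DNN INT8.")
    .SetDefault(1.0f)
    .AsQuant();
```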