未验证 提交 b26efe0d 编写于 作者: Z zyfncg 提交者: GitHub

Clear extra attrs of elementwise op in OpMaker (#45845)

* clear extra attrs of elementwise op in opmaker

* fix op_debug_string_test

* fix bug of grad_add

* fix sort of runtime attrs
上级 1d78681d
...@@ -998,16 +998,25 @@ void OpDesc::Flush() { ...@@ -998,16 +998,25 @@ void OpDesc::Flush() {
std::vector<std::pair<std::string, Attribute>> sorted_attrs{attrs_.begin(), std::vector<std::pair<std::string, Attribute>> sorted_attrs{attrs_.begin(),
attrs_.end()}; attrs_.end()};
std::vector<std::pair<std::string, Attribute>> sorted_runtime_attrs{
runtime_attrs_.begin(), runtime_attrs_.end()};
std::sort( std::sort(
sorted_attrs.begin(), sorted_attrs.begin(),
sorted_attrs.end(), sorted_attrs.end(),
[](std::pair<std::string, Attribute> a, [](std::pair<std::string, Attribute> a,
std::pair<std::string, Attribute> b) { return a.first < b.first; }); std::pair<std::string, Attribute> b) { return a.first < b.first; });
std::sort(
sorted_runtime_attrs.begin(),
sorted_runtime_attrs.end(),
[](std::pair<std::string, Attribute> a,
std::pair<std::string, Attribute> b) { return a.first < b.first; });
for (auto &attr : sorted_attrs) { for (auto &attr : sorted_attrs) {
set_attr_desc(attr.first, attr.second); set_attr_desc(attr.first, attr.second);
} }
for (auto &attr : runtime_attrs_) { for (auto &attr : sorted_runtime_attrs) {
set_attr_desc(attr.first, attr.second); set_attr_desc(attr.first, attr.second);
} }
......
...@@ -216,47 +216,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -216,47 +216,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
AddInputX(); AddInputX();
AddInputY(); AddInputY();
AddOpOutput(); AddOpOutput();
AddAttr<int>("axis", AddAttr<int>("axis",
"(int, default -1). If X.dimension != Y.dimension," "(int, default -1). If X.dimension != Y.dimension,"
"Y.dimension must be a subsequence of x.dimension. And axis " "Y.dimension must be a subsequence of x.dimension. And axis "
"is the start dimension index " "is the start dimension index "
"for broadcasting Y onto X. ") "for broadcasting Y onto X. ")
.SetDefault(-1); .SetDefault(-1);
AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
.SetDefault("")
.AsExtra();
AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
.SetDefault("")
.AsExtra();
AddAttr<bool>(
"use_quantizer",
"(bool, default false) "
"This parameter is no longer used. Use 'mkldnn_data_type' instead.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
/* int8 parameters */
AddAttr<float>("Scale_x",
"(float, default 1.0f), The quantize scale of X tensor")
.SetDefault(1.0f)
.AsExtra();
AddAttr<float>("Scale_y",
"(float, default 1.0f), The quantize scale of Y tensor")
.SetDefault(1.0f)
.AsExtra();
AddAttr<float>("Scale_out",
"(float, default 1.0f), The quantize scale of output data")
.SetDefault(1.0f)
.AsExtra();
AddOpComment(); AddOpComment();
} }
......
...@@ -41,8 +41,6 @@ TEST(op_debug_str, test_unknown_dtype) { ...@@ -41,8 +41,6 @@ TEST(op_debug_str, test_unknown_dtype) {
desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")}); desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")});
desc.SetAttr("axis", -1); desc.SetAttr("axis", -1);
desc.SetAttr("use_mkldnn", false); desc.SetAttr("use_mkldnn", false);
desc.SetAttr("x_data_format", "");
desc.SetAttr("y_data_format", "");
auto x_tensor = scope.Var("X")->GetMutable<framework::LoDTensor>(); auto x_tensor = scope.Var("X")->GetMutable<framework::LoDTensor>();
x_tensor->Resize(dim); x_tensor->Resize(dim);
......
...@@ -3,6 +3,12 @@ ...@@ -3,6 +3,12 @@
extra : extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false] attrs : [bool use_cudnn = false, bool use_mkldnn = false]
- op : add (elementwise_add)
backward : add_grad (elementwise_add_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : acosh - op : acosh
backward : acosh_grad backward : acosh_grad
extra : extra :
...@@ -193,6 +199,12 @@ ...@@ -193,6 +199,12 @@
outputs : outputs :
out : Out out : Out
- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : dot - op : dot
inputs : inputs :
{x : X, y : Y} {x : X, y : Y}
...@@ -209,6 +221,12 @@ ...@@ -209,6 +221,12 @@
extra : extra :
attrs : [bool fix_seed = false, int seed = 0] attrs : [bool fix_seed = false, int seed = 0]
- op : elementwise_pow
backward : elementwise_pow_grad
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : elu - op : elu
backward : elu_grad backward : elu_grad
extra : extra :
...@@ -248,6 +266,23 @@ ...@@ -248,6 +266,23 @@
inputs: {x: X} inputs: {x: X}
outputs: {out: Out} outputs: {out: Out}
- op : floor_divide (elementwise_floordiv)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : fmax (elementwise_fmax)
backward : fmax_grad (elementwise_fmax_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : fmin (elementwise_fmin)
backward : fmin_grad (elementwise_fmin_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : floor - op : floor
backward : floor_grad backward : floor_grad
extra : extra :
...@@ -263,6 +298,11 @@ ...@@ -263,6 +298,11 @@
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false] attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
- op : grad_add
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : grid_sampler - op : grid_sampler
backward : grid_sampler_grad backward : grid_sampler_grad
extra : extra :
...@@ -273,6 +313,12 @@ ...@@ -273,6 +313,12 @@
extra : extra :
attrs : [bool is_test = false] attrs : [bool is_test = false]
- op : heaviside (elementwise_heaviside)
backward : heaviside_grad (elementwise_heaviside_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : hard_swish - op : hard_swish
backward : hard_swish_grad backward : hard_swish_grad
extra : extra :
...@@ -346,6 +392,24 @@ ...@@ -346,6 +392,24 @@
str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}', str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',] 'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
- op : maximum (elementwise_max)
backward : maximum_grad (elementwise_max_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : multiply (elementwise_mul)
backward : multiply_grad (elementwise_mul_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : mish - op : mish
backward : mish_grad backward : mish_grad
extra : extra :
...@@ -436,6 +500,11 @@ ...@@ -436,6 +500,11 @@
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- op : remainder (elementwise_mod)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : relu - op : relu
backward : relu_grad backward : relu_grad
extra : extra :
...@@ -546,6 +615,12 @@ ...@@ -546,6 +615,12 @@
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- op : subtract (elementwise_sub)
backward : subtract_grad (elementwise_sub_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : swish - op : swish
backward : swish_grad backward : swish_grad
extra : extra :
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册