Unverified commit 0a904f8b, authored by zyfncg, committed by GitHub

Clear extra attrs of scale in OpMaker (#45984)

* clear extra attr of scale in opmaker

* fix sum bug

* fix merge conflict

* fix minus
Parent commit: 5c3e8585
@@ -24,7 +24,7 @@ repos:
files: (?!.*third_party)^.*$ | (?!.*book)^.*$
- id: end-of-file-fixer
- id: sort-simple-yaml
files: (api|backward|api_[a-z_]+)\.yaml$
files: (op|backward|op_[a-z_]+)\.yaml$
- id: trailing-whitespace
files: (.*\.(py|bzl|md|rst|c|cc|cxx|cpp|cu|h|hpp|hxx|xpu|kps|cmake)|BUILD|.*\.BUILD|WORKSPACE|CMakeLists.txt)$
- repo: local
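The only change in this hunk is the file pattern for the sort-simple-yaml hook, which now matches op-prefixed yaml file names instead of api-prefixed ones (presumably following a rename of those files). A minimal standalone check of the new pattern, with illustrative file names that are not taken from this PR, might look like this:

    // Illustrative check of the updated pre-commit file pattern.
    // The file names below are examples only, not part of this PR.
    #include <iostream>
    #include <regex>

    int main() {
      const std::regex pattern(R"((op|backward|op_[a-z_]+)\.yaml$)");
      for (const char* name : {"op_compat.yaml", "backward.yaml",
                               "api_compat.yaml", "something_else.yaml"}) {
        std::cout << name
                  << (std::regex_search(name, pattern) ? " -> sorted by the hook\n"
                                                       : " -> skipped\n");
      }
      return 0;
    }

Re-activating the sort hook on these files is consistent with the later yaml hunks in this diff, most of which only reorder existing entries.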
@@ -130,6 +130,7 @@ class MinusGradMaker : public imperative::GradOpBaseMakerBase {
op.SetInput("X", this->OutputGrad("Out"));
op.SetOutput("Out", x_g);
op.SetAttr("scale", 1.0f);
op.SetDefaultAttrsMap(DefaultAttrsMap());
}
if (!y_g.empty()) {
@@ -138,6 +139,7 @@ class MinusGradMaker : public imperative::GradOpBaseMakerBase {
op.SetInput("X", this->OutputGrad("Out"));
op.SetOutput("Out", y_g);
op.SetAttr("scale", -1.0f);
op.SetDefaultAttrsMap(DefaultAttrsMap());
}
return node;
@@ -75,10 +75,6 @@ $$Out = scale*(X + bias)$$
"Apply bias addition after or before scaling. It is useful for "
"numeric stability in some circumstances.")
.SetDefault(true);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
}
};
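The use_mkldnn declaration removed from the scale OpMaker above does not simply disappear: the last yaml hunk in this diff adds a matching entry for scale under extra : attrs : [bool use_mkldnn = false], so the flag is now registered as an extra attribute through the yaml instead of in the C++ OpMaker.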
@@ -108,11 +104,6 @@ class ScaleGradMaker : public framework::SingleGradOpMaker<T> {
VLOG(6) << "Finish Set Attr bias";
grad_op->SetAttr("bias_after_scale", true);
VLOG(6) << "Finish Set Attr bias_after_scale";
if (grad_op->HasAttr("use_mkldnn")) {
VLOG(6) << "Finish Check Attr use_mkldnn";
grad_op->SetAttr("use_mkldnn", this->GetAttr("use_mkldnn"));
VLOG(6) << "Finish Set Attr use_mkldnn";
}
VLOG(6) << "Finish Apply";
}
};
@@ -334,6 +334,7 @@ class SumGradOpBaseMaker : public imperative::GradOpBaseMakerBase {
op.SetInput("X", og);
op.SetOutput("Out", InputGradsType{x_grad});
op.SetAttr("scale", 1.0f);
op.SetDefaultAttrsMap(DefaultAttrsMap());
}
return node;
} else {
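Both MinusGradMaker above and SumGradOpBaseMaker here gain a SetDefaultAttrsMap(DefaultAttrsMap()) call next to their explicit SetAttr("scale", ...). The sketch below is a toy model of why that is needed, using hypothetical types rather than Paddle's actual implementation: once extra attributes such as use_mkldnn are no longer declared by the OpMaker, an op built directly by a grad maker only carries the attributes it sets explicitly, so defaults for the remaining extra attributes have to be supplied as a separate map.

    // Toy model (hypothetical types, not Paddle's API): explicitly set
    // attributes win, everything else falls back to the default attrs map.
    #include <iostream>
    #include <map>
    #include <string>
    #include <variant>

    using Attr = std::variant<bool, float>;
    using AttrMap = std::map<std::string, Attr>;

    AttrMap Resolve(const AttrMap& set_attrs, const AttrMap& default_attrs) {
      AttrMap merged = default_attrs;
      for (const auto& kv : set_attrs) {
        merged[kv.first] = kv.second;  // an explicit value overrides the default
      }
      return merged;
    }

    int main() {
      AttrMap set_attrs{{"scale", 1.0f}};        // what the grad maker sets explicitly
      AttrMap defaults{{"use_mkldnn", false}};   // what SetDefaultAttrsMap would supply
      AttrMap attrs = Resolve(set_attrs, defaults);
      std::cout << "scale = " << std::get<float>(attrs.at("scale")) << "\n";
      std::cout << "use_mkldnn = " << std::get<bool>(attrs.at("use_mkldnn")) << "\n";
      return 0;
    }

Running it prints scale = 1 and use_mkldnn = 0: the value the maker sets explicitly survives, and the extra attribute still resolves to its default even though the OpMaker no longer declares it.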
@@ -3,17 +3,17 @@
extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false]
- op : acosh
backward : acosh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : add (elementwise_add)
backward : add_grad (elementwise_add_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : acosh
backward : acosh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : addmm
backward : addmm_grad
extra :
@@ -266,6 +266,11 @@
inputs: {x: X}
outputs: {out: Out}
- op : floor
backward : floor_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : floor_divide (elementwise_floordiv)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
@@ -283,11 +288,6 @@
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : floor
backward : floor_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : frobenius_norm
backward : frobenius_norm_grad
extra :
@@ -313,17 +313,17 @@
extra :
attrs : [bool is_test = false]
- op : hard_swish
backward : hard_swish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : heaviside (elementwise_heaviside)
backward : heaviside_grad (elementwise_heaviside_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : hard_swish
backward : hard_swish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : inplace_abn
backward : inplace_abn_grad
extra :
@@ -404,17 +404,17 @@
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : mish
backward : mish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : multiply (elementwise_mul)
backward : multiply_grad (elementwise_mul_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : mish
backward : mish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : mv
inputs :
{x : X, vec : Vec}
@@ -500,11 +500,6 @@
extra :
attrs : [bool use_mkldnn = false]
- op : remainder (elementwise_mod)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : relu
backward : relu_grad
extra :
@@ -515,6 +510,11 @@
extra :
attrs : [bool use_mkldnn = false]
- op : remainder (elementwise_mod)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : renorm
backward : renorm_grad
extra :
@@ -535,6 +535,10 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : scale
extra :
attrs : [bool use_mkldnn = false]
- op : seed
extra :
attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]