Unverified commit 0cc2251f, authored by zyfncg and committed by GitHub

[cherry-pick] clear extra attrs of some ops in OpMaker (#45845, #45984, #46060) (#46218)

* Clear extra attrs of elementwise op in OpMaker (#45845)

* clear extra attrs of elementwise op in opmaker

* fix op_debug_string_test

* fix bug of grad_add

* fix sort of runtime attrs

* Clear extra attrs of scale in OpMaker (#45984)

* clear extra attr of scale in opmaker

* fix sum bug

* fix merge conflict

* fix minus

* Clear extra attributes of some Op in OpMaker (Part4) (#46060)

* clear extra attr of some ops in opmaker

* revert clear use_cudnn for pool

* fix test_operator_desc

* fix Attr interface of OperatorBase

* fix code stype
Parent commit: dc3a3f1e
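The common mechanism behind these changes: extra attributes removed from the C++ OpMakers are declared in the extra-attrs section of the ops yaml instead, and attribute lookup now falls back from the regular attribute map to the runtime attribute map. A minimal, self-contained sketch of that fallback lookup, using stand-in types rather than the real Paddle classes:

```cpp
// Hypothetical stand-ins for paddle::framework::Attribute / AttributeMap.
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

using Attribute = float;
using AttributeMap = std::map<std::string, Attribute>;

// Mirrors the pattern added to OpDesc::GetAttr and OperatorBase::Attr:
// search the regular attrs first, then the runtime (extra) attrs, and only
// fail if the name is missing from both maps.
const Attribute& LookupAttr(const std::string& name,
                            const AttributeMap& attrs,
                            const AttributeMap& runtime_attrs) {
  auto it = attrs.find(name);
  if (it == attrs.end()) {
    it = runtime_attrs.find(name);
    if (it == runtime_attrs.end()) {
      throw std::runtime_error(
          "(" + name + ") is not found in AttributeMap and RuntimeAttributeMap.");
    }
  }
  return it->second;
}

int main() {
  AttributeMap attrs{{"axis", -1.0f}};
  AttributeMap runtime_attrs{{"use_mkldnn", 0.0f}};  // extra attr lives here now
  std::cout << LookupAttr("use_mkldnn", attrs, runtime_attrs) << "\n";  // prints 0
}
```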
@@ -24,7 +24,7 @@ repos:
files: (?!.*third_party)^.*$ | (?!.*book)^.*$
- id: end-of-file-fixer
- id: sort-simple-yaml
files: (api|backward|api_[a-z_]+)\.yaml$
files: (op|backward|op_[a-z_]+)\.yaml$
- repo: local
hooks:
- id: clang-format
...
@@ -790,11 +790,11 @@ Attribute OpDesc::GetAttr(const std::string &name, bool with_attr_var) const {
auto it = attrs_.find(name);
if (it == attrs_.end()) {
it = runtime_attrs_.find(name);
PADDLE_ENFORCE_NE(
it,
runtime_attrs_.end(),
platform::errors::NotFound("Attribute %s is not found.", name));
}
PADDLE_ENFORCE_NE(
it,
attrs_.end(),
platform::errors::NotFound("Attribute %s is not found.", name));
if (!with_attr_var) {
PADDLE_ENFORCE_EQ(
HasAttrVar(it->second),
@@ -998,16 +998,25 @@ void OpDesc::Flush() {
std::vector<std::pair<std::string, Attribute>> sorted_attrs{attrs_.begin(),
attrs_.end()};
std::vector<std::pair<std::string, Attribute>> sorted_runtime_attrs{
runtime_attrs_.begin(), runtime_attrs_.end()};
std::sort(
sorted_attrs.begin(),
sorted_attrs.end(),
[](std::pair<std::string, Attribute> a,
std::pair<std::string, Attribute> b) { return a.first < b.first; });
std::sort(
sorted_runtime_attrs.begin(),
sorted_runtime_attrs.end(),
[](std::pair<std::string, Attribute> a,
std::pair<std::string, Attribute> b) { return a.first < b.first; });
for (auto &attr : sorted_attrs) {
set_attr_desc(attr.first, attr.second);
}
for (auto &attr : runtime_attrs_) {
for (auto &attr : sorted_runtime_attrs) {
set_attr_desc(attr.first, attr.second);
}
...
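For readers following the Flush() change above: both attribute maps are now copied into vectors and sorted by key before serialization, so the attribute order in the saved program is deterministic. A small sketch of that pattern, again with stand-in types rather than the Paddle source:

```cpp
// Sketch only: the real code serializes via set_attr_desc; here we just show
// the copy-and-sort-by-key step applied to an unordered map.
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using Attribute = float;  // stand-in, as in the previous sketch

std::vector<std::pair<std::string, Attribute>> SortedByKey(
    const std::unordered_map<std::string, Attribute>& attrs) {
  std::vector<std::pair<std::string, Attribute>> sorted{attrs.begin(), attrs.end()};
  std::sort(sorted.begin(), sorted.end(),
            [](const auto& a, const auto& b) { return a.first < b.first; });
  return sorted;
}

int main() {
  std::unordered_map<std::string, Attribute> runtime_attrs{
      {"use_mkldnn", 0.0f}, {"mkldnn_data_type", 1.0f}};
  for (const auto& kv : SortedByKey(runtime_attrs)) {
    std::cout << kv.first << "\n";  // printed in deterministic, key-sorted order
  }
}
```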
@@ -182,11 +182,17 @@ class OperatorBase {
}
template <typename T>
inline const T& Attr(const std::string& name) const {
PADDLE_ENFORCE_NE(
attrs_.find(name),
attrs_.end(),
platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
return PADDLE_GET_CONST(T, attrs_.at(name));
auto it = attrs_.find(name);
if (it == attrs_.end()) {
it = runtime_attrs_.find(name);
PADDLE_ENFORCE_NE(
it,
runtime_attrs_.end(),
platform::errors::NotFound(
"(%s) is not found in AttributeMap and RuntimeAttributeMap.",
name));
}
return PADDLE_GET_CONST(T, it->second);
}
void SetAttr(const std::string& name, const Attribute& v) {
PADDLE_ENFORCE_EQ(
...
@@ -216,47 +216,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
AddInputX();
AddInputY();
AddOpOutput();
AddAttr<int>("axis",
"(int, default -1). If X.dimension != Y.dimension,"
"Y.dimension must be a subsequence of x.dimension. And axis "
"is the start dimension index "
"for broadcasting Y onto X. ")
.SetDefault(-1);
AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
.SetDefault("")
.AsExtra();
AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
.SetDefault("")
.AsExtra();
AddAttr<bool>(
"use_quantizer",
"(bool, default false) "
"This parameter is no longer used. Use 'mkldnn_data_type' instead.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
/* int8 parameters */
AddAttr<float>("Scale_x",
"(float, default 1.0f), The quantize scale of X tensor")
.SetDefault(1.0f)
.AsExtra();
AddAttr<float>("Scale_y",
"(float, default 1.0f), The quantize scale of Y tensor")
.SetDefault(1.0f)
.AsExtra();
AddAttr<float>("Scale_out",
"(float, default 1.0f), The quantize scale of output data")
.SetDefault(1.0f)
.AsExtra();
AddOpComment();
}
...
@@ -88,16 +88,6 @@ class ExpandV2OpMaker : public framework::OpProtoAndCheckerMaker {
"the corresponding value given by Attr(expand_times).");
AddAttr<std::vector<int>>("shape", "The expanded shape for each dimension.")
.SetDefault({});
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "bfloat16"})
.AsExtra();
AddComment(R"DOC( AddComment(R"DOC(
Expand the input to the given shape. The rank of X Expand the input to the given shape. The rank of X
should be in [1, 6] and size of 'shape' must be in [1, 6] also. should be in [1, 6] and size of 'shape' must be in [1, 6] also.
......
@@ -174,10 +174,6 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
"3: XPUPlace. "
"4: NPUPlace. ")
.SetDefault(-1);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddOutput("Out", AddOutput("Out",
"(Tensor) Tensor of specified shape will be filled " "(Tensor) Tensor of specified shape will be filled "
"with the specified value"); "with the specified value");
......
@@ -82,14 +82,6 @@ class GatherOpMaker : public framework::OpProtoAndCheckerMaker {
"The Tensor which contains the axis that we do gather operation.")
.AsDispensable();
AddOutput("Out", "The output of gather op");
AddAttr<bool>(
"overwrite",
"(bool, default: False) "
"In backward process, calc the grad when has same index,"
"If true, update the grad using the overwrite mode in same index,"
"If false, using the accumulate mode in same index.")
.SetDefault(true)
.AsExtra();
AddAttr<int>(
"axis",
"The Tensor which contains the axis that we do gather operation.")
...
@@ -130,6 +130,7 @@ class MinusGradMaker : public imperative::GradOpBaseMakerBase {
op.SetInput("X", this->OutputGrad("Out"));
op.SetOutput("Out", x_g);
op.SetAttr("scale", 1.0f);
op.SetDefaultAttrsMap(DefaultAttrsMap());
}
if (!y_g.empty()) {
@@ -138,6 +139,7 @@ class MinusGradMaker : public imperative::GradOpBaseMakerBase {
op.SetInput("X", this->OutputGrad("Out"));
op.SetOutput("Out", y_g);
op.SetAttr("scale", -1.0f);
op.SetDefaultAttrsMap(DefaultAttrsMap());
}
return node;
...
@@ -79,10 +79,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "(Tensor), The first input tensor of mul op.");
AddInput("Y", "(Tensor), The second input tensor of mul op.");
AddOutput("Out", "(Tensor), The output tensor of mul op.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<int>(
"x_num_col_dims",
R"DOC((int, default 1), The mul_op can take tensors with more than two
@@ -113,31 +109,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
)DOC")
.SetDefault(1)
.EqualGreaterThan(1);
AddAttr<float>(
"scale_x",
"scale_x to be used for int8 mul input data x. scale_x has the"
"same purpose as scale_in in OPs that support quantization."
"Only to be used with MKL-DNN INT8")
.SetDefault(1.0f)
.AsExtra();
AddAttr<std::vector<float>>(
"scale_y",
"scale_y to be used for int8 mul input data y. scale_y has the"
"same purpose as scale_weights in OPs that support quantization."
"Only to be used with MKL-DNN INT8")
.SetDefault({1.0f})
.AsExtra();
AddAttr<float>("scale_out",
"scale_out to be used for int8 output data."
"Only used with MKL-DNN INT8")
.SetDefault(1.0f)
.AsExtra();
AddAttr<bool>(
"force_fp32_output",
"(bool, default false) Force quantize kernel output FP32, only "
"used in quantized MKL-DNN.")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC( AddComment(R"DOC(
Mul Operator. Mul Operator.
......
@@ -41,8 +41,6 @@ TEST(op_debug_str, test_unknown_dtype) {
desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")});
desc.SetAttr("axis", -1);
desc.SetAttr("use_mkldnn", false);
desc.SetAttr("x_data_format", "");
desc.SetAttr("y_data_format", "");
auto x_tensor = scope.Var("X")->GetMutable<framework::LoDTensor>();
x_tensor->Resize(dim);
...
@@ -186,34 +186,12 @@ void Pool2dOpMaker::Make() {
"pooling in each grid area to get output pooling value. "
"Default False.")
.SetDefault(false);
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"ceil_mode",
"(bool) Whether to use the ceil function to calculate "
"output height and width. False is the default. If it is set to False, "
"the floor function will be used. Default False")
.SetDefault(false);
AddAttr<bool>("use_mkldnn",
"(bool) Only used in mkldnn kernel. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"use_quantizer",
"(bool, default false) "
"This parameter is no longer used. Use 'mkldnn_data_type' instead.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCHW) Only used in "
@@ -221,12 +199,6 @@ void Pool2dOpMaker::Make() {
"Defaults to \"NHWC\". Specify the data format of the output data, "
"the input will be transformed automatically. ")
.SetDefault("NCHW");
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"padding_algorithm",
"(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
@@ -234,7 +206,11 @@ void Pool2dOpMaker::Make() {
"Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
.SetDefault("EXPLICIT");
// TODO(dzhwinter): need to registered layout transform function
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC( AddComment(R"DOC(
This operation calculates the pooling output based on This operation calculates the pooling output based on
the input, pooling_type and pool_size, pool_stride, pool_padding parameters. the input, pooling_type and pool_size, pool_stride, pool_padding parameters.
...@@ -407,22 +383,12 @@ void Pool3dOpMaker::Make() { ...@@ -407,22 +383,12 @@ void Pool3dOpMaker::Make() {
"pooling in each grid area to get output pooling value. " "pooling in each grid area to get output pooling value. "
"Default False") "Default False")
.SetDefault(false); .SetDefault(false);
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"ceil_mode",
"(bool) Whether to use the ceil function to calculate "
"output height and width. False is the default. If it is set to False, "
"the floor function will be used. Default False")
.SetDefault(false);
AddAttr<bool>("use_mkldnn",
"(bool) Only used in mkldnn kernel. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCDHW) Only used in "
@@ -436,8 +402,11 @@ void Pool3dOpMaker::Make() {
"\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
"Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
.SetDefault("EXPLICIT");
// TODO(dzhwinter): need to registered layout transform function
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC( AddComment(R"DOC(
This operation calculates the output based on This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters. the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
......
@@ -75,10 +75,6 @@ $$Out = scale*(X + bias)$$
"Apply bias addition after or before scaling. It is useful for "
"numeric stability in some circumstances.")
.SetDefault(true);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
}
};
@@ -108,11 +104,6 @@ class ScaleGradMaker : public framework::SingleGradOpMaker<T> {
VLOG(6) << "Finish Set Attr bias";
grad_op->SetAttr("bias_after_scale", true);
VLOG(6) << "Finish Set Attr bias_after_scale";
if (grad_op->HasAttr("use_mkldnn")) {
VLOG(6) << "Finish Check Attr use_mkldnn";
grad_op->SetAttr("use_mkldnn", this->GetAttr("use_mkldnn"));
VLOG(6) << "Finish Set Attr use_mkldnn";
}
VLOG(6) << "Finish Apply"; VLOG(6) << "Finish Apply";
} }
}; };
......
@@ -56,11 +56,6 @@ class StackOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<int>("axis",
"The axis along which all of the Inputs(X) should be stacked.")
.SetDefault(0);
AddAttr<bool>(
"use_mkldnn",
"(bool, default false) Indicates if MKL-DNN kernel will be used")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC( AddComment(R"DOC(
Stack Operator. Stack Operator.
Stack all of the Inputs(X) into one tensor along Attr(axis). The dims of all Inputs(X) must be the same. Stack all of the Inputs(X) into one tensor along Attr(axis). The dims of all Inputs(X) must be the same.
......
@@ -334,6 +334,7 @@ class SumGradOpBaseMaker : public imperative::GradOpBaseMakerBase {
op.SetInput("X", og);
op.SetOutput("Out", InputGradsType{x_grad});
op.SetAttr("scale", 1.0f);
op.SetDefaultAttrsMap(DefaultAttrsMap());
}
return node;
} else {
...
@@ -275,13 +275,49 @@ class Transpose2Op : public TransposeOp {
}
};
class Transpose2OpMaker : public TransposeOpMaker {
class Transpose2OpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
TransposeOpMaker::Make();
AddInput(
"X",
"(Tensor) The input tensor, tensors with rank up to 6 are supported.");
AddOutput("Out", "(Tensor)The output tensor.");
AddAttr<std::vector<int>>(
"axis",
"(vector<int>) A list of values, and the size of the list should be "
"the same with the input tensor rank. This operator permutes the input "
"tensor's axes according to the values given.");
AddOutput("XShape", "(Tensor)The output tensor.") AddOutput("XShape", "(Tensor)The output tensor.")
.AsIntermediate() .AsIntermediate()
.AsExtra(); .AsExtra();
AddComment(R"DOC(
Transpose Operator.
The input tensor will be permuted according to the axes given.
The behavior of this operator is similar to how `numpy.transpose` works.
- suppose the input `X` is a 2-D tensor:
$$
X = \begin{pmatrix}
0 &1 &2 \\
3 &4 &5
\end{pmatrix}$$
the given `axes` is: $[1, 0]$, and $Y$ = transpose($X$, axis)
then the output $Y$ is:
$$
Y = \begin{pmatrix}
0 &3 \\
1 &4 \\
2 &5
\end{pmatrix}$$
- Given a input tensor with shape $(N, C, H, W)$ and the `axes` is
$[0, 2, 3, 1]$, then shape of the output tensor will be: $(N, H, W, C)$.
)DOC");
}
};
...
@@ -8,6 +8,12 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : add (elementwise_add)
backward : add_grad (elementwise_add_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : addmm
backward : addmm_grad
extra :
@@ -193,6 +199,12 @@
outputs :
out : Out
- op : divide (elementwise_div)
backward : divide_grad (elementwise_div)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : dot
inputs :
{x : X, y : Y}
@@ -209,6 +221,12 @@
extra :
attrs : [bool fix_seed = false, int seed = 0]
- op : elementwise_pow
backward : elementwise_pow_grad
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : elu
backward : elu_grad
extra :
@@ -231,6 +249,11 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : expand (expand_v2)
backward : expand_grad (expand_v2_grad)
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- op : expm1
backward : expm1_grad
extra :
@@ -253,16 +276,47 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : floor_divide (elementwise_floordiv)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : fmax (elementwise_fmax)
backward : fmax_grad (elementwise_fmax_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : fmin (elementwise_fmin)
backward : fmin_grad (elementwise_fmin_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : frobenius_norm
backward : frobenius_norm_grad
extra :
attrs : [bool use_mkldnn = false]
- op : full (fill_constant)
extra :
attrs : [bool use_mkldnn = false]
- op : gather
backward : gather_grad
extra :
attrs : [bool overwrite = true]
- op : gelu
backward : gelu_grad
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
- op : grad_add
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : grid_sampler
backward : grid_sampler_grad
extra :
@@ -278,6 +332,12 @@
extra :
attrs : [bool use_mkldnn = false]
- op : heaviside (elementwise_heaviside)
backward : heaviside_grad (elementwise_heaviside_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : inplace_abn
backward : inplace_abn_grad
extra :
@@ -344,13 +404,37 @@
extra :
attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']
- op : matmul_with_flatten (mul)
backward : matmul_with_flatten_grad (mul_grad)
extra :
attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
float scale_out = 1.0f, bool force_fp32_output = false]
- op : maximum (elementwise_max)
backward : maximum_grad (elementwise_max_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : maximum (elementwise_min)
backward : maximum_grad (elementwise_min_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : mish
backward : mish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : multiply (elementwise_mul)
backward : multiply_grad (elementwise_mul_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : mv
inputs :
{x : X, vec : Vec}
@@ -383,6 +467,17 @@
outputs :
out : Out
- op : pool2d
backward : pool2d_grad
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false,
str mkldnn_data_type = "float32", bool is_test = false]
- op : pool3d
backward : pool3d_grad
extra :
attrs : [bool use_mkldnn = false]
- op : prelu
backward : prelu_grad
extra :
@@ -446,6 +541,11 @@
extra :
attrs : [bool use_mkldnn = false]
- op : remainder (elementwise_mod)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : renorm
backward : renorm_grad
extra :
@@ -466,6 +566,10 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : scale
extra :
attrs : [bool use_mkldnn = false]
- op : seed
extra :
attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]
@@ -546,6 +650,17 @@
extra :
attrs : [bool use_mkldnn = false]
- op : stack
backward : stack_grad
extra :
attrs : [bool use_mkldnn = false]
- op : subtract (elementwise_sub)
backward : subtract_grad (elementwise_sub_grad)
extra :
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : swish
backward : swish_grad
extra :
@@ -577,6 +692,12 @@
outputs :
out : Out
- op : transpose (transpose2)
backward : transpose_grad (transpose2_grad)
extra :
attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
str mkldnn_data_type = "float32"]
- op : trilinear_interp (trilinear_interp_v2)
backward : trilinear_interp_grad (trilinear_interp_v2_grad)
extra :
...
@@ -77,9 +77,7 @@ class TestOperator(unittest.TestCase):
set(mul_op.attr_names),
set([
"x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var",
"use_mkldnn", "scale_x", "scale_y", "scale_out",
"force_fp32_output", "op_namescope", "op_callstack",
"op_device", "with_quant_attr"
"op_namescope", "op_callstack", "op_device", "with_quant_attr"
]))
self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
...