From 0cc2251fd5625ccb88908d9302d01a746e2404eb Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Tue, 27 Sep 2022 17:00:03 +0800
Subject: [PATCH] [cherry-pick] clear extra attrs of some ops in OpMaker
 (#45845, #45984, #46060) (#46218)

* Clear extra attrs of elementwise op in OpMaker (#45845)

* clear extra attrs of elementwise op in opmaker

* fix op_debug_string_test

* fix bug of grad_add

* fix sort of runtime attrs

* Clear extra attrs of scale in OpMaker (#45984)

* clear extra attr of scale in opmaker

* fix sum bug

* fix merge conflict

* fix minus

* Clear extra attributes of some Op in OpMaker (Part4) (#46060)

* clear extra attr of some ops in opmaker

* revert clear use_cudnn for pool

* fix test_operator_desc

* fix Attr interface of OperatorBase

* fix code style
---
 .pre-commit-config.yaml                       |   2 +-
 paddle/fluid/framework/op_desc.cc             |  19 ++-
 paddle/fluid/framework/operator.h             |  16 ++-
 .../operators/elementwise/elementwise_op.h    |  35 -----
 paddle/fluid/operators/expand_v2_op.cc        |  10 --
 paddle/fluid/operators/fill_constant_op.cc    |   4 -
 paddle/fluid/operators/gather_op.cc           |   8 --
 paddle/fluid/operators/minus_op.cc            |   2 +
 paddle/fluid/operators/mul_op.cc              |  29 -----
 .../fluid/operators/op_debug_string_test.cc   |   2 -
 paddle/fluid/operators/pool_op.cc             |  51 ++------
 paddle/fluid/operators/scale_op.cc            |   9 --
 paddle/fluid/operators/stack_op.cc            |   5 -
 paddle/fluid/operators/sum_op.cc              |   1 +
 paddle/fluid/operators/transpose_op.cc        |  40 +++++-
 paddle/phi/api/yaml/op_compat.yaml            | 123 +++++++++++++++++-
 .../tests/unittests/test_operator_desc.py     |   4 +-
 17 files changed, 200 insertions(+), 160 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f4c0f1d97c..97c1afbcc1 100755
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -24,7 +24,7 @@ repos:
       files: (?!.*third_party)^.*$ | (?!.*book)^.*$
     - id: end-of-file-fixer
     - id: sort-simple-yaml
-      files: (api|backward|api_[a-z_]+)\.yaml$
+      files: (op|backward|op_[a-z_]+)\.yaml$
 - repo: local
   hooks:
     - id: clang-format
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 4d0d10c783..f2474cda0a 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -790,11 +790,11 @@ Attribute OpDesc::GetAttr(const std::string &name, bool with_attr_var) const {
   auto it = attrs_.find(name);
   if (it == attrs_.end()) {
     it = runtime_attrs_.find(name);
+    PADDLE_ENFORCE_NE(
+        it,
+        runtime_attrs_.end(),
+        platform::errors::NotFound("Attribute %s is not found.", name));
   }
-  PADDLE_ENFORCE_NE(
-      it,
-      attrs_.end(),
-      platform::errors::NotFound("Attribute %s is not found.", name));
   if (!with_attr_var) {
     PADDLE_ENFORCE_EQ(
         HasAttrVar(it->second),
@@ -998,16 +998,25 @@ void OpDesc::Flush() {
     std::vector<std::pair<std::string, Attribute>> sorted_attrs{attrs_.begin(),
                                                                 attrs_.end()};
+
+    std::vector<std::pair<std::string, Attribute>> sorted_runtime_attrs{
+        runtime_attrs_.begin(), runtime_attrs_.end()};
+
     std::sort(
         sorted_attrs.begin(),
         sorted_attrs.end(),
         [](std::pair<std::string, Attribute> a,
           std::pair<std::string, Attribute> b) { return a.first < b.first; });
+    std::sort(
+        sorted_runtime_attrs.begin(),
+        sorted_runtime_attrs.end(),
+        [](std::pair<std::string, Attribute> a,
+           std::pair<std::string, Attribute> b) { return a.first < b.first; });
     for (auto &attr : sorted_attrs) {
       set_attr_desc(attr.first, attr.second);
     }
-    for (auto &attr : runtime_attrs_) {
+    for (auto &attr : sorted_runtime_attrs) {
       set_attr_desc(attr.first, attr.second);
     }
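
The Flush() hunk above is the "fix sort of runtime attrs" item from the commit
message: AttributeMap is an unordered map, so serializing runtime_attrs_ by
direct iteration would emit attributes in an unspecified order. Sorting the
entries first keeps the serialized program deterministic. A minimal
stand-alone sketch of that pattern, with a plain
std::unordered_map<std::string, int> standing in for Paddle's AttributeMap
(nothing below is Paddle API):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    int main() {
      std::unordered_map<std::string, int> runtime_attrs{{"b", 2}, {"a", 1}};
      // Copy the unordered entries into a vector and sort by key, so that
      // every serialization pass emits them in the same order.
      std::vector<std::pair<std::string, int>> sorted{runtime_attrs.begin(),
                                                      runtime_attrs.end()};
      std::sort(sorted.begin(), sorted.end(),
                [](const auto& x, const auto& y) { return x.first < y.first; });
      for (const auto& kv : sorted) {
        std::cout << kv.first << " = " << kv.second << "\n";  // a, then b
      }
      return 0;
    }
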
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 0faaee4843..03367e32a8 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -182,11 +182,17 @@ class OperatorBase {
   }

   template <typename T>
   inline const T& Attr(const std::string& name) const {
-    PADDLE_ENFORCE_NE(
-        attrs_.find(name),
-        attrs_.end(),
-        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
-    return PADDLE_GET_CONST(T, attrs_.at(name));
+    auto it = attrs_.find(name);
+    if (it == attrs_.end()) {
+      it = runtime_attrs_.find(name);
+      PADDLE_ENFORCE_NE(
+          it,
+          runtime_attrs_.end(),
+          platform::errors::NotFound(
+              "(%s) is not found in AttributeMap and RuntimeAttributeMap.",
+              name));
+    }
+    return PADDLE_GET_CONST(T, it->second);
   }
   void SetAttr(const std::string& name, const Attribute& v) {
     PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index 610e5932b1..e722d5f7e6 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -216,47 +216,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInputX();
     AddInputY();
     AddOpOutput();
-
     AddAttr<int>("axis",
                  "(int, default -1). If X.dimension != Y.dimension,"
                  "Y.dimension must be a subsequence of x.dimension. And axis "
                  "is the start dimension index "
                  "for broadcasting Y onto X. ")
         .SetDefault(-1);
-    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
-        .SetDefault("")
-        .AsExtra();
-    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
-        .SetDefault("")
-        .AsExtra();
-    AddAttr<bool>(
-        "use_quantizer",
-        "(bool, default false) "
-        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "int8", "bfloat16"})
-        .AsExtra();
-    /* int8 parameters */
-    AddAttr<float>("Scale_x",
-                   "(float, default 1.0f), The quantize scale of X tensor")
-        .SetDefault(1.0f)
-        .AsExtra();
-    AddAttr<float>("Scale_y",
-                   "(float, default 1.0f), The quantize scale of Y tensor")
-        .SetDefault(1.0f)
-        .AsExtra();
-    AddAttr<float>("Scale_out",
-                   "(float, default 1.0f), The quantize scale of output data")
-        .SetDefault(1.0f)
-        .AsExtra();
     AddOpComment();
   }
diff --git a/paddle/fluid/operators/expand_v2_op.cc b/paddle/fluid/operators/expand_v2_op.cc
index d548023bfb..fd92a43318 100644
--- a/paddle/fluid/operators/expand_v2_op.cc
+++ b/paddle/fluid/operators/expand_v2_op.cc
@@ -88,16 +88,6 @@ class ExpandV2OpMaker : public framework::OpProtoAndCheckerMaker {
              "the corresponding value given by Attr(expand_times).");
     AddAttr<std::vector<int>>("shape", "The expanded shape for each dimension.")
         .SetDefault({});
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
     AddComment(R"DOC(
 Expand the input to the given shape. The rank of X
 should be in [1, 6] and size of 'shape' must be in [1, 6] also.
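
The new OperatorBase::Attr<T>() above mirrors OpDesc::GetAttr(): look the name
up in the regular attribute map first, and only then fall back to the runtime
(extra) attribute map before failing. Reduced to a stand-alone sketch, again
with plain std::unordered_map standing in for Paddle's attribute maps (the
names below are illustrative, not Paddle API):

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    // Look `name` up in `attrs` first, then in `runtime_attrs`; throw if it
    // is in neither, mirroring the PADDLE_ENFORCE_NE in the hunk above.
    int GetAttr(const std::unordered_map<std::string, int>& attrs,
                const std::unordered_map<std::string, int>& runtime_attrs,
                const std::string& name) {
      auto it = attrs.find(name);
      if (it == attrs.end()) {
        it = runtime_attrs.find(name);
        if (it == runtime_attrs.end()) {
          throw std::out_of_range(name + " is not found in either map.");
        }
      }
      return it->second;
    }

    int main() {
      std::unordered_map<std::string, int> attrs{{"axis", -1}};
      std::unordered_map<std::string, int> runtime_attrs{{"use_mkldnn", 0}};
      std::cout << GetAttr(attrs, runtime_attrs, "use_mkldnn") << "\n";  // 0
      return 0;
    }
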
") .SetDefault(-1); - AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false) - .AsExtra(); AddOutput("Out", "(Tensor) Tensor of specified shape will be filled " "with the specified value"); diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index f993189f07..77e4adfeea 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -82,14 +82,6 @@ class GatherOpMaker : public framework::OpProtoAndCheckerMaker { "The Tensor which contains the axis that we do gather operation.") .AsDispensable(); AddOutput("Out", "The output of gather op"); - AddAttr( - "overwrite", - "(bool, default: False) " - "In backward process, calc the grad when has same index," - "If true, update the grad using the overwrite mode in same index," - "If false, using the accumulate mode in same index.") - .SetDefault(true) - .AsExtra(); AddAttr( "axis", "The Tensor which contains the axis that we do gather operation.") diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index 310d28738f..398a254f45 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -130,6 +130,7 @@ class MinusGradMaker : public imperative::GradOpBaseMakerBase { op.SetInput("X", this->OutputGrad("Out")); op.SetOutput("Out", x_g); op.SetAttr("scale", 1.0f); + op.SetDefaultAttrsMap(DefaultAttrsMap()); } if (!y_g.empty()) { @@ -138,6 +139,7 @@ class MinusGradMaker : public imperative::GradOpBaseMakerBase { op.SetInput("X", this->OutputGrad("Out")); op.SetOutput("Out", y_g); op.SetAttr("scale", -1.0f); + op.SetDefaultAttrsMap(DefaultAttrsMap()); } return node; diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index f26b4b9488..2d4ca62955 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -79,10 +79,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "(Tensor), The first input tensor of mul op."); AddInput("Y", "(Tensor), The second input tensor of mul op."); AddOutput("Out", "(Tensor), The output tensor of mul op."); - AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false) - .AsExtra(); AddAttr( "x_num_col_dims", R"DOC((int, default 1), The mul_op can take tensors with more than two @@ -113,31 +109,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { )DOC") .SetDefault(1) .EqualGreaterThan(1); - AddAttr( - "scale_x", - "scale_x to be used for int8 mul input data x. scale_x has the" - "same purpose as scale_in in OPs that support quantization." - "Only to be used with MKL-DNN INT8") - .SetDefault(1.0f) - .AsExtra(); - AddAttr>( - "scale_y", - "scale_y to be used for int8 mul input data y. scale_y has the" - "same purpose as scale_weights in OPs that support quantization." - "Only to be used with MKL-DNN INT8") - .SetDefault({1.0f}) - .AsExtra(); - AddAttr("scale_out", - "scale_out to be used for int8 output data." - "Only used with MKL-DNN INT8") - .SetDefault(1.0f) - .AsExtra(); - AddAttr( - "force_fp32_output", - "(bool, default false) Force quantize kernel output FP32, only " - "used in quantized MKL-DNN.") - .SetDefault(false) - .AsExtra(); AddComment(R"DOC( Mul Operator. 
diff --git a/paddle/fluid/operators/op_debug_string_test.cc b/paddle/fluid/operators/op_debug_string_test.cc
index 372a71706a..fd8e027092 100644
--- a/paddle/fluid/operators/op_debug_string_test.cc
+++ b/paddle/fluid/operators/op_debug_string_test.cc
@@ -41,8 +41,6 @@ TEST(op_debug_str, test_unknown_dtype) {
   desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")});
   desc.SetAttr("axis", -1);
   desc.SetAttr("use_mkldnn", false);
-  desc.SetAttr("x_data_format", "");
-  desc.SetAttr("y_data_format", "");

   auto x_tensor = scope.Var("X")->GetMutable<framework::LoDTensor>();
   x_tensor->Resize(dim);
diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc
index c5b1ce12f1..e8b35b8915 100644
--- a/paddle/fluid/operators/pool_op.cc
+++ b/paddle/fluid/operators/pool_op.cc
@@ -186,34 +186,12 @@ void Pool2dOpMaker::Make() {
       "pooling in each grid area to get output pooling value. "
       "Default False.")
       .SetDefault(false);
-
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<bool>(
       "ceil_mode",
       "(bool) Whether to use the ceil function to calculate "
      "output height and width. False is the default. If it is set to False, "
       "the floor function will be used. Default False")
       .SetDefault(false);
-  AddAttr<bool>("use_mkldnn",
-                "(bool) Only used in mkldnn kernel. Default False")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>(
-      "use_quantizer",
-      "(bool, default false) "
-      "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>(
-      "mkldnn_data_type",
-      "(string, default \"float32\"). Data type of mkldnn kernel")
-      .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"})
-      .AsExtra();
   AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NHWC\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("NCHW");
-  AddAttr<bool>("is_test",
-                "(bool, default false) Set to true for inference only, false "
-                "for training. Some layers may run faster when this is true.")
-      .SetDefault(false)
-      .AsExtra();
-
   AddAttr<std::string>(
       "padding_algorithm",
       "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
       "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
       "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
       .SetDefault("EXPLICIT");
   // TODO(dzhwinter): need to registered layout transform function
-
+  AddAttr<bool>(
+      "use_cudnn",
+      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
 This operation calculates the pooling output based on
 the input, pooling_type and pool_size, pool_stride, pool_padding parameters.
@@ -407,22 +383,12 @@ void Pool3dOpMaker::Make() {
       "pooling in each grid area to get output pooling value. "
       "Default False")
       .SetDefault(false);
-
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<bool>(
       "ceil_mode",
       "(bool) Whether to use the ceil function to calculate "
       "output height and width. False is the default. If it is set to False, "
       "the floor function will be used. Default False")
       .SetDefault(false);
-  AddAttr<bool>("use_mkldnn",
-                "(bool) Only used in mkldnn kernel. Default False")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCDHW) Only used in "
       "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
       "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
       .SetDefault("EXPLICIT");
-  // TODO(dzhwinter): need to registered layout transform function
-
+  AddAttr<bool>(
+      "use_cudnn",
+      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
 This operation calculates the output based on
 the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Set to \"EXPLICIT\" for explicit padding. " "Set to \"SAME\" or \"VALID\" for algorithm of padding. ") .SetDefault("EXPLICIT"); - // TODO(dzhwinter): need to registered layout transform function - + AddAttr( + "use_cudnn", + "(bool) Only used in cudnn kernel, need install cudnn. Default False") + .SetDefault(false) + .AsExtra(); AddComment(R"DOC( This operation calculates the output based on the input, pooling_type, pool_size, pool_stride, and pool_padding parameters. diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index 3c2b5363d8..cab04e43e8 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -75,10 +75,6 @@ $$Out = scale*(X + bias)$$ "Apply bias addition after or before scaling. It is useful for " "numeric stability in some circumstances.") .SetDefault(true); - AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false) - .AsExtra(); } }; @@ -108,11 +104,6 @@ class ScaleGradMaker : public framework::SingleGradOpMaker { VLOG(6) << "Finish Set Attr bias"; grad_op->SetAttr("bias_after_scale", true); VLOG(6) << "Finish Set Attr bias_after_scale"; - if (grad_op->HasAttr("use_mkldnn")) { - VLOG(6) << "Finish Check Attr use_mkldnn"; - grad_op->SetAttr("use_mkldnn", this->GetAttr("use_mkldnn")); - VLOG(6) << "Finish Set Attr use_mkldnn"; - } VLOG(6) << "Finish Apply"; } }; diff --git a/paddle/fluid/operators/stack_op.cc b/paddle/fluid/operators/stack_op.cc index a0351b41a2..e9706f00ce 100644 --- a/paddle/fluid/operators/stack_op.cc +++ b/paddle/fluid/operators/stack_op.cc @@ -56,11 +56,6 @@ class StackOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("axis", "The axis along which all of the Inputs(X) should be stacked.") .SetDefault(0); - AddAttr( - "use_mkldnn", - "(bool, default false) Indicates if MKL-DNN kernel will be used") - .SetDefault(false) - .AsExtra(); AddComment(R"DOC( Stack Operator. Stack all of the Inputs(X) into one tensor along Attr(axis). The dims of all Inputs(X) must be the same. diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index ca851b8ee7..fcedf2d24b 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -334,6 +334,7 @@ class SumGradOpBaseMaker : public imperative::GradOpBaseMakerBase { op.SetInput("X", og); op.SetOutput("Out", InputGradsType{x_grad}); op.SetAttr("scale", 1.0f); + op.SetDefaultAttrsMap(DefaultAttrsMap()); } return node; } else { diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index 610d6e1f48..b342f01e46 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ b/paddle/fluid/operators/transpose_op.cc @@ -275,13 +275,49 @@ class Transpose2Op : public TransposeOp { } }; -class Transpose2OpMaker : public TransposeOpMaker { +class Transpose2OpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - TransposeOpMaker::Make(); + AddInput( + "X", + "(Tensor) The input tensor, tensors with rank up to 6 are supported."); + AddOutput("Out", "(Tensor)The output tensor."); + AddAttr>( + "axis", + "(vector) A list of values, and the size of the list should be " + "the same with the input tensor rank. This operator permutes the input " + "tensor's axes according to the values given."); AddOutput("XShape", "(Tensor)The output tensor.") .AsIntermediate() .AsExtra(); + AddComment(R"DOC( +Transpose Operator. + +The input tensor will be permuted according to the axes given. 
+The behavior of this operator is similar to how `numpy.transpose` works. + +- suppose the input `X` is a 2-D tensor: + $$ + X = \begin{pmatrix} + 0 &1 &2 \\ + 3 &4 &5 + \end{pmatrix}$$ + + the given `axes` is: $[1, 0]$, and $Y$ = transpose($X$, axis) + + then the output $Y$ is: + + $$ + Y = \begin{pmatrix} + 0 &3 \\ + 1 &4 \\ + 2 &5 + \end{pmatrix}$$ + +- Given a input tensor with shape $(N, C, H, W)$ and the `axes` is +$[0, 2, 3, 1]$, then shape of the output tensor will be: $(N, H, W, C)$. + +)DOC"); } }; diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index b7ce9900b6..516e03662d 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -8,6 +8,12 @@ extra : attrs : [bool use_mkldnn = false, bool use_cudnn = false] +- op : add (elementwise_add) + backward : add_grad (elementwise_add_grad) + extra : + attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", + bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] + - op : addmm backward : addmm_grad extra : @@ -193,6 +199,12 @@ outputs : out : Out +- op : divide (elementwise_div) + backward : divide_grad (elementwise_div) + extra : + attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", + bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] + - op : dot inputs : {x : X, y : Y} @@ -209,6 +221,12 @@ extra : attrs : [bool fix_seed = false, int seed = 0] +- op : elementwise_pow + backward : elementwise_pow_grad + extra : + attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", + bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] + - op : elu backward : elu_grad extra : @@ -231,6 +249,11 @@ extra : attrs : [bool use_mkldnn = false, bool use_cudnn = false] +- op : expand (expand_v2) + backward : expand_grad (expand_v2_grad) + extra : + attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] + - op : expm1 backward : expm1_grad extra : @@ -253,16 +276,47 @@ extra : attrs : [bool use_mkldnn = false, bool use_cudnn = false] +- op : floor_divide (elementwise_floordiv) + extra : + attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", + bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] + +- op : fmax (elementwise_fmax) + backward : fmax_grad (elementwise_fmax_grad) + extra : + attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", + bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] + +- op : fmin (elementwise_fmin) + backward : fmin_grad (elementwise_fmin_grad) + extra : + attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", + bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] + - op : frobenius_norm backward : frobenius_norm_grad extra : attrs : [bool use_mkldnn = false] +- op : full (fill_constant) + extra : + attrs : [bool use_mkldnn = false] + +- op : gather + backward : gather_grad + extra : + attrs : [bool overwrite = true] + - op : gelu backward : gelu_grad extra : attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool 
+- op : grad_add
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - op : grid_sampler
   backward : grid_sampler_grad
   extra :
@@ -278,6 +332,12 @@
   extra :
     attrs : [bool use_mkldnn = false]

+- op : heaviside (elementwise_heaviside)
+  backward : heaviside_grad (elementwise_heaviside_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - op : inplace_abn
   backward : inplace_abn_grad
   extra :
@@ -344,13 +404,37 @@
   extra :
     attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
              str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
-             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
+             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']
+
+- op : matmul_with_flatten (mul)
+  backward : matmul_with_flatten_grad (mul_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
+             float scale_out = 1.0f, bool force_fp32_output = false]
+
+- op : maximum (elementwise_max)
+  backward : maximum_grad (elementwise_max_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
+- op : minimum (elementwise_min)
+  backward : minimum_grad (elementwise_min_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - op : mish
   backward : mish_grad
   extra :
     attrs : [bool use_mkldnn = false]

+- op : multiply (elementwise_mul)
+  backward : multiply_grad (elementwise_mul_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - op : mv
   inputs :
     {x : X, vec : Vec}
@@ -383,6 +467,17 @@
   outputs :
     out : Out

+- op : pool2d
+  backward : pool2d_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
+             str mkldnn_data_type = "float32", bool is_test = false]
+
+- op : pool3d
+  backward : pool3d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - op : prelu
   backward : prelu_grad
   extra :
@@ -446,6 +541,11 @@
   extra :
     attrs : [bool use_mkldnn = false]

+- op : remainder (elementwise_mod)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - op : renorm
   backward : renorm_grad
   extra :
@@ -466,6 +566,10 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]

+- op : scale
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - op : seed
   extra :
     attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]
@@ -546,6 +650,17 @@
   extra :
     attrs : [bool use_mkldnn = false]

+- op : stack
+  backward : stack_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- op : subtract (elementwise_sub)
+  backward : subtract_grad (elementwise_sub_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - op : swish
   backward : swish_grad
   extra :
@@ -577,6 +692,12 @@
   outputs :
     out : Out

+- op : transpose (transpose2)
+  backward : transpose_grad (transpose2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
+             str mkldnn_data_type = "float32"]
+
 - op : trilinear_interp (trilinear_interp_v2)
   backward : trilinear_interp_grad (trilinear_interp_v2_grad)
   extra :
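
Each entry added to op_compat.yaml above records, for one operator, the extra
attributes that were deleted from its C++ OpMaker: one type, name, and default
value per item, with the legacy operator name kept in parentheses after the
new API name. Paddle consumes these entries with its own yaml generator; the
stand-alone sketch below only illustrates the shape of the data and
deliberately ignores quoted list values such as 'int[] fused_reshape_X = {}'
(ParseExtraAttrs is a toy helper, not Paddle code):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct ExtraAttr {
      std::string type;
      std::string name;
      std::string default_value;
    };

    // Split "type name = default" items on commas and read the three fields
    // of each item; the bare "=" token is consumed and discarded.
    std::vector<ExtraAttr> ParseExtraAttrs(const std::string& spec) {
      std::vector<ExtraAttr> out;
      std::stringstream items(spec);
      std::string item;
      while (std::getline(items, item, ',')) {
        std::stringstream fields(item);
        ExtraAttr attr;
        std::string eq;
        fields >> attr.type >> attr.name >> eq >> attr.default_value;
        out.push_back(attr);
      }
      return out;
    }

    int main() {
      const std::string spec = "bool use_mkldnn = false, float Scale_x = 1.0f";
      for (const auto& a : ParseExtraAttrs(spec)) {
        std::cout << a.name << " : " << a.type << " = " << a.default_value << "\n";
      }
      return 0;
    }
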
diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py
index 3c0871cfc8..7654ae214b 100644
--- a/python/paddle/fluid/tests/unittests/test_operator_desc.py
+++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py
@@ -77,9 +77,7 @@ class TestOperator(unittest.TestCase):
             set(mul_op.attr_names),
             set([
                 "x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var",
-                "use_mkldnn", "scale_x", "scale_y", "scale_out",
-                "force_fp32_output", "op_namescope", "op_callstack",
-                "op_device", "with_quant_attr"
+                "op_namescope", "op_callstack", "op_device", "with_quant_attr"
             ]))
         self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
         self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
-- 
GitLab