From 82bbbe2cb6cdfedd1df2daf3e9e38ca0b3c80fac Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Mon, 19 Sep 2022 11:46:54 +0800
Subject: [PATCH] Clear extra attributes of some Op in OpMaker (Part4) (#46060)

* clear extra attr of some ops in opmaker

* revert clear use_cudnn for pool

* fix test_operator_desc

* fix Attr interface of OperatorBase
---
 paddle/fluid/framework/op_desc.cc          |  8 +--
 paddle/fluid/framework/operator.h          | 16 ++++--
 paddle/fluid/operators/expand_v2_op.cc     | 10 ----
 paddle/fluid/operators/fill_constant_op.cc |  4 --
 paddle/fluid/operators/gather_op.cc        |  8 ---
 paddle/fluid/operators/mul_op.cc           | 29 -----------
 paddle/fluid/operators/pool_op.cc          | 51 ++++---------------
 paddle/fluid/operators/stack_op.cc         |  5 --
 paddle/fluid/operators/transpose_op.cc     | 40 ++++++++++++++-
 paddle/phi/api/yaml/op_compat.yaml         | 45 +++++++++++++++-
 .../tests/unittests/test_operator_desc.py  |  4 +-
 11 files changed, 108 insertions(+), 112 deletions(-)

diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index a725521c423..f2474cda0a9 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -790,11 +790,11 @@ Attribute OpDesc::GetAttr(const std::string &name, bool with_attr_var) const {
   auto it = attrs_.find(name);
   if (it == attrs_.end()) {
     it = runtime_attrs_.find(name);
+    PADDLE_ENFORCE_NE(
+        it,
+        runtime_attrs_.end(),
+        platform::errors::NotFound("Attribute %s is not found.", name));
   }
-  PADDLE_ENFORCE_NE(
-      it,
-      attrs_.end(),
-      platform::errors::NotFound("Attribute %s is not found.", name));
   if (!with_attr_var) {
     PADDLE_ENFORCE_EQ(
         HasAttrVar(it->second),
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 16707ca63ab..7e4dc337dbf 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -182,11 +182,17 @@ class OperatorBase {
   }
   template <typename T>
   inline const T& Attr(const std::string& name) const {
-    PADDLE_ENFORCE_NE(
-        attrs_.find(name),
-        attrs_.end(),
-        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
-    return PADDLE_GET_CONST(T, attrs_.at(name));
+    auto it = attrs_.find(name);
+    if (it == attrs_.end()) {
+      it = runtime_attrs_.find(name);
+      PADDLE_ENFORCE_NE(
+          it,
+          runtime_attrs_.end(),
+          platform::errors::NotFound(
+              "(%s) is not found in AttributeMap and RuntimeAttributeMap.",
+              name));
+    }
+    return PADDLE_GET_CONST(T, it->second);
   }
   void SetAttr(const std::string& name, const Attribute& v) {
     PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/operators/expand_v2_op.cc b/paddle/fluid/operators/expand_v2_op.cc
index d548023bfbf..fd92a43318c 100644
--- a/paddle/fluid/operators/expand_v2_op.cc
+++ b/paddle/fluid/operators/expand_v2_op.cc
@@ -88,16 +88,6 @@ class ExpandV2OpMaker : public framework::OpProtoAndCheckerMaker {
              "the corresponding value given by Attr(expand_times).");
     AddAttr<std::vector<int>>("shape", "The expanded shape for each dimension.")
         .SetDefault({});
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
     AddComment(R"DOC(
 Expand the input to the given shape. The rank of X
 should be in [1, 6] and size of 'shape' must be in [1, 6] also.
diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc
index 0dd0e1dcecf..28167c4736f 100644
--- a/paddle/fluid/operators/fill_constant_op.cc
+++ b/paddle/fluid/operators/fill_constant_op.cc
@@ -174,10 +174,6 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
                  "3: XPUPlace. "
                  "4: NPUPlace. ")
         .SetDefault(-1);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddOutput("Out",
               "(Tensor) Tensor of specified shape will be filled "
               "with the specified value");
diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc
index f993189f070..77e4adfeea7 100644
--- a/paddle/fluid/operators/gather_op.cc
+++ b/paddle/fluid/operators/gather_op.cc
@@ -82,14 +82,6 @@ class GatherOpMaker : public framework::OpProtoAndCheckerMaker {
         "The Tensor which contains the axis that we do gather operation.")
         .AsDispensable();
     AddOutput("Out", "The output of gather op");
-    AddAttr<bool>(
-        "overwrite",
-        "(bool, default: False) "
-        "In backward process, calc the grad when has same index,"
-        "If true, update the grad using the overwrite mode in same index,"
-        "If false, using the accumulate mode in same index.")
-        .SetDefault(true)
-        .AsExtra();
     AddAttr<int>(
         "axis",
         "The Tensor which contains the axis that we do gather operation.")
diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc
index f26b4b94881..2d4ca62955e 100644
--- a/paddle/fluid/operators/mul_op.cc
+++ b/paddle/fluid/operators/mul_op.cc
@@ -79,10 +79,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "(Tensor), The first input tensor of mul op.");
     AddInput("Y", "(Tensor), The second input tensor of mul op.");
     AddOutput("Out", "(Tensor), The output tensor of mul op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<int>(
         "x_num_col_dims",
         R"DOC((int, default 1), The mul_op can take tensors with more than two
@@ -113,31 +109,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
 )DOC")
         .SetDefault(1)
         .EqualGreaterThan(1);
-    AddAttr<float>(
-        "scale_x",
-        "scale_x to be used for int8 mul input data x. scale_x has the"
-        "same purpose as scale_in in OPs that support quantization."
-        "Only to be used with MKL-DNN INT8")
-        .SetDefault(1.0f)
-        .AsExtra();
-    AddAttr<std::vector<float>>(
-        "scale_y",
-        "scale_y to be used for int8 mul input data y. scale_y has the"
-        "same purpose as scale_weights in OPs that support quantization."
-        "Only to be used with MKL-DNN INT8")
-        .SetDefault({1.0f})
-        .AsExtra();
-    AddAttr<float>("scale_out",
-                   "scale_out to be used for int8 output data."
-                   "Only used with MKL-DNN INT8")
-        .SetDefault(1.0f)
-        .AsExtra();
-    AddAttr<bool>(
-        "force_fp32_output",
-        "(bool, default false) Force quantize kernel output FP32, only "
-        "used in quantized MKL-DNN.")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Mul Operator.
 
diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc
index c5b1ce12f17..e8b35b89157 100644
--- a/paddle/fluid/operators/pool_op.cc
+++ b/paddle/fluid/operators/pool_op.cc
@@ -186,34 +186,12 @@ void Pool2dOpMaker::Make() {
           "pooling in each grid area to get output pooling value. "
           "Default False.")
       .SetDefault(false);
-
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
Default False") - .SetDefault(false) - .AsExtra(); AddAttr( "ceil_mode", "(bool) Whether to use the ceil function to calculate " "output height and width. False is the default. If it is set to False, " "the floor function will be used. Default False") .SetDefault(false); - AddAttr("use_mkldnn", - "(bool) Only used in mkldnn kernel. Default False") - .SetDefault(false) - .AsExtra(); - AddAttr( - "use_quantizer", - "(bool, default false) " - "This parameter is no longer used. Use 'mkldnn_data_type' instead.") - .SetDefault(false) - .AsExtra(); - AddAttr( - "mkldnn_data_type", - "(string, default \"float32\"). Data type of mkldnn kernel") - .SetDefault("float32") - .InEnum({"float32", "int8", "bfloat16"}) - .AsExtra(); AddAttr( "data_format", "(string, default NCHW) Only used in " @@ -221,12 +199,6 @@ void Pool2dOpMaker::Make() { "Defaults to \"NHWC\". Specify the data format of the output data, " "the input will be transformed automatically. ") .SetDefault("NCHW"); - AddAttr("is_test", - "(bool, default false) Set to true for inference only, false " - "for training. Some layers may run faster when this is true.") - .SetDefault(false) - .AsExtra(); - AddAttr( "padding_algorithm", "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\"," @@ -234,7 +206,11 @@ void Pool2dOpMaker::Make() { "Set to \"SAME\" or \"VALID\" for algorithm of padding. ") .SetDefault("EXPLICIT"); // TODO(dzhwinter): need to registered layout transform function - + AddAttr( + "use_cudnn", + "(bool) Only used in cudnn kernel, need install cudnn. Default False") + .SetDefault(false) + .AsExtra(); AddComment(R"DOC( This operation calculates the pooling output based on the input, pooling_type and pool_size, pool_stride, pool_padding parameters. @@ -407,22 +383,12 @@ void Pool3dOpMaker::Make() { "pooling in each grid area to get output pooling value. " "Default False") .SetDefault(false); - - AddAttr( - "use_cudnn", - "(bool) Only used in cudnn kernel, need install cudnn. Default False") - .SetDefault(false) - .AsExtra(); AddAttr( "ceil_mode", "(bool) Whether to use the ceil function to calculate " "output height and width. False is the default. If it is set to False, " "the floor function will be used. Default False") .SetDefault(false); - AddAttr("use_mkldnn", - "(bool) Only used in mkldnn kernel. Default False") - .SetDefault(false) - .AsExtra(); AddAttr( "data_format", "(string, default NCDHW) Only used in " @@ -436,8 +402,11 @@ void Pool3dOpMaker::Make() { "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. " "Set to \"SAME\" or \"VALID\" for algorithm of padding. ") .SetDefault("EXPLICIT"); - // TODO(dzhwinter): need to registered layout transform function - + AddAttr( + "use_cudnn", + "(bool) Only used in cudnn kernel, need install cudnn. Default False") + .SetDefault(false) + .AsExtra(); AddComment(R"DOC( This operation calculates the output based on the input, pooling_type, pool_size, pool_stride, and pool_padding parameters. diff --git a/paddle/fluid/operators/stack_op.cc b/paddle/fluid/operators/stack_op.cc index a0351b41a24..e9706f00ce8 100644 --- a/paddle/fluid/operators/stack_op.cc +++ b/paddle/fluid/operators/stack_op.cc @@ -56,11 +56,6 @@ class StackOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("axis", "The axis along which all of the Inputs(X) should be stacked.") .SetDefault(0); - AddAttr( - "use_mkldnn", - "(bool, default false) Indicates if MKL-DNN kernel will be used") - .SetDefault(false) - .AsExtra(); AddComment(R"DOC( Stack Operator. 
 Stack all of the Inputs(X) into one tensor along Attr(axis). The dims of all Inputs(X) must be the same.
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index 610d6e1f48a..b342f01e46f 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -275,13 +275,49 @@ class Transpose2Op : public TransposeOp {
   }
 };
 
-class Transpose2OpMaker : public TransposeOpMaker {
+class Transpose2OpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    TransposeOpMaker::Make();
+    AddInput(
+        "X",
+        "(Tensor) The input tensor, tensors with rank up to 6 are supported.");
+    AddOutput("Out", "(Tensor)The output tensor.");
+    AddAttr<std::vector<int>>(
+        "axis",
+        "(vector<int>) A list of values, and the size of the list should be "
+        "the same with the input tensor rank. This operator permutes the input "
+        "tensor's axes according to the values given.");
     AddOutput("XShape", "(Tensor)The output tensor.")
         .AsIntermediate()
        .AsExtra();
+    AddComment(R"DOC(
+Transpose Operator.
+
+The input tensor will be permuted according to the axes given.
+The behavior of this operator is similar to how `numpy.transpose` works.
+
+- suppose the input `X` is a 2-D tensor:
+    $$
+    X = \begin{pmatrix}
+    0 &1 &2 \\
+    3 &4 &5
+    \end{pmatrix}$$
+
+    the given `axes` is: $[1, 0]$, and $Y$ = transpose($X$, axis)
+
+    then the output $Y$ is:
+
+    $$
+    Y = \begin{pmatrix}
+    0 &3 \\
+    1 &4 \\
+    2 &5
+    \end{pmatrix}$$
+
+- Given a input tensor with shape $(N, C, H, W)$ and the `axes` is
+$[0, 2, 3, 1]$, then shape of the output tensor will be: $(N, H, W, C)$.
+
+)DOC");
   }
 };
 
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index fb2723fb25b..e01e6b78633 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -390,7 +390,7 @@ extra :
     attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}',
              'int[] fused_transpose_Out = {}', str mkldnn_data_type = "float32",
              'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
-             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
+             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']
 
 - op : maximum (elementwise_max)
   backward : maximum_grad (elementwise_max_grad)
@@ -666,3 +666,46 @@
     x : X
   outputs :
     out : Out
+
+- op : expand (expand_v2)
+  backward : expand_grad (expand_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
+
+- op : full (fill_constant)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- op : gather
+  backward : gather_grad
+  extra :
+    attrs : [bool overwrite = true]
+
+- op : matmul_with_flatten (mul)
+  backward : matmul_with_flatten_grad (mul_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
+             float scale_out = 1.0f, bool force_fp32_output = false]
+
+- op : pool2d
+  backward : pool2d_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
+             str mkldnn_data_type = "float32", bool is_test = false]
+
+- op : pool3d
+  backward : pool3d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- op : stack
+  backward : stack_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+
+- op : transpose (transpose2)
+  backward : transpose_grad (transpose2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
+             str mkldnn_data_type = "float32"]
diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py
index 3c0871cfc82..7654ae214b0 100644
--- a/python/paddle/fluid/tests/unittests/test_operator_desc.py
+++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py
@@ -77,9 +77,7 @@ class TestOperator(unittest.TestCase):
             set(mul_op.attr_names),
             set([
                 "x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var",
-                "use_mkldnn", "scale_x", "scale_y", "scale_out",
-                "force_fp32_output", "op_namescope", "op_callstack",
-                "op_device", "with_quant_attr"
+                "op_namescope", "op_callstack", "op_device", "with_quant_attr"
             ]))
         self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
         self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
-- 
GitLab
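Reviewer note (illustration only, not part of the patch): the mechanism underlying this change is that the extra attributes stripped from the OpMakers move into the `extra : attrs` lists of op_compat.yaml and become *runtime* attributes, so both OpDesc::GetAttr and OperatorBase::Attr now search attrs_ first and fall back to runtime_attrs_ before reporting NotFound. The sketch below is a minimal, self-contained illustration of that two-map fallback under simplified types; GetAttrWithFallback and the reduced Attribute variant are hypothetical names for this note, not Paddle API.

    #include <cassert>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <variant>

    // Simplified stand-ins for Paddle's Attribute/AttributeMap types.
    using Attribute = std::variant<int, float, bool, std::string>;
    using AttributeMap = std::map<std::string, Attribute>;

    // Look up `name` in the compile-time attribute map first; if absent,
    // fall back to the runtime (extra) attribute map, mirroring the new
    // OperatorBase::Attr<T> behavior introduced by this patch.
    template <typename T>
    const T& GetAttrWithFallback(const AttributeMap& attrs,
                                 const AttributeMap& runtime_attrs,
                                 const std::string& name) {
      auto it = attrs.find(name);
      if (it == attrs.end()) {
        it = runtime_attrs.find(name);
        if (it == runtime_attrs.end()) {
          throw std::out_of_range("(" + name +
                                  ") is not found in AttributeMap and "
                                  "RuntimeAttributeMap.");
        }
      }
      return std::get<T>(it->second);
    }

    int main() {
      AttributeMap attrs{{"x_num_col_dims", 1}};
      // An extra attribute such as use_mkldnn now lives in the runtime map,
      // populated from op_compat.yaml defaults instead of the OpMaker.
      AttributeMap runtime_attrs{{"use_mkldnn", false}};
      assert(GetAttrWithFallback<int>(attrs, runtime_attrs, "x_num_col_dims") == 1);
      assert(GetAttrWithFallback<bool>(attrs, runtime_attrs, "use_mkldnn") == false);
      return 0;
    }

This is also why test_operator_desc.py shrinks its expected attr_names set: attributes like scale_x or use_mkldnn are no longer registered on the op proto itself, yet any kernel that still reads them keeps working through the runtime-attribute fallback.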