未验证 提交 82bbbe2c 编写于 作者: Z zyfncg 提交者: GitHub

Clear extra attributes of some Op in OpMaker (Part4) (#46060)

* clear extra attr of some ops in opmaker

* revert clear use_cudnn for pool

* fix test_operator_desc

* fix Attr interface of OperatorBase
上级 d1b1aaa7
......@@ -790,11 +790,11 @@ Attribute OpDesc::GetAttr(const std::string &name, bool with_attr_var) const {
auto it = attrs_.find(name);
if (it == attrs_.end()) {
it = runtime_attrs_.find(name);
PADDLE_ENFORCE_NE(
it,
runtime_attrs_.end(),
platform::errors::NotFound("Attribute %s is not found.", name));
}
PADDLE_ENFORCE_NE(
it,
attrs_.end(),
platform::errors::NotFound("Attribute %s is not found.", name));
if (!with_attr_var) {
PADDLE_ENFORCE_EQ(
HasAttrVar(it->second),
......
......@@ -182,11 +182,17 @@ class OperatorBase {
}
template <typename T>
// Returns the attribute `name` as a const reference of type T.
// Looks in the regular attribute map first and, if absent, falls back to
// the runtime (extra) attribute map; fails with NotFound when the attribute
// exists in neither. NOTE(review): the diff artifact had retained the old
// attrs_-only implementation above the new fallback logic, which made the
// runtime_attrs_ path unreachable — only the new implementation is kept.
inline const T& Attr(const std::string& name) const {
  auto it = attrs_.find(name);
  if (it == attrs_.end()) {
    // Extra/runtime attributes (e.g. mkldnn flags moved out of OpMaker)
    // live in runtime_attrs_; consult it before reporting failure.
    it = runtime_attrs_.find(name);
    PADDLE_ENFORCE_NE(
        it,
        runtime_attrs_.end(),
        platform::errors::NotFound(
            "(%s) is not found in AttributeMap and RuntimeAttributeMap.",
            name));
  }
  return PADDLE_GET_CONST(T, it->second);
}
void SetAttr(const std::string& name, const Attribute& v) {
PADDLE_ENFORCE_EQ(
......
......@@ -88,16 +88,6 @@ class ExpandV2OpMaker : public framework::OpProtoAndCheckerMaker {
"the corresponding value given by Attr(expand_times).");
AddAttr<std::vector<int>>("shape", "The expanded shape for each dimension.")
.SetDefault({});
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "bfloat16"})
.AsExtra();
AddComment(R"DOC(
Expand the input to the given shape. The rank of X
should be in [1, 6] and size of 'shape' must be in [1, 6] also.
......
......@@ -174,10 +174,6 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
"3: XPUPlace. "
"4: NPUPlace. ")
.SetDefault(-1);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddOutput("Out",
"(Tensor) Tensor of specified shape will be filled "
"with the specified value");
......
......@@ -82,14 +82,6 @@ class GatherOpMaker : public framework::OpProtoAndCheckerMaker {
"The Tensor which contains the axis that we do gather operation.")
.AsDispensable();
AddOutput("Out", "The output of gather op");
AddAttr<bool>(
"overwrite",
"(bool, default: False) "
"In backward process, calc the grad when has same index,"
"If true, update the grad using the overwrite mode in same index,"
"If false, using the accumulate mode in same index.")
.SetDefault(true)
.AsExtra();
AddAttr<int>(
"axis",
"The Tensor which contains the axis that we do gather operation.")
......
......@@ -79,10 +79,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "(Tensor), The first input tensor of mul op.");
AddInput("Y", "(Tensor), The second input tensor of mul op.");
AddOutput("Out", "(Tensor), The output tensor of mul op.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false)
.AsExtra();
AddAttr<int>(
"x_num_col_dims",
R"DOC((int, default 1), The mul_op can take tensors with more than two
......@@ -113,31 +109,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
)DOC")
.SetDefault(1)
.EqualGreaterThan(1);
AddAttr<float>(
"scale_x",
"scale_x to be used for int8 mul input data x. scale_x has the"
"same purpose as scale_in in OPs that support quantization."
"Only to be used with MKL-DNN INT8")
.SetDefault(1.0f)
.AsExtra();
AddAttr<std::vector<float>>(
"scale_y",
"scale_y to be used for int8 mul input data y. scale_y has the"
"same purpose as scale_weights in OPs that support quantization."
"Only to be used with MKL-DNN INT8")
.SetDefault({1.0f})
.AsExtra();
AddAttr<float>("scale_out",
"scale_out to be used for int8 output data."
"Only used with MKL-DNN INT8")
.SetDefault(1.0f)
.AsExtra();
AddAttr<bool>(
"force_fp32_output",
"(bool, default false) Force quantize kernel output FP32, only "
"used in quantized MKL-DNN.")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Mul Operator.
......
......@@ -186,34 +186,12 @@ void Pool2dOpMaker::Make() {
"pooling in each grid area to get output pooling value. "
"Default False.")
.SetDefault(false);
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"ceil_mode",
"(bool) Whether to use the ceil function to calculate "
"output height and width. False is the default. If it is set to False, "
"the floor function will be used. Default False")
.SetDefault(false);
AddAttr<bool>("use_mkldnn",
"(bool) Only used in mkldnn kernel. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"use_quantizer",
"(bool, default false) "
"This parameter is no longer used. Use 'mkldnn_data_type' instead.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"mkldnn_data_type",
"(string, default \"float32\"). Data type of mkldnn kernel")
.SetDefault("float32")
.InEnum({"float32", "int8", "bfloat16"})
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCHW) Only used in "
......@@ -221,12 +199,6 @@ void Pool2dOpMaker::Make() {
"Defaults to \"NHWC\". Specify the data format of the output data, "
"the input will be transformed automatically. ")
.SetDefault("NCHW");
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"padding_algorithm",
"(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
......@@ -234,7 +206,11 @@ void Pool2dOpMaker::Make() {
"Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
.SetDefault("EXPLICIT");
// TODO(dzhwinter): need to registered layout transform function
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
This operation calculates the pooling output based on
the input, pooling_type and pool_size, pool_stride, pool_padding parameters.
......@@ -407,22 +383,12 @@ void Pool3dOpMaker::Make() {
"pooling in each grid area to get output pooling value. "
"Default False")
.SetDefault(false);
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<bool>(
"ceil_mode",
"(bool) Whether to use the ceil function to calculate "
"output height and width. False is the default. If it is set to False, "
"the floor function will be used. Default False")
.SetDefault(false);
AddAttr<bool>("use_mkldnn",
"(bool) Only used in mkldnn kernel. Default False")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCDHW) Only used in "
......@@ -436,8 +402,11 @@ void Pool3dOpMaker::Make() {
"\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
"Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
.SetDefault("EXPLICIT");
// TODO(dzhwinter): need to registered layout transform function
AddAttr<bool>(
"use_cudnn",
"(bool) Only used in cudnn kernel, need install cudnn. Default False")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
......
......@@ -56,11 +56,6 @@ class StackOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<int>("axis",
"The axis along which all of the Inputs(X) should be stacked.")
.SetDefault(0);
AddAttr<bool>(
"use_mkldnn",
"(bool, default false) Indicates if MKL-DNN kernel will be used")
.SetDefault(false)
.AsExtra();
AddComment(R"DOC(
Stack Operator.
Stack all of the Inputs(X) into one tensor along Attr(axis). The dims of all Inputs(X) must be the same.
......
......@@ -275,13 +275,49 @@ class Transpose2Op : public TransposeOp {
}
};
class Transpose2OpMaker : public TransposeOpMaker {
class Transpose2OpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
TransposeOpMaker::Make();
AddInput(
"X",
"(Tensor) The input tensor, tensors with rank up to 6 are supported.");
AddOutput("Out", "(Tensor)The output tensor.");
AddAttr<std::vector<int>>(
"axis",
"(vector<int>) A list of values, and the size of the list should be "
"the same with the input tensor rank. This operator permutes the input "
"tensor's axes according to the values given.");
AddOutput("XShape", "(Tensor)The output tensor.")
.AsIntermediate()
.AsExtra();
AddComment(R"DOC(
Transpose Operator.
The input tensor will be permuted according to the axes given.
The behavior of this operator is similar to how `numpy.transpose` works.
- suppose the input `X` is a 2-D tensor:
$$
X = \begin{pmatrix}
0 &1 &2 \\
3 &4 &5
\end{pmatrix}$$
the given `axes` is: $[1, 0]$, and $Y$ = transpose($X$, axis)
then the output $Y$ is:
$$
Y = \begin{pmatrix}
0 &3 \\
1 &4 \\
2 &5
\end{pmatrix}$$
- Given a input tensor with shape $(N, C, H, W)$ and the `axes` is
$[0, 2, 3, 1]$, then shape of the output tensor will be: $(N, H, W, C)$.
)DOC");
}
};
......
......@@ -390,7 +390,7 @@
extra :
attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']
- op : maximum (elementwise_max)
backward : maximum_grad (elementwise_max_grad)
......@@ -666,3 +666,46 @@
x : X
outputs :
out : Out
- op : expand (expand_v2)
backward : expand_grad (expand_v2_grad)
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- op : full (fill_constant)
extra :
attrs : [bool use_mkldnn = false]
- op : gather
backward : gather_grad
extra :
attrs : [bool overwrite = true]
- op : matmul_with_flatten (mul)
backward : matmul_with_flatten_grad (mul_grad)
extra :
attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
float scale_out = 1.0f, bool force_fp32_output = false]
- op : pool2d
backward : pool2d_grad
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false,
str mkldnn_data_type = "float32", bool is_test = false]
- op : pool3d
backward : pool3d_grad
extra :
attrs : [bool use_mkldnn = false]
- op : stack
backward : stack_grad
extra :
attrs : [bool use_mkldnn = false]
- op : transpose (transpose2)
backward : transpose_grad (transpose2_grad)
extra :
attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
str mkldnn_data_type = "float32"]
......@@ -77,9 +77,7 @@ class TestOperator(unittest.TestCase):
set(mul_op.attr_names),
set([
"x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var",
"use_mkldnn", "scale_x", "scale_y", "scale_out",
"force_fp32_output", "op_namescope", "op_callstack",
"op_device", "with_quant_attr"
"op_namescope", "op_callstack", "op_device", "with_quant_attr"
]))
self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册